/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-op-mode.h"
/*
 * RX related structures and functions
 */

#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512
/* This file includes the declarations that are internal to the
 * trans_pcie layer.
 */
/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 offset;
};
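/*
 * Usage sketch (illustrative only, not part of the driver API): when several
 * RBs share one page, a consumer locates its payload via the per-buffer
 * offset, roughly:
 *
 *	void *data = page_address(rxb->page) + rxb->offset;
 *
 * The DMA side is analogous: @page_dma is mapped so that it already points
 * at this buffer's slice of the page (see the rx code for the authoritative
 * mapping).
 */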
/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};
/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;

#define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0)
/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;
/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator to use for allocation
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @queue: actual rx queue. Not used for multi-rx queue.
 * @next_rb_is_fragment: indicates that the previous RB that we handled set
 *	the fragmented flag, so the next one is still another fragment
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update, next_rb_is_fragment;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
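/*
 * Allocation flow sketch (hedged; the authoritative implementation lives in
 * the rx code, this only restates the batching described above):
 *
 *	// rx path: post RX_POST_REQ_ALLOC empty RBDs to the allocator
 *	spin_lock(&rba->lock);
 *	list_splice_tail_init(&local_empty, &rba->rbd_empty);
 *	spin_unlock(&rba->lock);
 *	atomic_inc(&rba->req_pending);
 *	queue_work(rba->alloc_wq, &rba->rx_alloc);
 *
 *	// later, once the worker has bumped req_ready, the rx path claims
 *	// RX_CLAIM_REQ_ALLOC refilled RBDs from rba->rbd_allocated
 */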
/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}
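/*
 * Usage sketch (hedged): callers normally sample the closed RB index under
 * the queue lock and mask it to the index range, e.g.:
 *
 *	spin_lock(&rxq->lock);
 *	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 *	spin_unlock(&rxq->lock);
 *
 * The 0x0FFF mask reflects the width of the closed_rb_num field; see the
 * rx path for the authoritative use.
 */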
#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 *	debugfs file
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif
/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX = BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
};
/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF = 0,
	IWL_IMAGE_RESP_SUCCESS = 1,
	IWL_IMAGE_RESP_FAIL = 2,
};
/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8  state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml_dma_addr: image loader image DMA address
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @kw: keep warm address
 * @pnvm_dram: DRAM area that contains the PNVM data
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
 * @fw_mon_data: fw continuous recording data (CONFIG_IWLWIFI_DEBUGFS only)
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 * @rf_name: name/version of the CRF, if any
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr kw;

	struct iwl_dram_data pnvm_dram;

	struct iwl_txq *txq_memory;

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t sx_waitq;

	u8 def_rx_queue;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	enum iwl_amsdu_size rx_buf_size;
	bool scd_set_active;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;

	bool fw_reset_handshake;
	wait_queue_head_t fw_reset_waitq;

	char rf_name[32];
};
static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}
static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that the bit is cleared
	 * by writing 1 to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
}
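/*
 * Usage sketch (hedged, illustrative handler shape only): an MSI-X vector's
 * handler re-arms its cause after servicing it, along the lines of:
 *
 *	irqreturn_t some_rx_vector_handler(int irq, void *dev_id)
 *	{
 *		// ... service the rx queue ...
 *		iwl_pcie_clear_irq(trans, queue);	// W1C: re-enable vector
 *		return IRQ_HANDLED;
 *	}
 */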
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);
bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
#define _iwl_trans_pcie_grab_nic_access(trans)			\
	__cond_lock(nic_access_nobh,				\
		    likely(__iwl_trans_pcie_grab_nic_access(trans)))
/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);
/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
#define IWL_NUM_OF_COMPLETION_RINGS 31
#define IWL_NUM_OF_TRANSFER_RINGS 512
static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
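/*
 * Worked example (follows directly from the loop above): for an image laid
 * out as [sec0][sec1][CPU1_CPU2_SEPARATOR_SECTION][sec3][sec4],
 * iwl_pcie_get_num_sections(fw, 0) returns 2 (the CPU1 sections) and
 * iwl_pcie_get_num_sections(fw, 3) returns 2 (the CPU2 sections), assuming
 * no further separator follows.
 */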
static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}
static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its mask
		 * bit is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}
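/*
 * Polarity note with a worked example: in MSI-X mode a cause is *enabled*
 * when its mask bit is 0, hence the bitwise-not in the writes above. For
 * instance, if fh_mask == (BIT(0) | BIT(1)), the register is written as
 * ~fh_mask, i.e. every bit set except bits 0 and 1, enabling exactly those
 * two causes.
 */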
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}
static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}
static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we'll receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}
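/*
 * Illustrative flow (hedged; derived from the comment above, the real call
 * sites live in the ISR code, not in this header):
 *
 *	iwl_enable_fw_load_int_ctx_info(trans);	// before FW load
 *	// ... later, in the ISR, once CSR_INT_BIT_ALIVE fires:
 *	iwl_enable_fw_load_int_ctx_info(trans);	// RX stays unmasked, so the
 *						// ALIVE notification (an Rx)
 *						// can now be processed
 */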
static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}
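/*
 * Example outputs (hedged; assumes DRV_NAME expands to "iwlwifi"): with a
 * shared vector, i == 0 maps to "iwlwifi: shared IRQ" and i == 2 maps to
 * "iwlwifi: queue 2" or "iwlwifi: queue 3", depending on whether the shared
 * vector also serves the first RSS queue. Without sharing, i == 0 is
 * "iwlwifi: default queue" and the last vector is "iwlwifi: exception".
 */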
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif
	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);
/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
			       struct iwl_host_cmd *cmd);
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd);
#endif /* __iwl_trans_int_pcie_h__ */