/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2003-2015, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512
/* This file includes the declarations that are internal to the
 * trans_pcie layer */
/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 offset;
};
/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 handled;
	u32 unhandled;
};
/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;
#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)
/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;
/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator for reallocation
 * @write_actual: last write index actually written to the device
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue's lists and indices
 * @queue: actual rx queue. Not used for multi-rx queue.
 * @next_rb_is_fragment: indicates that the previous RB that we handled set
 *	the fragmented flag, so the next one is still another fragment
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update, next_rb_is_fragment;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not yet processed
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
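
/*
 * Illustrative hand-off between the RX queue and the allocator, pieced
 * together from the fields documented above (a sketch, not the driver's
 * actual code):
 *
 *	queue:	atomic_inc(&rba->req_pending);
 *		(move used RBDs onto rba->rbd_empty under rba->lock)
 *		queue_work(rba->alloc_wq, &rba->rx_alloc);
 *	worker:	(attach fresh pages, move RBDs to rba->rbd_allocated)
 *		atomic_inc(&rba->req_ready);
 *	queue:	if (atomic_read(&rba->req_ready))
 *			(claim RBDs from rba->rbd_allocated under rba->lock)
 */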
/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}
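
/*
 * Illustrative use of iwl_get_closed_rb_stts() (a sketch, not the
 * driver's actual RX loop): the RX path reads the closed RB count to
 * learn how far the HW has advanced, then walks its shadow queue up to
 * that index. The 0x0FFF mask assumes the status word carries a 12-bit
 * index, as in the multi-queue RX path.
 *
 *	u32 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 *
 *	while (rxq->read != r) {
 *		struct iwl_rx_mem_buffer *rxb = rxq->queue[rxq->read];
 *
 *		(process rxb)
 *		rxq->read = (rxq->read + 1) & (rxq->queue_size - 1);
 *	}
 */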
#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif
/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};
/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};
/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8  state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @iml_dma_addr: dma addr of image loader
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @kw: keep warm address
 * @pnvm_dram: DRAM area that contains the PNVM data
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_hold_nic_awake: whether the NIC is held awake for a host command
 *	in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr kw;

	struct iwl_dram_data pnvm_dram;

	struct iwl_txq *txq_memory;

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t sx_waitq;

	u8 def_rx_queue;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u16 num_rx_bufs;

	enum iwl_amsdu_size rx_buf_size;
	bool scd_set_active;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;

	bool fw_reset_handshake;
	wait_queue_head_t fw_reset_waitq;
};
static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}
static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that the bit is cleared
	 * by writing 1 to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
}
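
/*
 * Typical shape of an MSI-X RX vector handler built on the helper above
 * (illustrative sketch only): poll the queue, then write the automask
 * register so the HW re-enables the vector.
 *
 *	irqreturn_t handler(int irq, void *dev_id)
 *	{
 *		(poll the RX queue bound to this vector)
 *		iwl_pcie_clear_irq(trans, queue);
 *		return IRQ_HANDLED;
 *	}
 */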
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
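
/*
 * Note: the two accessors above are inverses of each other, e.g.
 * (illustrative):
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *
 *	WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
 */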
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);
bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
#define _iwl_trans_pcie_grab_nic_access(trans)		\
	__cond_lock(nic_access_nobh,			\
		    likely(__iwl_trans_pcie_grab_nic_access(trans)))
/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);
/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	527
static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
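
/*
 * Worked example (illustrative): for a firmware image laid out as
 * { sec0, sec1, CPU1_CPU2_SEPARATOR_SECTION, sec2 },
 * iwl_pcie_get_num_sections(fw, 0) returns 2 (the CPU1 sections) and
 * iwl_pcie_get_num_sections(fw, 3) returns 1 (the CPU2 section).
 */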
static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}
static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its mask
		 * bit is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}
static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}
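
/*
 * Note on the inverted write in the two helpers above: the MSI-X mask
 * registers disable a cause when its bit is set, so enabling a set of
 * causes writes the complement. For example (illustrative), with
 * msk = BIT(0) | BIT(1) the value written is 0xfffffffc, which leaves
 * causes 0 and 1 unmasked and masks all others; fh_mask/hw_mask then
 * cache the currently unmasked causes.
 */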
static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}
static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When the ALIVE interrupt arrives, the ISR calls
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}
static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}

	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}
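
/*
 * Illustrative output (assuming DRV_NAME expands to "iwlwifi"): with
 * four allocated vectors and no vector sharing, i = 0..3 yields
 * "iwlwifi: default queue", "iwlwifi: queue 1", "iwlwifi: queue 2" and
 * "iwlwifi: exception".
 */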
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}
static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
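
/*
 * Usage sketch for the helpers above (illustrative): update only the
 * bits covered by the mask, leaving the rest of the register untouched.
 * For instance, to set bit 4 and clear bit 5 in one read-modify-write:
 *
 *	__iwl_trans_pcie_set_bits_mask(trans, reg, BIT(4) | BIT(5), BIT(4));
 *
 * The read-modify-write is not atomic, so callers are expected to
 * serialize access (in this driver, typically via reg_lock).
 */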
static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif
void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
			       struct iwl_host_cmd *cmd);
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd);
#endif /* __iwl_trans_int_pcie_h__ */