/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2003-2015, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512
struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer
 */
/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
 * @list: list entry for the rx_free/rx_used lists
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 offset;
};
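
/*
 * Illustrative sketch (an assumption for clarity, not code from the
 * original header): on completion the HW hands back the rxb's vid, which
 * the driver can use to find the buffer again in the global table kept in
 * struct iwl_trans_pcie below (the real lookup may mask or adjust the vid):
 *
 *	struct iwl_rx_mem_buffer *rxb = trans_pcie->global_table[vid];
 *
 *	rxb->invalid = true;	// back in driver ownership
 */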
/**
 * struct isr_statistics - interrupt statistics
 */
struct isr_statistics {
	/* per-cause interrupt counters */
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};
/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;
#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)
/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;
/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of used RBDs handed to the allocator for reallocation
 * @write_actual: last write pointer actually written to the device
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: per-queue lock
 * @queue: actual rx queue. Not used for multi-rx queue.
 * @next_rb_is_fragment: indicates that the previous RB that we handled set
 *	the fragmented flag, so the next one is still another fragment
 * @napi: NAPI struct for this queue
 * @queue_size: size of this queue
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update, next_rb_is_fragment;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};
/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not yet processed
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
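
/*
 * Illustrative allocator flow (a sketch under assumptions, not code from
 * the original header; "rba" and "rxq" refer to the structs declared
 * above): a queue posts a request once RX_CLAIM_REQ_ALLOC used RBDs have
 * accumulated, and claims the batch back once the worker has honored it:
 *
 *	// posting, e.g. from the RX path:
 *	if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) {
 *		atomic_inc(&rba->req_pending);
 *		queue_work(rba->alloc_wq, &rba->rx_alloc);
 *	}
 *
 *	// claiming, once a request was honored:
 *	if (atomic_read(&rba->req_ready) > 0) {
 *		atomic_dec(&rba->req_ready);
 *		// under rba->lock, move RX_CLAIM_REQ_ALLOC entries from
 *		// rba->rbd_allocated to rxq->rx_free
 *	}
 */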
/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport (for the device family)
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}
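
/*
 * Illustrative use (a sketch, not from the original header; the 0x0FFF
 * mask mirrors what the RX path applies to closed_rb_num): find out how
 * far the HW has advanced and walk the queue up to that point:
 *
 *	u32 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 *
 *	while (rxq->read != r) {
 *		// handle rxq->queue[rxq->read], then advance rxq->read
 *	}
 */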
#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 *	debugfs file
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif
/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX = BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
};
/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF = 0,
	IWL_IMAGE_RESP_SUCCESS = 1,
	IWL_IMAGE_RESP_FAIL = 2,
};
/**
 * struct cont_rec: continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8 state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @kw: keep warm address
 * @pnvm_dram: DRAM area that contains the PNVM data
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
 * @fw_mon_data: fw continuous recording data
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 * @rf_name: name/version of the CRF, if any
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr kw;
	struct iwl_dram_data pnvm_dram;
	struct iwl_txq *txq_memory;

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t sx_waitq;

	u8 def_rx_queue;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u16 num_rx_bufs;

	enum iwl_amsdu_size rx_buf_size;
	bool scd_set_active;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;

	bool fw_reset_handshake;
	bool fw_reset_done;
	wait_queue_head_t fw_reset_waitq;

	char rf_name[32];
};
static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}
static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that the bit is cleared by writing
	 * 1 to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
}
static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
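
/*
 * Illustrative round trip (a sketch, not from the original header): the
 * two helpers above are inverses of each other, so for any valid trans:
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *
 *	WARN_ON(iwl_trans_pcie_get_trans(trans_pcie) != trans);
 */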
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);
bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);

#define _iwl_trans_pcie_grab_nic_access(trans)			\
	__cond_lock(nic_access_nobh,				\
		    likely(__iwl_trans_pcie_grab_nic_access(trans)))
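
/*
 * Illustrative use (a sketch; iwl_trans_release_nic_access() is assumed to
 * be the matching release helper from iwl-trans.h): wrap direct register
 * access in a grab/release pair so the NIC is held awake only while needed:
 *
 *	if (_iwl_trans_pcie_grab_nic_access(trans)) {
 *		// ... access periphery registers ...
 *		iwl_trans_release_nic_access(trans);
 *	}
 */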
/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);
/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);
/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}
static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its mask
		 * bit is unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}
static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}
static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}
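
/*
 * Example of the inverted MSI-X semantics noted in _iwl_enable_interrupts()
 * (a sketch, not from the original header): enabling only the RF-kill HW
 * cause means writing a mask with that bit cleared and all others set:
 *
 *	iwl_enable_hw_int_msk_msix(trans, MSIX_HW_INT_CAUSES_REG_RF_KILL);
 *	// writes ~MSIX_HW_INT_CAUSES_REG_RF_KILL to CSR_MSIX_HW_INT_MASK_AD
 */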
static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}
static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When we receive the ALIVE interrupt, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (which is not really needed anymore) but also the
		 * RX interrupt which will allow us to receive the ALIVE
		 * notification (which is Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}
static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}

	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit to
		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
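
/*
 * Illustrative use (a sketch, not from the original header): update only
 * the requested bits while leaving the rest of the register intact:
 *
 *	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 *				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 */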
static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif
void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
			       struct iwl_host_cmd *cmd);
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd);
#endif /* __iwl_trans_int_pcie_h__ */