/******************************************************************************
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *****************************************************************************/
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"
/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/
/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to Receive Buffers to be filled by the NIC. These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC. The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ (see the illustrative sketch following this comment).
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped)
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt. The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   8 buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   later.
 * + When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool of num_queues * (8 - 2) RBDs - the
 *   maximum missing RBDs per allocation request (a request is posted with 2
 *   empty RBDs, and there is no guarantee when the other 6 RBDs are supplied).
 *   The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 *
 */
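/*
 * Illustrative sketch (not part of the driver): the empty/full rules
 * described above for a power-of-two ring. The helper names are
 * hypothetical and exist only for illustration.
 */
#if 0
static bool example_rxq_is_empty(u32 read, u32 write, u32 size)
{
	/* empty (no good data) when WRITE == READ - 1, wrapped */
	return write == ((read - 1) & (size - 1));
}

static bool example_rxq_is_full(u32 read, u32 write, u32 size)
{
	/* full when WRITE == READ; one slot is sacrificed for this */
	return write == read;
}
#endif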
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}
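/*
 * Worked example (hypothetical values): with queue_size = 256, read = 10
 * and write = 200, (10 - 200 - 1) & 255 = (-191) & 255 = 65 free slots;
 * the mask keeps the subtraction well defined even when it underflows.
 */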
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
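/*
 * Sketch (an assumption, not stated in this file): the shift is lossless
 * because the buffers handed to the device here are page (4 KiB) aligned,
 * so the low 8 bits of the DMA address are always zero and the device
 * pointer is effectively dma_addr in 256-byte units.
 */
#if 0
	/* hypothetical value, for illustration */
	dma_addr_t dma = 0x12345000;
	__le32 ptr = iwl_pcie_dma_addr2rbd_ptr(dma);	/* 0x00123450 */
#endif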
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		/* TODO: remove this for 22560 once fw does it */
		iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}
/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    BIT(trans->cfg->csr->flag_mac_access_req));
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    (rxq->write_actual |
			     ((FIRST_RX_QUEUE + rxq->id) << 16)));
	else if (trans->cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
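/*
 * Worked example (hypothetical value): the device is only told about the
 * write pointer in multiples of 8. With rxq->write = 13,
 * round_down(13, 8) = 8 is what reaches the hardware; slots 8..12 are
 * restocked but not yet visible to the firmware, which is why the restock
 * functions below compare write_actual against (write & ~0x7) first.
 */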
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock(&rxq->lock);
	}
}
static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		bd[rxq->write].type_n_size =
			cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
			((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}
}
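/*
 * Sketch of the legacy (pre-22560, multi-queue) 64-bit BD built above:
 * the page DMA address is 4 KiB aligned, so its low 12 bits are zero and
 * the 12-bit vid can be OR'd into them (cf. the
 * WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)) in iwl_pcie_rxmq_restock()).
 * Values below are hypothetical.
 */
#if 0
	u64 bd  = 0x123456000ULL | 0x5a5;	/* page_dma | vid */
	u64 dma = bd & ~0xfffULL;		/* 0x123456000: the page again */
	u16 vid = bd & 0xfff;			/* 0x5a5: global_table index + 1 */
#endif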
/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* 12 first bits are expected to be empty */
		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock(&rxq->lock);
	}
}
/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}
/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}
	return page;
}
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated RBDs.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		spin_lock(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			return;
		}
		spin_unlock(&rxq->lock);

		/* Alloc a new receive buffer */
		page = iwl_pcie_rx_alloc_page(trans, priority);
		if (!page)
			return;

		spin_lock(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock(&rxq->lock);
	}
}
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < RX_POOL_SIZE; i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       PAGE_SIZE << trans_pcie->rx_page_order,
			       DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}
/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates for each received request 8 pages
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_xchg(&rba->req_pending, 0);

	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page, 0,
						     PAGE_SIZE << trans_pcie->rx_page_order,
						     DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		atomic_dec(&rba->req_pending);
		pending--;

		if (!pending) {
			pending = atomic_xchg(&rba->req_pending, 0);
			IWL_DEBUG_RX(trans,
				     "Pending allocation requests = %d\n",
				     pending);
		}

		spin_lock(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}
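/*
 * Worked example of the batching scheme (assuming the values of the
 * constants in internal.h: RX_CLAIM_REQ_ALLOC = 8, RX_POST_REQ_ALLOC = 2):
 * a request is posted as soon as 2 RBDs reach the allocator, the initial
 * pool of num_queues * (8 - 2) RBDs covers the remaining 6, and the queue
 * later claims all 8 at once via iwl_pcie_rx_allocator_get().
 */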
/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by queue when the queue posted allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}
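/*
 * Sketch (not driver code) of the atomic_dec_if_positive() gate used
 * above, with hypothetical req_ready values:
 *
 *	req_ready == 0: returns -1, no decrement -> no request claimed
 *	req_ready == 2: returns  1, req_ready becomes 1 -> one claimed
 */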
void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}
static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
	struct iwl_rx_transfer_desc *rx_td;

	if (use_rx_td)
		return sizeof(*rx_td);
	return trans->cfg->mq_rx_supported ? sizeof(__le64) :
		sizeof(__le32);
}
static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct device *dev = trans->dev;
	bool use_rx_td = (trans->cfg->device_family >=
			  IWL_DEVICE_FAMILY_22560);
	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	if (rxq->bd)
		dma_free_coherent(trans->dev,
				  free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(trans->dev,
				  use_rx_td ? sizeof(__le16) :
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(trans->dev,
				  (use_rx_td ? sizeof(*rxq->cd) :
				   sizeof(__le32)) * rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;

	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
		return;

	if (rxq->tr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->tr_tail, rxq->tr_tail_dma);
	rxq->tr_tail_dma = 0;
	rxq->tr_tail = NULL;

	if (rxq->cr_tail)
		dma_free_coherent(dev, sizeof(__le16),
				  rxq->cr_tail, rxq->cr_tail_dma);
	rxq->cr_tail_dma = 0;
	rxq->cr_tail = NULL;
}
static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size;
	bool use_rx_td = (trans->cfg->device_family >=
			  IWL_DEVICE_FAMILY_22560);

	spin_lock_init(&rxq->lock);
	if (trans->cfg->mq_rx_supported)
		rxq->queue_size = MQ_RX_TABLE_SIZE;
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_zalloc_coherent(dev,
				      free_size * rxq->queue_size,
				      &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->cfg->mq_rx_supported) {
		rxq->used_bd = dma_zalloc_coherent(dev,
						   (use_rx_td ?
						    sizeof(*rxq->cd) :
						    sizeof(__le32)) *
						   rxq->queue_size,
						   &rxq->used_bd_dma,
						   GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
					   sizeof(__le16) :
					   sizeof(struct iwl_rb_status),
					   &rxq->rb_stts_dma,
					   GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err;

	if (!use_rx_td)
		return 0;

	/* Allocate the driver's pointer to TR tail */
	rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
					   &rxq->tr_tail_dma,
					   GFP_KERNEL);
	if (!rxq->tr_tail)
		goto err;

	/* Allocate the driver's pointer to CR tail */
	rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
					   &rxq->cr_tail_dma,
					   GFP_KERNEL);
	if (!rxq->cr_tail)
		goto err;
	/*
	 * W/A 22560 device step Z0 must be non zero bug
	 * TODO: remove this when stop supporting Z0
	 */
	*rxq->cr_tail = cpu_to_le16(500);

	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}
	kfree(trans_pcie->rxq);

	return -ENOMEM;
}
int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, ret;

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	if (!trans_pcie->rxq)
		return -ENOMEM;

	spin_lock_init(&rba->lock);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			return ret;
	}
	return 0;
}
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	unsigned long flags;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
{
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
		return;

	if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
		return;

	if (!trans->cfg->integrated)
		return;

	/*
	 * Turn on the chicken-bits that cause MAC wakeup for RX-related
	 * values.
	 * This costs some power, but needed for W/A 9000 integrated A-step
	 * bug where shadow registers are not in the retention list and their
	 * value is lost when NIC powers down
	 */
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
		    CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
		    CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
}
static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size, enabled = 0;
	unsigned long flags;
	int i;

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_2K:
		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
		break;
	case IWL_AMSDU_4K:
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* Stop Rx DMA */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);

	for (i = 0; i < trans->num_rx_queues; i++) {
		/* Tell device where to find RBD free table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_FRBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].bd_dma);
		/* Tell device where to find RBD used table in DRAM */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
					 trans_pcie->rxq[i].used_bd_dma);
		/* Tell device where in DRAM to update its Rx status */
		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBD_STTS_WPTR_LSB(i),
					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);

		enabled |= BIT(i) | BIT(i + 16);
	}

	/*
	 * Enable Rx DMA
	 * Rx buffer size 4 or 8k or 12k
	 * Min RB size 4 or 8
	 * Drop frames that exceed RB size
	 * 512 RBDs
	 */
	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
			       RFH_DMA_EN_ENABLE_VAL | rb_size |
			       RFH_RXF_DMA_MIN_RB_4_8 |
			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
			       RFH_RXF_DMA_RBDCB_SIZE_512);

	/*
	 * Activate DMA snooping.
	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
	 * Default queue is 0
	 */
	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
			       RFH_GEN_CFG_RFH_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
					       trans->cfg->integrated ?
					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
	/* Enable the relevant rx queues */
	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);

	iwl_trans_release_nic_access(trans, &flags);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	iwl_pcie_enable_rx_wake(trans, true);
}
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
{
	lockdep_assert_held(&rxq->lock);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	rxq->free_count = 0;
	rxq->used_count = 0;
}
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}
int _iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *def_rxq;
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, err, queue_size, allocator_pool_size, num_alloc;

	if (!trans_pcie->rxq) {
		err = iwl_pcie_rx_alloc(trans);
		if (err)
			return err;
	}
	def_rxq = trans_pcie->rxq;

	cancel_work_sync(&rba->rx_alloc);

	spin_lock(&rba->lock);
	atomic_set(&rba->req_pending, 0);
	atomic_set(&rba->req_ready, 0);
	INIT_LIST_HEAD(&rba->rbd_allocated);
	INIT_LIST_HEAD(&rba->rbd_empty);
	spin_unlock(&rba->lock);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		def_rxq->queue[i] = NULL;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;

		spin_lock(&rxq->lock);
		/*
		 * Set read write pointer to reflect that we have processed
		 * and used all buffers, but have not restocked the Rx queue
		 * with fresh buffers
		 */
		rxq->read = 0;
		rxq->write = 0;
		rxq->write_actual = 0;
		memset(rxq->rb_stts, 0,
		       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
		       sizeof(__le16) : sizeof(struct iwl_rb_status));

		iwl_pcie_rx_init_rxb_lists(rxq);

		if (!rxq->napi.poll)
			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
				       iwl_pcie_dummy_napi_poll, 64);

		spin_unlock(&rxq->lock);
	}

	/* move the pool to the default queue and allocator ownerships */
	queue_size = trans->cfg->mq_rx_supported ?
			MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
	allocator_pool_size = trans->num_rx_queues *
		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
	num_alloc = queue_size + allocator_pool_size;
	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
		     ARRAY_SIZE(trans_pcie->rx_pool));
	for (i = 0; i < num_alloc; i++) {
		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

		if (i < allocator_pool_size)
			list_add(&rxb->list, &rba->rbd_empty);
		else
			list_add(&rxb->list, &def_rxq->rx_used);
		trans_pcie->global_table[i] = rxb;
		rxb->vid = (u16)(i + 1);
		rxb->invalid = true;
	}

	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);

	return 0;
}
int iwl_pcie_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = _iwl_pcie_rx_init(trans);

	if (ret)
		return ret;

	if (trans->cfg->mq_rx_supported)
		iwl_pcie_rx_mq_hw_init(trans);
	else
		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);

	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);

	spin_lock(&trans_pcie->rxq->lock);
	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
	spin_unlock(&trans_pcie->rxq->lock);

	return 0;
}
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
{
	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/*
	 * We don't configure the RFH.
	 * Restock will be done at alive, after firmware configured the RFH.
	 */
	return _iwl_pcie_rx_init(trans);
}
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * and we should not free anything
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	cancel_work_sync(&rba->rx_alloc);

	iwl_pcie_free_rbs_pool(trans);

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);

		if (rxq->napi.poll)
			netif_napi_del(&rxq->napi);
	}
	kfree(trans_pcie->rxq);
}
static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
					  struct iwl_rb_allocator *rba)
{
	spin_lock(&rba->lock);
	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}
/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when a RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list, will be moved to allocator in batches
	 * before claiming or posting a request */
	list_add_tail(&rxb->list, &rxq->rx_used);

	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
	 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC buffers
	 * but still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * Allocator has another 6 from pool for the request completion
		 */
		iwl_pcie_rx_move_to_allocator(rxq, rba);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				  struct iwl_rxq *rxq,
				  struct iwl_rx_mem_buffer *rxb,
				  bool emergency,
				  int i)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	bool page_stolen = false;
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		u16 sequence;
		bool reclaim;
		int index, cmd_index, len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
			rxcb.status = rxq->cd[i].status;

		pkt = rxb_addr(&rxcb);

		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
			IWL_DEBUG_RX(trans,
				     "Q %d: RB end marker at offset %d\n",
				     rxq->id, offset);
			break;
		}

		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS != rxq->id,
		     "frame on invalid queue - is on %d and indicates %d\n",
		     rxq->id,
		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS);

		IWL_DEBUG_RX(trans,
			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
			     rxq->id, offset,
			     iwl_get_cmd_string(trans,
						iwl_cmd_id(pkt->hdr.cmd,
							   pkt->hdr.group_id,
							   0)),
			     pkt->hdr.group_id, pkt->hdr.cmd,
			     le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim && !pkt->hdr.group_id) {
			int i;

			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		sequence = le16_to_cpu(pkt->hdr.sequence);
		index = SEQ_TO_INDEX(sequence);
		cmd_index = iwl_pcie_get_cmd_index(txq, index);

		if (rxq->id == trans_pcie->def_rx_queue)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
				       &rxcb);
		else
			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
					   &rxcb, rxq->id);

		if (reclaim) {
			kzfree(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;
		}

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
			break;
		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, 0,
				     PAGE_SIZE << trans_pcie->rx_page_order,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}
static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
						  struct iwl_rxq *rxq, int i)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	u16 vid;

	if (!trans->cfg->mq_rx_supported) {
		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;
		return rxb;
	}

	/* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
	else
		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;

	if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
		goto out_err;

	rxb = trans_pcie->global_table[vid - 1];
	if (rxb->invalid)
		goto out_err;

	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;

	rxb->invalid = true;

	return rxb;

out_err:
	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
	iwl_force_nmi(trans);
	return NULL;
}
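/*
 * Sketch of the vid round-trip (hypothetical values): _iwl_pcie_rx_init()
 * assigns rxb->vid = i + 1, so vid 0 can serve as "invalid" and the
 * lookup above is global_table[vid - 1].
 */
#if 0
	u16 vid = le32_to_cpu(bd_32_entry) & 0x0FFF;	/* say 0x005 */
	rxb = trans_pcie->global_table[vid - 1];	/* i.e. rx_pool[4] */
#endif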
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 */
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
	u32 r, i, count = 0;
	bool emergency = false;

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
	i = rxq->read;

	/* W/A 9000 device step A0 wrap-around bug */
	r &= (rxq->queue_size - 1);

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);

	while (i != r) {
		struct iwl_rb_allocator *rba = &trans_pcie->rba;
		struct iwl_rx_mem_buffer *rxb;
		/* number of RBDs still waiting for page allocation */
		u32 rb_pending_alloc =
			atomic_read(&trans_pcie->rba.req_pending) *
			RX_CLAIM_REQ_ALLOC;

		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
			     !emergency)) {
			iwl_pcie_rx_move_to_allocator(rxq, rba);
			emergency = true;
		}

		rxb = iwl_pcie_get_rxb(trans, rxq, i);
		if (!rxb)
			goto out;

		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);

		i = (i + 1) & (rxq->queue_size - 1);

		/*
		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator.
		 * If not ready - will try to reclaim next time.
		 * There is no need to reschedule work - allocator exits only
		 * on success.
		 */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
			iwl_pcie_rx_allocator_get(trans, rxq);

		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
			/* Add the remaining empty RBDs for allocator use */
			iwl_pcie_rx_move_to_allocator(rxq, rba);
		} else if (emergency) {
			count++;
			if (count == 8) {
				count = 0;
				if (rb_pending_alloc < rxq->queue_size / 3)
					emergency = false;

				rxq->read = i;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
				iwl_pcie_rxq_restock(trans, rxq);
				goto restart;
			}
		}
	}
out:
	/* Backtrack one entry */
	rxq->read = i;
	/* update cr tail with the rxq read pointer */
	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
		*rxq->cr_tail = cpu_to_le16(r);
	spin_unlock(&rxq->lock);

	/*
	 * handle a case where in emergency there are some unallocated RBDs.
	 * those RBDs are in the used list, but are not tracked by the queue's
	 * used_count which counts allocator owned RBDs.
	 * unallocated emergency RBDs must be allocated on exit, otherwise
	 * when called again the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * by allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);

	if (rxq->napi.poll)
		napi_gro_flush(&rxq->napi, false);

	iwl_pcie_rxq_restock(trans, rxq);
}
static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
{
	u8 queue = entry->entry;
	struct msix_entry *entries = entry - queue;

	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
}
/*
 * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw
 *
 * This interrupt handler should be used with RSS queue only.
 */
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);

	if (WARN_ON(entry->entry >= trans->num_rx_queues))
		return IRQ_NONE;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	local_bh_disable();
	iwl_pcie_rx_handle(trans, entry->entry);
	local_bh_enable();

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans_pcie->wait_command_queue);
		return;
	}

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		if (!trans_pcie->txq[i])
			continue;
		del_timer(&trans_pcie->txq[i]->stuck_timer);
	}

	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans_pcie->wait_command_queue);
}
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
	u32 inta;

	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);

	trace_iwlwifi_dev_irq(trans->dev);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(trans, CSR_INT);

	/* the thread will service interrupts and re-enable them */
	return inta;
}
/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))

/* With an ICT (interrupt cause table) the driver stops using the INTA
 * register to discover the device's interrupt cause, since reading that
 * register is expensive. Instead, the device writes interrupt causes into
 * the ICT table in DRAM, increments its index and fires the interrupt.
 * The driver ORs all ICT table entries from the current index up to the
 * first entry with value 0; the result is the interrupt cause that needs
 * to be serviced. The driver then sets those entries back to 0 and updates
 * the index.
 */
static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 inta;
	u32 val = 0;
	u32 read;

	trace_iwlwifi_dev_irq(trans->dev);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
	if (!read)
		return 0;

	/*
	 * Collect all entries up to the first 0, starting from ict_index;
	 * note we already read at ict_index.
	 */
	do {
		val |= read;
		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
			      trans_pcie->ict_index, read);
		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
		trans_pcie->ict_index =
			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
					   read);
	} while (read);

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	/*
	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	return inta;
}
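/*
 * Worked example (hypothetical value): for val = 0x00008001,
 * inta = (0xff & val) | ((0xff00 & val) << 16)
 *      = 0x00000001 | 0x80000000 = 0x80000001,
 * i.e. ICT bit 15 becomes CSR_INT bit 31 (the Rx bit) and the low byte
 * maps through unchanged.
 */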
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	bool hw_rfkill, prev, report;

	mutex_lock(&trans_pcie->mutex);
	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
		set_bit(STATUS_RFKILL_HW, &trans->status);
	}
	if (trans_pcie->opmode_down)
		report = hw_rfkill;
	else
		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
		 hw_rfkill ? "disable radio" : "enable radio");

	isr_stats->rfkill++;

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);
	mutex_unlock(&trans_pcie->mutex);

	if (hw_rfkill) {
		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
				       &trans->status))
			IWL_DEBUG_RF_KILL(trans,
					  "Rfkill while SYNC HCMD in flight\n");
		wake_up(&trans_pcie->wait_command_queue);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
}
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			_iwl_enable_interrupts(trans);
		spin_unlock(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected. Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(trans,
				      "Scheduler finished to transmit the frame/frames.\n");
			isr_stats->sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
			isr_stats->alive++;
			if (trans->cfg->gen2) {
				/*
				 * We can restock, since firmware configured
				 * the RFH
				 */
				iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
			}
		}
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		iwl_pcie_handle_rfkill_irq(trans);
		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
				    CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				    CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive the
		 * RX interrupt while the shared data changes do not yet
		 * reflect it; the periodic interrupt will detect any dangling
		 * Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			   CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	spin_lock(&trans_pcie->irq_lock);
	/* only Re-enable all interrupt if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &trans->status))
		_iwl_enable_interrupts(trans);
	/* we are loading the firmware, enable FH_TX interrupt only */
	else if (handled & CSR_INT_BIT_FH_TX)
		iwl_enable_fw_load_int(trans);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(trans);
	spin_unlock(&trans_pcie->irq_lock);

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * ICT functions
 *
 ******************************************************************************/
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->ict_tbl) {
		dma_free_coherent(trans->dev, ICT_SIZE,
				  trans_pcie->ict_tbl,
				  trans_pcie->ict_tbl_dma);
		trans_pcie->ict_tbl = NULL;
		trans_pcie->ict_tbl_dma = 0;
	}
}
/*
 * Allocate the DRAM shared table; it is an aligned memory block of ICT_SIZE.
 * Also reset all data related to the ICT table interrupt.
 */
int iwl_pcie_alloc_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->ict_tbl =
		dma_zalloc_coherent(trans->dev, ICT_SIZE,
				    &trans_pcie->ict_tbl_dma,
				    GFP_KERNEL);
	if (!trans_pcie->ict_tbl)
		return -ENOMEM;

	/* just an API sanity check ... it is guaranteed to be aligned */
	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
		iwl_pcie_free_ict(trans);
		return -EINVAL;
	}

	return 0;
}
/* Device is going up; inform it about using the ICT interrupt table,
 * and tell the driver to start using ICT interrupts.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	if (!trans_pcie->ict_tbl)
		return;

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}
/* Device is going down; disable ICT interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock(&trans_pcie->irq_lock);
}
irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}
irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta_fh, inta_hw;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock(&trans_pcie->irq_lock);
	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the cause registers to avoid handling the same cause twice.
	 */
	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
	spin_unlock(&trans_pcie->irq_lock);

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);

	if (unlikely(!(inta_fh | inta_hw))) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
			      inta_fh,
			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 0);
		local_bh_enable();
	}

	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
		local_bh_disable();
		iwl_pcie_rx_handle(trans, 1);
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		/*
		 * Wake up uCode load routine,
		 * now that load is complete
		 */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
	}

	/* Error detected by uCode */
	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n",
			inta_fh);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
	}

	/* After checking FH register check HW register */
	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans,
			      "ISR inta_hw 0x%08x, enabled 0x%08x\n",
			      inta_hw,
			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));

	/* Alive notification via Rx interrupt will do the real work */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->cfg->gen2) {
			/* We can restock, since firmware configured the RFH */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}
	}

	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
	    inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
		/* Reflect IML transfer status */
		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);

		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
		if (res == IWL_IMAGE_RESP_FAIL) {
			isr_stats->sw++;
			iwl_pcie_irq_handle_error(trans);
		}
	} else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
		/* uCode wakes up after power-down sleep */
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;
	}

	/* Chip got too hot and stopped itself */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
	}

	/* HW RF KILL switch toggled */
	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
		iwl_pcie_handle_rfkill_irq(trans);

	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
		IWL_ERR(trans,
			"Hardware error detected. Restarting.\n");

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);
	}

	iwl_pcie_clear_irq(trans, entry);

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}