2 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/skbuff.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/slab.h>
40 #include <linux/prefetch.h>
46 #include "firmware_exports.h"
47 #include "cxgb3_offload.h"
51 #define SGE_RX_SM_BUF_SIZE 1536
53 #define SGE_RX_COPY_THRES 256
54 #define SGE_RX_PULL_LEN 128
56 #define SGE_PG_RSVD SMP_CACHE_BYTES
58 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
59 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
62 #define FL0_PG_CHUNK_SIZE 2048
63 #define FL0_PG_ORDER 0
64 #define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
65 #define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
66 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
67 #define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
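/*
 * For example, with 4KB pages FL0 carves each order-0 page into two 2KB
 * chunks, while FL1 uses order-1 (8KB) allocations handed out as a single
 * 8KB chunk.
 */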
69 #define SGE_RX_DROP_THRES 16
70 #define RX_RECLAIM_PERIOD (HZ/4)
73 * Max number of Rx buffers we replenish at a time.
75 #define MAX_RX_REFILL 16U
77 * Period of the Tx buffer reclaim timer. This timer does not need to run
78 * frequently as Tx buffers are usually reclaimed by new Tx packets.
80 #define TX_RECLAIM_PERIOD (HZ / 4)
81 #define TX_RECLAIM_TIMER_CHUNK 64U
82 #define TX_RECLAIM_CHUNK 16U
84 /* WR size in bytes */
85 #define WR_LEN (WR_FLITS * 8)
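/* A flit is an 8-byte (64-bit) datum, so a work request spans WR_FLITS such units. */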
88 * Types of Tx queues in each queue set. Order here matters; do not change it.
90 enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };
92 /* Values for sge_txq.flags */
94 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
95 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
99 __be64 flit[TX_DESC_FLITS];
109 struct tx_sw_desc { /* SW state per Tx descriptor */
111 u8 eop; /* set if last descriptor for packet */
112 u8 addr_idx; /* buffer index of first SGL entry in descriptor */
113 u8 fragidx; /* first page fragment associated with descriptor */
114 s8 sflit; /* start flit of first SGL entry in descriptor */
117 struct rx_sw_desc { /* SW state per Rx descriptor */
120 struct fl_pg_chunk pg_chunk;
122 DEFINE_DMA_UNMAP_ADDR(dma_addr);
125 struct rsp_desc { /* response queue descriptor */
126 struct rss_header rss_hdr;
134 * Holds unmapping information for Tx packets that need deferred unmapping.
135 * This structure lives at skb->head and must be allocated by callers.
137 struct deferred_unmap_info {
138 struct pci_dev *pdev;
139 dma_addr_t addr[MAX_SKB_FRAGS + 1];
143 * Maps a number of flits to the number of Tx descriptors that can hold them.
146 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
148 * HW allows up to 4 descriptors to be combined into a WR.
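* For example, if WR_FLITS is 15 (as the SGE_NUM_GENBITS == 2 table below
* suggests), a 16-flit SGL needs 1 + (16 - 2) / (15 - 1) = 2 descriptors.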
150 static u8 flit_desc_map[] = {
152 #if SGE_NUM_GENBITS == 1
153 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
154 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
155 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
156 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
157 #elif SGE_NUM_GENBITS == 2
158 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
159 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
160 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
161 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
163 # error "SGE_NUM_GENBITS must be 1 or 2"
167 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
169 return container_of(q, struct sge_qset, fl[qidx]);
172 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
174 return container_of(q, struct sge_qset, rspq);
177 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
179 return container_of(q, struct sge_qset, txq[qidx]);
183 * refill_rspq - replenish an SGE response queue
184 * @adapter: the adapter
185 * @q: the response queue to replenish
186 * @credits: how many new responses to make available
188 * Replenishes a response queue by making the supplied number of responses available to the HW.
191 static inline void refill_rspq(struct adapter *adapter,
192 const struct sge_rspq *q, unsigned int credits)
195 t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
196 V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
200 * need_skb_unmap - does the platform need unmapping of sk_buffs?
202 * Returns true if the platform needs sk_buff unmapping. The compiler
203 * optimizes away the unmapping code when this returns false (a compile-time constant).
205 static inline int need_skb_unmap(void)
207 #ifdef CONFIG_NEED_DMA_MAP_STATE
215 * unmap_skb - unmap a packet main body and its page fragments
217 * @q: the Tx queue containing Tx descriptors for the packet
218 * @cidx: index of Tx descriptor
219 * @pdev: the PCI device
221 * Unmap the main body of an sk_buff and its page fragments, if any.
222 * Because of the fairly complicated structure of our SGLs and the desire
223 * to conserve space for metadata, the information necessary to unmap an
224 * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
225 * descriptors (the physical addresses of the various data buffers), and
226 * the SW descriptor state (assorted indices). The send functions
227 * initialize the indices for the first packet descriptor so we can unmap
228 * the buffers held in the first Tx descriptor here, and we have enough
229 * information at this point to set the state for the next Tx descriptor.
231 * Note that it is possible to clean up the first descriptor of a packet
232 * before the send routines have written the next descriptors, but this
233 * race does not cause any problem. We just end up writing the unmapping
234 * info for the descriptor first.
236 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
237 unsigned int cidx, struct pci_dev *pdev)
239 const struct sg_ent *sgp;
240 struct tx_sw_desc *d = &q->sdesc[cidx];
241 int nfrags, frag_idx, curflit, j = d->addr_idx;
243 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
244 frag_idx = d->fragidx;
246 if (frag_idx == 0 && skb_headlen(skb)) {
247 dma_unmap_single(&pdev->dev, be64_to_cpu(sgp->addr[0]),
248 skb_headlen(skb), DMA_TO_DEVICE);
252 curflit = d->sflit + 1 + j;
253 nfrags = skb_shinfo(skb)->nr_frags;
255 while (frag_idx < nfrags && curflit < WR_FLITS) {
256 dma_unmap_page(&pdev->dev, be64_to_cpu(sgp->addr[j]),
257 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
268 if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
269 d = cidx + 1 == q->size ? q->sdesc : d + 1;
270 d->fragidx = frag_idx;
272 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
277 * free_tx_desc - reclaims Tx descriptors and their buffers
278 * @adapter: the adapter
279 * @q: the Tx queue to reclaim descriptors from
280 * @n: the number of descriptors to reclaim
282 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
283 * Tx buffers. Called with the Tx queue lock held.
285 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
288 struct tx_sw_desc *d;
289 struct pci_dev *pdev = adapter->pdev;
290 unsigned int cidx = q->cidx;
292 const int need_unmap = need_skb_unmap() &&
293 q->cntxt_id >= FW_TUNNEL_SGEEC_START;
297 if (d->skb) { /* an SGL is present */
299 unmap_skb(d->skb, q, cidx, pdev);
301 dev_consume_skb_any(d->skb);
306 if (++cidx == q->size) {
315 * reclaim_completed_tx - reclaims completed Tx descriptors
316 * @adapter: the adapter
317 * @q: the Tx queue to reclaim completed descriptors from
318 * @chunk: maximum number of descriptors to reclaim
320 * Reclaims Tx descriptors that the SGE has indicated it has processed,
321 * and frees the associated buffers if possible. Called with the Tx queue lock held.
324 static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
328 unsigned int reclaim = q->processed - q->cleaned;
330 reclaim = min(chunk, reclaim);
332 free_tx_desc(adapter, q, reclaim);
333 q->cleaned += reclaim;
334 q->in_use -= reclaim;
336 return q->processed - q->cleaned;
340 * should_restart_tx - are there enough resources to restart a Tx queue?
343 * Checks if there are enough descriptors to restart a suspended Tx queue.
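* We consider a restart safe when, after reclaiming what has already
* completed, fewer than half of the queue's descriptors would remain in use.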
345 static inline int should_restart_tx(const struct sge_txq *q)
347 unsigned int r = q->processed - q->cleaned;
349 return q->in_use - r < (q->size >> 1);
352 static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
353 struct rx_sw_desc *d)
355 if (q->use_pages && d->pg_chunk.page) {
356 (*d->pg_chunk.p_cnt)--;
357 if (!*d->pg_chunk.p_cnt)
358 dma_unmap_page(&pdev->dev, d->pg_chunk.mapping,
359 q->alloc_size, DMA_FROM_DEVICE);
361 put_page(d->pg_chunk.page);
362 d->pg_chunk.page = NULL;
364 dma_unmap_single(&pdev->dev, dma_unmap_addr(d, dma_addr),
365 q->buf_size, DMA_FROM_DEVICE);
372 * free_rx_bufs - free the Rx buffers on an SGE free list
373 * @pdev: the PCI device associated with the adapter
374 * @q: the SGE free list to clean up
376 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
377 * this queue should be stopped before calling this function.
379 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
381 unsigned int cidx = q->cidx;
383 while (q->credits--) {
384 struct rx_sw_desc *d = &q->sdesc[cidx];
387 clear_rx_desc(pdev, q, d);
388 if (++cidx == q->size)
392 if (q->pg_chunk.page) {
393 __free_pages(q->pg_chunk.page, q->order);
394 q->pg_chunk.page = NULL;
399 * add_one_rx_buf - add a packet buffer to a free-buffer list
400 * @va: buffer start VA
401 * @len: the buffer length
402 * @d: the HW Rx descriptor to write
403 * @sd: the SW Rx descriptor to write
404 * @gen: the generation bit value
405 * @pdev: the PCI device associated with the adapter
407 * Add a buffer of the given length to the supplied HW and SW Rx descriptors.
410 static inline int add_one_rx_buf(void *va, unsigned int len,
411 struct rx_desc *d, struct rx_sw_desc *sd,
412 unsigned int gen, struct pci_dev *pdev)
416 mapping = dma_map_single(&pdev->dev, va, len, DMA_FROM_DEVICE);
417 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
420 dma_unmap_addr_set(sd, dma_addr, mapping);
422 d->addr_lo = cpu_to_be32(mapping);
423 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
425 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
426 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
430 static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
433 d->addr_lo = cpu_to_be32(mapping);
434 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
436 d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
437 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
441 static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
442 struct rx_sw_desc *sd, gfp_t gfp,
445 if (!q->pg_chunk.page) {
448 q->pg_chunk.page = alloc_pages(gfp, order);
449 if (unlikely(!q->pg_chunk.page))
451 q->pg_chunk.va = page_address(q->pg_chunk.page);
452 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
454 q->pg_chunk.offset = 0;
455 mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page,
456 0, q->alloc_size, DMA_FROM_DEVICE);
457 if (unlikely(dma_mapping_error(&adapter->pdev->dev, mapping))) {
458 __free_pages(q->pg_chunk.page, order);
459 q->pg_chunk.page = NULL;
462 q->pg_chunk.mapping = mapping;
464 sd->pg_chunk = q->pg_chunk;
466 prefetch(sd->pg_chunk.p_cnt);
468 q->pg_chunk.offset += q->buf_size;
469 if (q->pg_chunk.offset == (PAGE_SIZE << order))
470 q->pg_chunk.page = NULL;
472 q->pg_chunk.va += q->buf_size;
473 get_page(q->pg_chunk.page);
476 if (sd->pg_chunk.offset == 0)
477 *sd->pg_chunk.p_cnt = 1;
479 *sd->pg_chunk.p_cnt += 1;
484 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
486 if (q->pend_cred >= q->credits / 4) {
489 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
494 * refill_fl - refill an SGE free-buffer list
496 * @q: the free-list to refill
497 * @n: the number of new buffers to allocate
498 * @gfp: the gfp flags for allocating new buffers
500 * (Re)populate an SGE free-buffer list with up to @n new packet buffers,
501 * allocated with the supplied gfp flags. The caller must ensure that
502 * @n does not exceed the queue's capacity.
504 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
506 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
507 struct rx_desc *d = &q->desc[q->pidx];
508 unsigned int count = 0;
515 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
517 nomem: q->alloc_failed++;
520 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
521 dma_unmap_addr_set(sd, dma_addr, mapping);
523 add_one_rx_chunk(mapping, d, q->gen);
524 dma_sync_single_for_device(&adap->pdev->dev, mapping,
525 q->buf_size - SGE_PG_RSVD,
530 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
535 buf_start = skb->data;
536 err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
539 clear_rx_desc(adap->pdev, q, sd);
546 if (++q->pidx == q->size) {
556 q->pend_cred += count;
562 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
564 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
565 GFP_ATOMIC | __GFP_COMP);
569 * recycle_rx_buf - recycle a receive buffer
571 * @q: the SGE free list
572 * @idx: index of buffer to recycle
574 * Recycles the specified buffer on the given free list by adding it at
575 * the next available slot on the list.
577 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
580 struct rx_desc *from = &q->desc[idx];
581 struct rx_desc *to = &q->desc[q->pidx];
583 q->sdesc[q->pidx] = q->sdesc[idx];
584 to->addr_lo = from->addr_lo; /* already big endian */
585 to->addr_hi = from->addr_hi; /* likewise */
587 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
588 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));
590 if (++q->pidx == q->size) {
601 * alloc_ring - allocate resources for an SGE descriptor ring
602 * @pdev: the PCI device
603 * @nelem: the number of descriptors
604 * @elem_size: the size of each descriptor
605 * @sw_size: the size of the SW state associated with each ring element
606 * @phys: the physical address of the allocated ring
607 * @metadata: address of the array holding the SW state for the ring
609 * Allocates resources for an SGE descriptor ring, such as Tx queues,
610 * free buffer lists, or response queues. Each SGE ring requires
611 * space for its HW descriptors plus, optionally, space for the SW state
612 * associated with each HW entry (the metadata). The function returns
613 * three values: the virtual address for the HW ring (the return value
614 * of the function), the physical address of the HW ring, and the address of the SW ring.
617 static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
618 size_t sw_size, dma_addr_t * phys, void *metadata)
620 size_t len = nelem * elem_size;
622 void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);
626 if (sw_size && metadata) {
627 s = kcalloc(nelem, sw_size, GFP_KERNEL);
630 dma_free_coherent(&pdev->dev, len, p, *phys);
633 *(void **)metadata = s;
639 * t3_reset_qset - reset an SGE qset
642 * Reset the qset structure. The NAPI structure is preserved in the
643 * event of the qset's reincarnation, for example during EEH
644 * recovery.
646 static void t3_reset_qset(struct sge_qset *q)
649 !(q->adap->flags & NAPI_INIT)) {
650 memset(q, 0, sizeof(*q));
655 memset(&q->rspq, 0, sizeof(q->rspq));
656 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
657 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
659 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
660 q->rx_reclaim_timer.function = NULL;
662 napi_free_frags(&q->napi);
667 * t3_free_qset - free the resources of an SGE queue set
668 * @adapter: the adapter owning the queue set
671 * Release the HW and SW resources associated with an SGE queue set, such
672 * as HW contexts, packet buffers, and descriptor rings. Traffic to the
673 * queue set must be quiesced prior to calling this.
675 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
678 struct pci_dev *pdev = adapter->pdev;
680 for (i = 0; i < SGE_RXQ_PER_SET; ++i)
682 spin_lock_irq(&adapter->sge.reg_lock);
683 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
684 spin_unlock_irq(&adapter->sge.reg_lock);
685 free_rx_bufs(pdev, &q->fl[i]);
686 kfree(q->fl[i].sdesc);
687 dma_free_coherent(&pdev->dev,
689 sizeof(struct rx_desc), q->fl[i].desc,
693 for (i = 0; i < SGE_TXQ_PER_SET; ++i)
694 if (q->txq[i].desc) {
695 spin_lock_irq(&adapter->sge.reg_lock);
696 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
697 spin_unlock_irq(&adapter->sge.reg_lock);
698 if (q->txq[i].sdesc) {
699 free_tx_desc(adapter, &q->txq[i],
701 kfree(q->txq[i].sdesc);
703 dma_free_coherent(&pdev->dev,
705 sizeof(struct tx_desc),
706 q->txq[i].desc, q->txq[i].phys_addr);
707 __skb_queue_purge(&q->txq[i].sendq);
711 spin_lock_irq(&adapter->sge.reg_lock);
712 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
713 spin_unlock_irq(&adapter->sge.reg_lock);
714 dma_free_coherent(&pdev->dev,
715 q->rspq.size * sizeof(struct rsp_desc),
716 q->rspq.desc, q->rspq.phys_addr);
723 * init_qset_cntxt - initialize the context info of an SGE queue set
725 * @id: the queue set id
727 * Initializes the TIDs and context ids for the queues of a queue set.
729 static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
731 qs->rspq.cntxt_id = id;
732 qs->fl[0].cntxt_id = 2 * id;
733 qs->fl[1].cntxt_id = 2 * id + 1;
734 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
735 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
736 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
737 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
738 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
742 * sgl_len - calculates the size of an SGL of the given capacity
743 * @n: the number of SGL entries
745 * Calculates the number of flits needed for a scatter/gather list that
746 * can hold the given number of entries.
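* Each SGL entry (sg_ent) packs two address/length pairs into 3 flits,
* so e.g. 3 entries occupy (3 * 3) / 2 + 1 = 5 flits.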
748 static inline unsigned int sgl_len(unsigned int n)
750 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */
751 return (3 * n) / 2 + (n & 1);
755 * flits_to_desc - returns the number of Tx descriptors for the given flits
756 * @n: the number of flits
758 * Calculates the number of Tx descriptors needed for the supplied number
761 static inline unsigned int flits_to_desc(unsigned int n)
763 BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
764 return flit_desc_map[n];
768 * get_packet - return the next ingress packet buffer from a free list
769 * @adap: the adapter that received the packet
770 * @fl: the SGE free list holding the packet
771 * @len: the packet length including any SGE padding
772 * @drop_thres: # of remaining buffers before we start dropping packets
774 * Get the next packet from a free list and complete setup of the
775 * sk_buff. If the packet is small we make a copy and recycle the
776 * original buffer, otherwise we use the original buffer itself. If a
777 * positive drop threshold is supplied packets are dropped and their
778 * buffers recycled if (a) the number of remaining buffers is under the
779 * threshold and the packet is too big to copy, or (b) the packet should
780 * be copied but there is no memory for the copy.
782 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
783 unsigned int len, unsigned int drop_thres)
785 struct sk_buff *skb = NULL;
786 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
788 prefetch(sd->skb->data);
791 if (len <= SGE_RX_COPY_THRES) {
792 skb = alloc_skb(len, GFP_ATOMIC);
793 if (likely(skb != NULL)) {
795 dma_sync_single_for_cpu(&adap->pdev->dev,
796 dma_unmap_addr(sd, dma_addr),
797 len, DMA_FROM_DEVICE);
798 memcpy(skb->data, sd->skb->data, len);
799 dma_sync_single_for_device(&adap->pdev->dev,
800 dma_unmap_addr(sd, dma_addr),
801 len, DMA_FROM_DEVICE);
802 } else if (!drop_thres)
805 recycle_rx_buf(adap, fl, fl->cidx);
809 if (unlikely(fl->credits < drop_thres) &&
810 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
811 GFP_ATOMIC | __GFP_COMP) == 0)
815 dma_unmap_single(&adap->pdev->dev, dma_unmap_addr(sd, dma_addr),
816 fl->buf_size, DMA_FROM_DEVICE);
819 __refill_fl(adap, fl);
824 * get_packet_pg - return the next ingress packet buffer from a free list
825 * @adap: the adapter that received the packet
826 * @fl: the SGE free list holding the packet
828 * @len: the packet length including any SGE padding
829 * @drop_thres: # of remaining buffers before we start dropping packets
831 * Get the next packet from a free list populated with page chunks.
832 * If the packet is small we make a copy and recycle the original buffer,
833 * otherwise we attach the original buffer as a page fragment to a fresh
834 * sk_buff. If a positive drop threshold is supplied packets are dropped
835 * and their buffers recycled if (a) the number of remaining buffers is
836 * under the threshold and the packet is too big to copy, or (b) there's no memory for the copy.
839 * Note: this function is similar to @get_packet but deals with Rx buffers
840 * that are page chunks rather than sk_buffs.
842 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
843 struct sge_rspq *q, unsigned int len,
844 unsigned int drop_thres)
846 struct sk_buff *newskb, *skb;
847 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
849 dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
851 newskb = skb = q->pg_skb;
852 if (!skb && (len <= SGE_RX_COPY_THRES)) {
853 newskb = alloc_skb(len, GFP_ATOMIC);
854 if (likely(newskb != NULL)) {
855 __skb_put(newskb, len);
856 dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr,
857 len, DMA_FROM_DEVICE);
858 memcpy(newskb->data, sd->pg_chunk.va, len);
859 dma_sync_single_for_device(&adap->pdev->dev, dma_addr,
860 len, DMA_FROM_DEVICE);
861 } else if (!drop_thres)
865 recycle_rx_buf(adap, fl, fl->cidx);
870 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
873 prefetch(sd->pg_chunk.p_cnt);
876 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
878 if (unlikely(!newskb)) {
884 dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, len,
886 (*sd->pg_chunk.p_cnt)--;
887 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
888 dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
889 fl->alloc_size, DMA_FROM_DEVICE);
891 __skb_put(newskb, SGE_RX_PULL_LEN);
892 memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
893 skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
894 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
895 len - SGE_RX_PULL_LEN);
897 newskb->data_len = len - SGE_RX_PULL_LEN;
898 newskb->truesize += newskb->data_len;
900 skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
902 sd->pg_chunk.offset, len);
904 newskb->data_len += len;
905 newskb->truesize += len;
910 * We do not refill FLs here; we let the caller do it to overlap a
917 * get_imm_packet - return the next ingress packet buffer from a response
918 * @resp: the response descriptor containing the packet data
920 * Return a packet containing the immediate data of the given response.
922 static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
924 struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);
927 __skb_put(skb, IMMED_PKT_SIZE);
928 skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
934 * calc_tx_descs - calculate the number of Tx descriptors for a packet
937 * Returns the number of Tx descriptors needed for the given Ethernet
938 * packet. Ethernet packets require addition of WR and CPL headers.
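* For example, a non-TSO packet with a linear part and 2 page fragments
* needs sgl_len(3) + 2 = 7 flits, which fits in a single descriptor.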
940 static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
944 if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
947 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
948 if (skb_shinfo(skb)->gso_size)
950 return flits_to_desc(flits);
953 /* map_skb - map a packet main body and its page fragments
954 * @pdev: the PCI device
956 * @addr: placeholder to save the mapped addresses
958 * Map the main body of an sk_buff and its page fragments, if any.
960 static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
963 const skb_frag_t *fp, *end;
964 const struct skb_shared_info *si;
966 if (skb_headlen(skb)) {
967 *addr = dma_map_single(&pdev->dev, skb->data,
968 skb_headlen(skb), DMA_TO_DEVICE);
969 if (dma_mapping_error(&pdev->dev, *addr))
974 si = skb_shinfo(skb);
975 end = &si->frags[si->nr_frags];
977 for (fp = si->frags; fp < end; fp++) {
978 *addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
980 if (dma_mapping_error(&pdev->dev, *addr))
987 while (fp-- > si->frags)
988 dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
991 dma_unmap_single(&pdev->dev, addr[-1], skb_headlen(skb),
998 * write_sgl - populate a scatter/gather list for a packet
1000 * @sgp: the SGL to populate
1001 * @start: start address of skb main body data to include in the SGL
1002 * @len: length of skb main body data to include in the SGL
1003 * @addr: the list of the mapped addresses
1005 * Copies the scatter/gather list for the buffers that make up a packet
1006 * and returns the SGL size in 8-byte words. The caller must size the SGL appropriately.
1009 static inline unsigned int write_sgl(const struct sk_buff *skb,
1010 struct sg_ent *sgp, unsigned char *start,
1011 unsigned int len, const dma_addr_t *addr)
1013 unsigned int i, j = 0, k = 0, nfrags;
1016 sgp->len[0] = cpu_to_be32(len);
1017 sgp->addr[j++] = cpu_to_be64(addr[k++]);
1020 nfrags = skb_shinfo(skb)->nr_frags;
1021 for (i = 0; i < nfrags; i++) {
1022 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1024 sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
1025 sgp->addr[j] = cpu_to_be64(addr[k++]);
1032 return ((nfrags + (len != 0)) * 3) / 2 + j;
1036 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
1037 * @adap: the adapter
1040 * Ring the doorbell if a Tx queue is asleep. There is a natural race,
1041 * where the HW may go to sleep just after we checked; in that case
1042 * the interrupt handler will detect the outstanding TX packet
1043 * and ring the doorbell for us.
1045 * When GTS is disabled we unconditionally ring the doorbell.
1047 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
1050 clear_bit(TXQ_LAST_PKT_DB, &q->flags);
1051 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
1052 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1053 t3_write_reg(adap, A_SG_KDOORBELL,
1054 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1057 wmb(); /* write descriptors before telling HW */
1058 t3_write_reg(adap, A_SG_KDOORBELL,
1059 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1063 static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
1065 #if SGE_NUM_GENBITS == 2
1066 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
1071 * write_wr_hdr_sgl - write a WR header and, optionally, SGL
1072 * @ndesc: number of Tx descriptors spanned by the SGL
1073 * @skb: the packet corresponding to the WR
1074 * @d: first Tx descriptor to be written
1075 * @pidx: index of above descriptors
1076 * @q: the SGE Tx queue
1078 * @flits: number of flits to the start of the SGL in the first descriptor
1079 * @sgl_flits: the SGL size in flits
1080 * @gen: the Tx descriptor generation
1081 * @wr_hi: top 32 bits of WR header based on WR type (big endian)
1082 * @wr_lo: low 32 bits of WR header based on WR type (big endian)
1084 * Write a work request header and an associated SGL. If the SGL is
1085 * small enough to fit into one Tx descriptor it has already been written
1086 * and we just need to write the WR header. Otherwise we distribute the
1087 * SGL across the number of descriptors it spans.
1089 static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
1090 struct tx_desc *d, unsigned int pidx,
1091 const struct sge_txq *q,
1092 const struct sg_ent *sgl,
1093 unsigned int flits, unsigned int sgl_flits,
1094 unsigned int gen, __be32 wr_hi,
1097 struct work_request_hdr *wrp = (struct work_request_hdr *)d;
1098 struct tx_sw_desc *sd = &q->sdesc[pidx];
1101 if (need_skb_unmap()) {
1107 if (likely(ndesc == 1)) {
1109 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1110 V_WR_SGLSFLT(flits)) | wr_hi;
1112 wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
1113 V_WR_GEN(gen)) | wr_lo;
1116 unsigned int ogen = gen;
1117 const u64 *fp = (const u64 *)sgl;
1118 struct work_request_hdr *wp = wrp;
1120 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1121 V_WR_SGLSFLT(flits)) | wr_hi;
1124 unsigned int avail = WR_FLITS - flits;
1126 if (avail > sgl_flits)
1128 memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
1138 if (++pidx == q->size) {
1146 wrp = (struct work_request_hdr *)d;
1147 wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
1148 V_WR_SGLSFLT(1)) | wr_hi;
1149 wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
1151 V_WR_GEN(gen)) | wr_lo;
1156 wrp->wr_hi |= htonl(F_WR_EOP);
1158 wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1159 wr_gen2((struct tx_desc *)wp, ogen);
1160 WARN_ON(ndesc != 0);
1165 * write_tx_pkt_wr - write a TX_PKT work request
1166 * @adap: the adapter
1167 * @skb: the packet to send
1168 * @pi: the egress interface
1169 * @pidx: index of the first Tx descriptor to write
1170 * @gen: the generation value to use
1172 * @ndesc: number of descriptors the packet will occupy
1173 * @compl: the value of the COMPL bit to use
1176 * Generate a TX_PKT work request to send the supplied packet.
1178 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
1179 const struct port_info *pi,
1180 unsigned int pidx, unsigned int gen,
1181 struct sge_txq *q, unsigned int ndesc,
1182 unsigned int compl, const dma_addr_t *addr)
1184 unsigned int flits, sgl_flits, cntrl, tso_info;
1185 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1186 struct tx_desc *d = &q->desc[pidx];
1187 struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
1189 cpl->len = htonl(skb->len);
1190 cntrl = V_TXPKT_INTF(pi->port_id);
1192 if (skb_vlan_tag_present(skb))
1193 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));
1195 tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
1198 struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;
1201 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1202 hdr->cntrl = htonl(cntrl);
1203 eth_type = skb_network_offset(skb) == ETH_HLEN ?
1204 CPL_ETH_II : CPL_ETH_II_VLAN;
1205 tso_info |= V_LSO_ETH_TYPE(eth_type) |
1206 V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
1207 V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
1208 hdr->lso_info = htonl(tso_info);
1211 cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1212 cntrl |= F_TXPKT_IPCSUM_DIS; /* SW calculates IP csum */
1213 cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
1214 cpl->cntrl = htonl(cntrl);
1216 if (skb->len <= WR_LEN - sizeof(*cpl)) {
1217 q->sdesc[pidx].skb = NULL;
1219 skb_copy_from_linear_data(skb, &d->flit[2],
1222 skb_copy_bits(skb, 0, &d->flit[2], skb->len);
1224 flits = (skb->len + 7) / 8 + 2;
1225 cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
1226 V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
1227 | F_WR_SOP | F_WR_EOP | compl);
1229 cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
1230 V_WR_TID(q->token));
1232 dev_consume_skb_any(skb);
1239 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1240 sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
1242 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
1243 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
1244 htonl(V_WR_TID(q->token)));
1247 static inline void t3_stop_tx_queue(struct netdev_queue *txq,
1248 struct sge_qset *qs, struct sge_txq *q)
1250 netif_tx_stop_queue(txq);
1251 set_bit(TXQ_ETH, &qs->txq_stopped);
1256 * t3_eth_xmit - add a packet to the Ethernet Tx queue
1258 * @dev: the egress net device
1260 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1262 netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1265 unsigned int ndesc, pidx, credits, gen, compl;
1266 const struct port_info *pi = netdev_priv(dev);
1267 struct adapter *adap = pi->adapter;
1268 struct netdev_queue *txq;
1269 struct sge_qset *qs;
1271 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1274 * The chip min packet length is 9 octets but play safe and reject
1275 * anything shorter than an Ethernet header.
1277 if (unlikely(skb->len < ETH_HLEN)) {
1278 dev_kfree_skb_any(skb);
1279 return NETDEV_TX_OK;
1282 qidx = skb_get_queue_mapping(skb);
1284 q = &qs->txq[TXQ_ETH];
1285 txq = netdev_get_tx_queue(dev, qidx);
1287 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1289 credits = q->size - q->in_use;
1290 ndesc = calc_tx_descs(skb);
1292 if (unlikely(credits < ndesc)) {
1293 t3_stop_tx_queue(txq, qs, q);
1294 dev_err(&adap->pdev->dev,
1295 "%s: Tx ring %u full while queue awake!\n",
1296 dev->name, q->cntxt_id & 7);
1297 return NETDEV_TX_BUSY;
1300 /* If the Ethernet packet can't be sent as immediate data, map it for DMA */
1301 if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) {
1302 if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
1304 return NETDEV_TX_OK;
1309 if (unlikely(credits - ndesc < q->stop_thres)) {
1310 t3_stop_tx_queue(txq, qs, q);
1312 if (should_restart_tx(q) &&
1313 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1315 netif_tx_start_queue(txq);
1320 q->unacked += ndesc;
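/* ask for a WR completion roughly once every 8 descriptors (bit 3 of the running count) */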
1321 compl = (q->unacked & 8) << (S_WR_COMPL - 3);
1325 if (q->pidx >= q->size) {
1330 /* update port statistics */
1331 if (skb->ip_summed == CHECKSUM_PARTIAL)
1332 qs->port_stats[SGE_PSTAT_TX_CSUM]++;
1333 if (skb_shinfo(skb)->gso_size)
1334 qs->port_stats[SGE_PSTAT_TSO]++;
1335 if (skb_vlan_tag_present(skb))
1336 qs->port_stats[SGE_PSTAT_VLANINS]++;
1339 * We do not use Tx completion interrupts to free DMAd Tx packets.
1340 * This is good for performance but means that we rely on new Tx
1341 * packets arriving to run the destructors of completed packets,
1342 * which open up space in their sockets' send queues. Sometimes
1343 * we do not get such new packets causing Tx to stall. A single
1344 * UDP transmitter is a good example of this situation. We have
1345 * a clean up timer that periodically reclaims completed packets
1346 * but it doesn't run often enough (nor do we want it to) to prevent
1347 * lengthy stalls. A solution to this problem is to run the
1348 * destructor early, after the packet is queued but before it's DMAd.
1349 * A downside is that we lie to socket memory accounting, but the amount
1350 * of extra memory is reasonable (limited by the number of Tx
1351 * descriptors), the packets almost always do get freed quickly by new
1352 * packets, and for protocols like TCP that wait for
1353 * acks to really free up the data the extra memory is even less.
1354 * On the positive side we run the destructors on the sending CPU
1355 * rather than on a potentially different completing CPU, usually a
1356 * good thing. We also run them without holding our Tx queue lock,
1357 * unlike what reclaim_completed_tx() would otherwise do.
1359 * Run the destructor before telling the DMA engine about the packet
1360 * to make sure it doesn't complete and get freed prematurely.
1362 if (likely(!skb_shared(skb)))
1365 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
1366 check_ring_tx_db(adap, q);
1367 return NETDEV_TX_OK;
1371 * write_imm - write a packet into a Tx descriptor as immediate data
1372 * @d: the Tx descriptor to write
1374 * @len: the length of packet data to write as immediate data
1375 * @gen: the generation bit value to write
1377 * Writes a packet as immediate data into a Tx descriptor. The packet
1378 * contains a work request at its beginning. We must write the packet
1379 * carefully so the SGE doesn't read it accidentally before it's written in its entirety.
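* For example, a 60-byte immediate WR is written with V_WR_LEN((60 + 7) / 8) = 8
* flits and V_WR_BCNTLFLT(60 & 7) = 4 valid bytes in the last flit.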
1382 static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
1383 unsigned int len, unsigned int gen)
1385 struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
1386 struct work_request_hdr *to = (struct work_request_hdr *)d;
1388 if (likely(!skb->data_len))
1389 memcpy(&to[1], &from[1], len - sizeof(*from));
1391 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));
1393 to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
1394 V_WR_BCNTLFLT(len & 7));
1396 to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
1397 V_WR_LEN((len + 7) / 8));
1403 * check_desc_avail - check descriptor availability on a send queue
1404 * @adap: the adapter
1405 * @q: the send queue
1406 * @skb: the packet needing the descriptors
1407 * @ndesc: the number of Tx descriptors needed
1408 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1410 * Checks if the requested number of Tx descriptors is available on an
1411 * SGE send queue. If the queue is already suspended or not enough
1412 * descriptors are available the packet is queued for later transmission.
1413 * Must be called with the Tx queue locked.
1415 * Returns 0 if enough descriptors are available, 1 if there aren't
1416 * enough descriptors and the packet has been queued, and 2 if the caller
1417 * needs to retry because there weren't enough descriptors at the
1418 * beginning of the call but some freed up in the meantime.
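* Callers typically loop back and retry on a return of 2 (see ctrl_xmit()
* and ofld_xmit() below).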
1420 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
1421 struct sk_buff *skb, unsigned int ndesc,
1424 if (unlikely(!skb_queue_empty(&q->sendq))) {
1425 addq_exit: __skb_queue_tail(&q->sendq, skb);
1428 if (unlikely(q->size - q->in_use < ndesc)) {
1429 struct sge_qset *qs = txq_to_qset(q, qid);
1431 set_bit(qid, &qs->txq_stopped);
1432 smp_mb__after_atomic();
1434 if (should_restart_tx(q) &&
1435 test_and_clear_bit(qid, &qs->txq_stopped))
1445 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1446 * @q: the SGE control Tx queue
1448 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1449 * that send only immediate data (presently just the control queues) and
1450 * thus do not have any sk_buffs to release.
1452 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1454 unsigned int reclaim = q->processed - q->cleaned;
1456 q->in_use -= reclaim;
1457 q->cleaned += reclaim;
1460 static inline int immediate(const struct sk_buff *skb)
1462 return skb->len <= WR_LEN;
1466 * ctrl_xmit - send a packet through an SGE control Tx queue
1467 * @adap: the adapter
1468 * @q: the control queue
1471 * Send a packet through an SGE control Tx queue. Packets sent through
1472 * a control queue must fit entirely as immediate data in a single Tx
1473 * descriptor and have no page fragments.
1475 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
1476 struct sk_buff *skb)
1479 struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
1481 if (unlikely(!immediate(skb))) {
1484 return NET_XMIT_SUCCESS;
1487 wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
1488 wrp->wr_lo = htonl(V_WR_TID(q->token));
1490 spin_lock(&q->lock);
1491 again: reclaim_completed_tx_imm(q);
1493 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
1494 if (unlikely(ret)) {
1496 spin_unlock(&q->lock);
1502 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1505 if (++q->pidx >= q->size) {
1509 spin_unlock(&q->lock);
1511 t3_write_reg(adap, A_SG_KDOORBELL,
1512 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1513 return NET_XMIT_SUCCESS;
1517 * restart_ctrlq - restart a suspended control queue
1518 * @w: pointer to the work associated with this handler
1520 * Resumes transmission on a suspended Tx control queue.
1522 static void restart_ctrlq(struct work_struct *w)
1524 struct sk_buff *skb;
1525 struct sge_qset *qs = container_of(w, struct sge_qset,
1526 txq[TXQ_CTRL].qresume_task);
1527 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1529 spin_lock(&q->lock);
1530 again: reclaim_completed_tx_imm(q);
1532 while (q->in_use < q->size &&
1533 (skb = __skb_dequeue(&q->sendq)) != NULL) {
1535 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);
1537 if (++q->pidx >= q->size) {
1544 if (!skb_queue_empty(&q->sendq)) {
1545 set_bit(TXQ_CTRL, &qs->txq_stopped);
1546 smp_mb__after_atomic();
1548 if (should_restart_tx(q) &&
1549 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1554 spin_unlock(&q->lock);
1556 t3_write_reg(qs->adap, A_SG_KDOORBELL,
1557 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1561 * Send a management message through control queue 0
1563 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1567 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
1574 * deferred_unmap_destructor - unmap a packet when it is freed
1577 * This is the packet destructor used for Tx packets that need to remain
1578 * mapped until they are freed rather than until their Tx descriptors are released.
1581 static void deferred_unmap_destructor(struct sk_buff *skb)
1584 const dma_addr_t *p;
1585 const struct skb_shared_info *si;
1586 const struct deferred_unmap_info *dui;
1588 dui = (struct deferred_unmap_info *)skb->head;
1591 if (skb_tail_pointer(skb) - skb_transport_header(skb))
1592 dma_unmap_single(&dui->pdev->dev, *p++,
1593 skb_tail_pointer(skb) - skb_transport_header(skb),
1596 si = skb_shinfo(skb);
1597 for (i = 0; i < si->nr_frags; i++)
1598 dma_unmap_page(&dui->pdev->dev, *p++,
1599 skb_frag_size(&si->frags[i]), DMA_TO_DEVICE);
1602 static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
1603 const struct sg_ent *sgl, int sgl_flits)
1606 struct deferred_unmap_info *dui;
1608 dui = (struct deferred_unmap_info *)skb->head;
1610 for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
1611 *p++ = be64_to_cpu(sgl->addr[0]);
1612 *p++ = be64_to_cpu(sgl->addr[1]);
1615 *p = be64_to_cpu(sgl->addr[0]);
1619 * write_ofld_wr - write an offload work request
1620 * @adap: the adapter
1621 * @skb: the packet to send
1623 * @pidx: index of the first Tx descriptor to write
1624 * @gen: the generation value to use
1625 * @ndesc: number of descriptors the packet will occupy
1626 * @addr: the list of mapped DMA addresses
1628 * Write an offload work request to send the supplied packet. The packet
1629 * data already carry the work request with most fields populated.
1631 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
1632 struct sge_txq *q, unsigned int pidx,
1633 unsigned int gen, unsigned int ndesc,
1634 const dma_addr_t *addr)
1636 unsigned int sgl_flits, flits;
1637 struct work_request_hdr *from;
1638 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
1639 struct tx_desc *d = &q->desc[pidx];
1641 if (immediate(skb)) {
1642 q->sdesc[pidx].skb = NULL;
1643 write_imm(d, skb, skb->len, gen);
1647 /* Only TX_DATA builds SGLs */
1649 from = (struct work_request_hdr *)skb->data;
1650 memcpy(&d->flit[1], &from[1],
1651 skb_transport_offset(skb) - sizeof(*from));
1653 flits = skb_transport_offset(skb) / 8;
1654 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
1655 sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
1656 skb_tail_pointer(skb) - skb_transport_header(skb),
1658 if (need_skb_unmap()) {
1659 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
1660 skb->destructor = deferred_unmap_destructor;
1663 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
1664 gen, from->wr_hi, from->wr_lo);
1668 * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
1671 * Returns the number of Tx descriptors needed for the given offload
1672 * packet. These packets are already fully constructed.
1674 static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
1676 unsigned int flits, cnt;
1678 if (skb->len <= WR_LEN)
1679 return 1; /* packet fits as immediate data */
1681 flits = skb_transport_offset(skb) / 8; /* headers */
1682 cnt = skb_shinfo(skb)->nr_frags;
1683 if (skb_tail_pointer(skb) != skb_transport_header(skb))
1685 return flits_to_desc(flits + sgl_len(cnt));
1689 * ofld_xmit - send a packet through an offload queue
1690 * @adap: the adapter
1691 * @q: the Tx offload queue
1694 * Send an offload packet through an SGE offload queue.
1696 static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
1697 struct sk_buff *skb)
1700 unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;
1702 spin_lock(&q->lock);
1703 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1705 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
1706 if (unlikely(ret)) {
1708 skb->priority = ndesc; /* save for restart */
1709 spin_unlock(&q->lock);
1715 if (!immediate(skb) &&
1716 map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
1717 spin_unlock(&q->lock);
1718 return NET_XMIT_SUCCESS;
1725 if (q->pidx >= q->size) {
1729 spin_unlock(&q->lock);
1731 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
1732 check_ring_tx_db(adap, q);
1733 return NET_XMIT_SUCCESS;
1737 * restart_offloadq - restart a suspended offload queue
1738 * @w: pointer to the work associated with this handler
1740 * Resumes transmission on a suspended Tx offload queue.
1742 static void restart_offloadq(struct work_struct *w)
1744 struct sk_buff *skb;
1745 struct sge_qset *qs = container_of(w, struct sge_qset,
1746 txq[TXQ_OFLD].qresume_task);
1747 struct sge_txq *q = &qs->txq[TXQ_OFLD];
1748 const struct port_info *pi = netdev_priv(qs->netdev);
1749 struct adapter *adap = pi->adapter;
1750 unsigned int written = 0;
1752 spin_lock(&q->lock);
1753 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
1755 while ((skb = skb_peek(&q->sendq)) != NULL) {
1756 unsigned int gen, pidx;
1757 unsigned int ndesc = skb->priority;
1759 if (unlikely(q->size - q->in_use < ndesc)) {
1760 set_bit(TXQ_OFLD, &qs->txq_stopped);
1761 smp_mb__after_atomic();
1763 if (should_restart_tx(q) &&
1764 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
1770 if (!immediate(skb) &&
1771 map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
1779 if (q->pidx >= q->size) {
1783 __skb_unlink(skb, &q->sendq);
1784 spin_unlock(&q->lock);
1786 write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
1787 (dma_addr_t *)skb->head);
1788 spin_lock(&q->lock);
1790 spin_unlock(&q->lock);
1793 set_bit(TXQ_RUNNING, &q->flags);
1794 set_bit(TXQ_LAST_PKT_DB, &q->flags);
1797 if (likely(written))
1798 t3_write_reg(adap, A_SG_KDOORBELL,
1799 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1803 * queue_set - return the queue set a packet should use
1806 * Maps a packet to the SGE queue set it should use. The desired queue
1807 * set is carried in bits 1-3 in the packet's priority.
1809 static inline int queue_set(const struct sk_buff *skb)
1811 return skb->priority >> 1;
1815 * is_ctrl_pkt - return whether an offload packet is a control packet
1818 * Determines whether an offload packet should use an OFLD or a CTRL
1819 * Tx queue. This is indicated by bit 0 in the packet's priority.
1821 static inline int is_ctrl_pkt(const struct sk_buff *skb)
1823 return skb->priority & 1;
1827 * t3_offload_tx - send an offload packet
1828 * @tdev: the offload device to send to
1831 * Sends an offload packet. We use the packet priority to select the
1832 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1833 * should be sent as regular or control, bits 1-3 select the queue set.
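* For example, priority 5 (binary 101) selects queue set 2 and the CTRL queue.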
1835 int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
1837 struct adapter *adap = tdev2adap(tdev);
1838 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];
1840 if (unlikely(is_ctrl_pkt(skb)))
1841 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);
1843 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
1847 * offload_enqueue - add an offload packet to an SGE offload receive queue
1848 * @q: the SGE response queue
1851 * Add a new offload packet to an SGE response queue's offload packet
1852 * queue. If the packet is the first on the queue it schedules the RX
1853 * softirq to process the queue.
1855 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
1857 int was_empty = skb_queue_empty(&q->rx_queue);
1859 __skb_queue_tail(&q->rx_queue, skb);
1862 struct sge_qset *qs = rspq_to_qset(q);
1864 napi_schedule(&qs->napi);
1869 * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
1870 * @tdev: the offload device that will be receiving the packets
1871 * @q: the SGE response queue that assembled the bundle
1872 * @skbs: the partial bundle
1873 * @n: the number of packets in the bundle
1875 * Delivers a (partial) bundle of Rx offload packets to an offload device.
1877 static inline void deliver_partial_bundle(struct t3cdev *tdev,
1879 struct sk_buff *skbs[], int n)
1882 q->offload_bundles++;
1883 tdev->recv(tdev, skbs, n);
1888 * ofld_poll - NAPI handler for offload packets in interrupt mode
1889 * @napi: the network device doing the polling
1890 * @budget: polling budget
1892 * The NAPI handler for offload packets when a response queue is serviced
1893 * by the hard interrupt handler, i.e., when it's operating in non-polling
1894 * mode. Creates small packet batches and sends them through the offload
1895 * receive handler. Batches need to be of modest size as we do prefetches
1896 * on the packets in each.
1898 static int ofld_poll(struct napi_struct *napi, int budget)
1900 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1901 struct sge_rspq *q = &qs->rspq;
1902 struct adapter *adapter = qs->adap;
1905 while (work_done < budget) {
1906 struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1907 struct sk_buff_head queue;
1910 spin_lock_irq(&q->lock);
1911 __skb_queue_head_init(&queue);
1912 skb_queue_splice_init(&q->rx_queue, &queue);
1913 if (skb_queue_empty(&queue)) {
1914 napi_complete_done(napi, work_done);
1915 spin_unlock_irq(&q->lock);
1918 spin_unlock_irq(&q->lock);
1921 skb_queue_walk_safe(&queue, skb, tmp) {
1922 if (work_done >= budget)
1926 __skb_unlink(skb, &queue);
1927 prefetch(skb->data);
1928 skbs[ngathered] = skb;
1929 if (++ngathered == RX_BUNDLE_SIZE) {
1930 q->offload_bundles++;
1931 adapter->tdev.recv(&adapter->tdev, skbs,
1936 if (!skb_queue_empty(&queue)) {
1937 /* splice remaining packets back onto Rx queue */
1938 spin_lock_irq(&q->lock);
1939 skb_queue_splice(&queue, &q->rx_queue);
1940 spin_unlock_irq(&q->lock);
1942 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1949 * rx_offload - process a received offload packet
1950 * @tdev: the offload device receiving the packet
1951 * @rq: the response queue that received the packet
1953 * @rx_gather: a gather list of packets if we are building a bundle
1954 * @gather_idx: index of the next available slot in the bundle
1956 * Process an ingress offload packet and add it to the offload ingress
1957 * queue. Returns the index of the next available slot in the bundle.
1959 static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1960 struct sk_buff *skb, struct sk_buff *rx_gather[],
1961 unsigned int gather_idx)
1963 skb_reset_mac_header(skb);
1964 skb_reset_network_header(skb);
1965 skb_reset_transport_header(skb);
1968 rx_gather[gather_idx++] = skb;
1969 if (gather_idx == RX_BUNDLE_SIZE) {
1970 tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1972 rq->offload_bundles++;
1975 offload_enqueue(rq, skb);
1981 * restart_tx - check whether to restart suspended Tx queues
1982 * @qs: the queue set to resume
1984 * Restarts suspended Tx queues of an SGE queue set if they have enough
1985 * free resources to resume operation.
1987 static void restart_tx(struct sge_qset *qs)
1989 if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1990 should_restart_tx(&qs->txq[TXQ_ETH]) &&
1991 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1992 qs->txq[TXQ_ETH].restarts++;
1993 if (netif_running(qs->netdev))
1994 netif_tx_wake_queue(qs->tx_q);
1997 if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1998 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1999 test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2000 qs->txq[TXQ_OFLD].restarts++;
2002 /* The work can be quite lengthy so we use the driver's own workqueue */
2003 queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task);
2005 if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
2006 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2007 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2008 qs->txq[TXQ_CTRL].restarts++;
2010 /* The work can be quite lengthy so we use the driver's own workqueue */
2011 queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task);
2016 * cxgb3_arp_process - process an ARP request probing a private IP address
2017 * @pi: the port info
2018 * @skb: the skbuff containing the ARP request
2020 * Check if the ARP request is probing the private IP address
2021 * dedicated to iSCSI and generate an ARP reply if so.
2023 static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
2025 struct net_device *dev = skb->dev;
2027 unsigned char *arp_ptr;
2034 skb_reset_network_header(skb);
2037 if (arp->ar_op != htons(ARPOP_REQUEST))
2040 arp_ptr = (unsigned char *)(arp + 1);
2042 arp_ptr += dev->addr_len;
2043 memcpy(&sip, arp_ptr, sizeof(sip));
2044 arp_ptr += sizeof(sip);
2045 arp_ptr += dev->addr_len;
2046 memcpy(&tip, arp_ptr, sizeof(tip));
2048 if (tip != pi->iscsi_ipv4addr)
2051 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
2052 pi->iscsic.mac_addr, sha);
2056 static inline int is_arp(struct sk_buff *skb)
2058 return skb->protocol == htons(ETH_P_ARP);
2061 static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
2062 struct sk_buff *skb)
2065 cxgb3_arp_process(pi, skb);
2069 if (pi->iscsic.recv)
2070 pi->iscsic.recv(pi, skb);
2075 * rx_eth - process an ingress ethernet packet
2076 * @adap: the adapter
2077 * @rq: the response queue that received the packet
2080 * @lro: large receive offload
2082 * Process an ingress Ethernet packet and deliver it to the stack.
2083 * The padding is 2 if the packet was delivered in an Rx buffer and 0
2084 * if it was immediate data in a response.
2086 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2087 struct sk_buff *skb, int pad, int lro)
2089 struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2090 struct sge_qset *qs = rspq_to_qset(rq);
2091 struct port_info *pi;
2093 skb_pull(skb, sizeof(*p) + pad);
2094 skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2095 pi = netdev_priv(skb->dev);
2096 if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2097 p->csum == htons(0xffff) && !p->fragment) {
2098 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2099 skb->ip_summed = CHECKSUM_UNNECESSARY;
2101 skb_checksum_none_assert(skb);
2102 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2104 if (p->vlan_valid) {
2105 qs->port_stats[SGE_PSTAT_VLANEX]++;
2106 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
2110 napi_gro_receive(&qs->napi, skb);
2112 if (unlikely(pi->iscsic.flags))
2113 cxgb3_process_iscsi_prov_pack(pi, skb);
2114 netif_receive_skb(skb);
2120 static inline int is_eth_tcp(u32 rss)
2122 return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2126 * lro_add_page - add a page chunk to an LRO session
2127 * @adap: the adapter
2128 * @qs: the associated queue set
2129 * @fl: the free list containing the page chunk to add
2130 * @len: packet length
2131 * @complete: Indicates the last fragment of a frame
2133 * Add a received packet contained in a page chunk to an existing LRO session.
2136 static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2137 struct sge_fl *fl, int len, int complete)
2139 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2140 struct port_info *pi = netdev_priv(qs->netdev);
2141 struct sk_buff *skb = NULL;
2142 struct cpl_rx_pkt *cpl;
2143 skb_frag_t *rx_frag;
2148 skb = napi_get_frags(&qs->napi);
2154 dma_sync_single_for_cpu(&adap->pdev->dev,
2155 dma_unmap_addr(sd, dma_addr),
2156 fl->buf_size - SGE_PG_RSVD, DMA_FROM_DEVICE);
2158 (*sd->pg_chunk.p_cnt)--;
2159 if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2160 dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping,
2161 fl->alloc_size, DMA_FROM_DEVICE);
2164 put_page(sd->pg_chunk.page);
2170 rx_frag = skb_shinfo(skb)->frags;
2171 nr_frags = skb_shinfo(skb)->nr_frags;
2174 offset = 2 + sizeof(struct cpl_rx_pkt);
2175 cpl = qs->lro_va = sd->pg_chunk.va + 2;
2177 if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2178 cpl->csum_valid && cpl->csum == htons(0xffff)) {
2179 skb->ip_summed = CHECKSUM_UNNECESSARY;
2180 qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2182 skb->ip_summed = CHECKSUM_NONE;
2188 rx_frag += nr_frags;
2189 __skb_frag_set_page(rx_frag, sd->pg_chunk.page);
2190 skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
2191 skb_frag_size_set(rx_frag, len);
2194 skb->data_len += len;
2195 skb->truesize += len;
2196 skb_shinfo(skb)->nr_frags++;
2201 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2203 if (cpl->vlan_valid) {
2204 qs->port_stats[SGE_PSTAT_VLANEX]++;
2205 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
2207 napi_gro_frags(&qs->napi);
2211 * handle_rsp_cntrl_info - handles control information in a response
2212 * @qs: the queue set corresponding to the response
2213 * @flags: the response control flags
2215 * Handles the control information of an SGE response, such as GTS
2216 * indications and completion credits for the queue set's Tx queues.
2217 * HW coalesces credits; we don't do any extra SW coalescing.
2219 static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2221 unsigned int credits;
2224 if (flags & F_RSPD_TXQ0_GTS)
2225 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2228 credits = G_RSPD_TXQ0_CR(flags);
2230 qs->txq[TXQ_ETH].processed += credits;
2232 credits = G_RSPD_TXQ2_CR(flags);
2234 qs->txq[TXQ_CTRL].processed += credits;
2237 if (flags & F_RSPD_TXQ1_GTS)
2238 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2240 credits = G_RSPD_TXQ1_CR(flags);
2242 qs->txq[TXQ_OFLD].processed += credits;
2246 * check_ring_db - check if we need to ring any doorbells
2247 * @adap: the adapter
2248 * @qs: the queue set whose Tx queues are to be examined
2249 * @sleeping: indicates which Tx queue sent GTS
2251 * Checks if some of a queue set's Tx queues need to ring their doorbells
2252 * to resume transmission after idling while they still have unprocessed descriptors.
2255 static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2256 unsigned int sleeping)
2258 if (sleeping & F_RSPD_TXQ0_GTS) {
2259 struct sge_txq *txq = &qs->txq[TXQ_ETH];
2261 if (txq->cleaned + txq->in_use != txq->processed &&
2262 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2263 set_bit(TXQ_RUNNING, &txq->flags);
2264 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2265 V_EGRCNTX(txq->cntxt_id));
2269 if (sleeping & F_RSPD_TXQ1_GTS) {
2270 struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2272 if (txq->cleaned + txq->in_use != txq->processed &&
2273 !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2274 set_bit(TXQ_RUNNING, &txq->flags);
2275 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2276 V_EGRCNTX(txq->cntxt_id));
2282 * is_new_response - check if a response is newly written
2283 * @r: the response descriptor
2284 * @q: the response queue
2286 * Returns true if a response descriptor contains a yet unprocessed response.
2289 static inline int is_new_response(const struct rsp_desc *r,
2290 const struct sge_rspq *q)
2292 return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2295 static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2298 q->rx_recycle_buf = 0;
2301 #define RSPD_GTS_MASK (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2302 #define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2303 V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2304 V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2305 V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2307 /* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2308 #define NOMEM_INTR_DELAY 2500
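/*
 * For scale: with the value above and the 0.1us tick, the next interrupt is
 * pushed out by 2500 * 0.1us = 250us, giving the system some time to free
 * memory before the queue is serviced again.
 */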
2311 * process_responses - process responses from an SGE response queue
2312 * @adap: the adapter
2313 * @qs: the queue set to which the response queue belongs
2314 * @budget: how many responses can be processed in this round
2316 * Process responses from an SGE response queue up to the supplied budget.
2317 * Responses include received packets as well as credits and other events
2318 * for the queues that belong to the response queue's queue set.
2319 * A negative budget is effectively unlimited.
2321 * Additionally choose the interrupt holdoff time for the next interrupt
2322 * on this queue. If the system is under memory shortage use a fairly
2323 * long delay to help recovery.
2325 static int process_responses(struct adapter *adap, struct sge_qset *qs,
2328 struct sge_rspq *q = &qs->rspq;
2329 struct rsp_desc *r = &q->desc[q->cidx];
2330 int budget_left = budget;
2331 unsigned int sleeping = 0;
2332 struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2335 q->next_holdoff = q->holdoff_tmr;
2337 while (likely(budget_left && is_new_response(r, q))) {
2338 int packet_complete, eth, ethpad = 2;
2339 int lro = !!(qs->netdev->features & NETIF_F_GRO);
2340 struct sk_buff *skb = NULL;
2342 __be32 rss_hi, rss_lo;
2345 eth = r->rss_hdr.opcode == CPL_RX_PKT;
2346 rss_hi = *(const __be32 *)r;
2347 rss_lo = r->rss_hdr.rss_hash_val;
2348 flags = ntohl(r->flags);
2350 if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2351 skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2355 __skb_put_data(skb, r, AN_PKT_SIZE);
2356 skb->data[0] = CPL_ASYNC_NOTIF;
2357 rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2359 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2360 skb = get_imm_packet(r);
2361 if (unlikely(!skb)) {
2363 q->next_holdoff = NOMEM_INTR_DELAY;
2365 /* consume one credit since we tried */
2371 } else if ((len = ntohl(r->len_cq)) != 0) {
2374 lro &= eth && is_eth_tcp(rss_hi);
2376 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2377 if (fl->use_pages) {
2378 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2381 __refill_fl(adap, fl);
2383 lro_add_page(adap, qs, fl,
2385 flags & F_RSPD_EOP);
2389 skb = get_packet_pg(adap, fl, q,
2392 SGE_RX_DROP_THRES : 0);
2395 skb = get_packet(adap, fl, G_RSPD_LEN(len),
2396 eth ? SGE_RX_DROP_THRES : 0);
2397 if (unlikely(!skb)) {
2401 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2404 if (++fl->cidx == fl->size)
2409 if (flags & RSPD_CTRL_MASK) {
2410 sleeping |= flags & RSPD_GTS_MASK;
2411 handle_rsp_cntrl_info(qs, flags);
2415 if (unlikely(++q->cidx == q->size)) {
2422 if (++q->credits >= (q->size / 4)) {
2423 refill_rspq(adap, q, q->credits);
2427 packet_complete = flags &
2428 (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2429 F_RSPD_ASYNC_NOTIF);
2431 if (skb != NULL && packet_complete) {
2433 rx_eth(adap, q, skb, ethpad, lro);
2436 /* Preserve the RSS info in csum & priority */
2438 skb->priority = rss_lo;
2439 ngathered = rx_offload(&adap->tdev, q, skb,
2444 if (flags & F_RSPD_EOP)
2445 clear_rspq_bufstate(q);
2450 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2453 check_ring_db(adap, qs, sleeping);
2455 smp_mb(); /* commit Tx queue .processed updates */
2456 if (unlikely(qs->txq_stopped != 0))
2459 budget -= budget_left;
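/*
 * A response is "pure" when it carries no packet data: no async notification,
 * no immediate data, and a zero length/CQ word.
 */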
2463 static inline int is_pure_response(const struct rsp_desc *r)
2465 __be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2467 return (n | r->len_cq) == 0;
2471 * napi_rx_handler - the NAPI handler for Rx processing
2472 * @napi: the napi instance
2473 * @budget: how many packets we can process in this round
2475 * Handler for new data events when using NAPI.
2477 static int napi_rx_handler(struct napi_struct *napi, int budget)
2479 struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2480 struct adapter *adap = qs->adap;
2481 int work_done = process_responses(adap, qs, budget);
2483 if (likely(work_done < budget)) {
2484 napi_complete_done(napi, work_done);
2487 * Because we don't atomically flush the following
2488 * write it is possible that in very rare cases it can
2489 * reach the device in a way that races with a new
2490 * response being written plus an error interrupt
2491 * causing the NAPI interrupt handler below to return
2492 * unhandled status to the OS. To protect against
2493 * this would require flushing the write and doing
2494 * both the write and the flush with interrupts off.
2495 * Way too expensive and unjustifiable given the
2496 * rarity of the race.
2498 * The race cannot happen at all with MSI-X.
2500 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2501 V_NEWTIMER(qs->rspq.next_holdoff) |
2502 V_NEWINDEX(qs->rspq.cidx));
2508 * Returns true if the device is already scheduled for polling.
2510 static inline int napi_is_scheduled(struct napi_struct *napi)
2512 return test_bit(NAPI_STATE_SCHED, &napi->state);
2516 * process_pure_responses - process pure responses from a response queue
2517 * @adap: the adapter
2518 * @qs: the queue set owning the response queue
2519 * @r: the first pure response to process
2521 * A simpler version of process_responses() that handles only pure (i.e.,
2522 * non data-carrying) responses. Such responses are too lightweight to
2523 * justify calling a softirq under NAPI, so we handle them specially in
2524 * the interrupt handler. The function is called with a pointer to a
2525 * response, which the caller must ensure is a valid pure response.
2527 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2529 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2532 struct sge_rspq *q = &qs->rspq;
2533 unsigned int sleeping = 0;
2536 u32 flags = ntohl(r->flags);
2539 if (unlikely(++q->cidx == q->size)) {
2546 if (flags & RSPD_CTRL_MASK) {
2547 sleeping |= flags & RSPD_GTS_MASK;
2548 handle_rsp_cntrl_info(qs, flags);
2552 if (++q->credits >= (q->size / 4)) {
2553 refill_rspq(adap, q, q->credits);
2556 if (!is_new_response(r, q))
2559 } while (is_pure_response(r));
2562 check_ring_db(adap, qs, sleeping);
2564 smp_mb(); /* commit Tx queue .processed updates */
2565 if (unlikely(qs->txq_stopped != 0))
2568 return is_new_response(r, q);
2572 * handle_responses - decide what to do with new responses in NAPI mode
2573 * @adap: the adapter
2574 * @q: the response queue
2576 * This is used by the NAPI interrupt handlers to decide what to do with
2577 * new SGE responses. If there are no new responses it returns -1. If
2578 * there are new responses and they are pure (i.e., non-data carrying)
2579 * it handles them straight in hard interrupt context as they are very
2580 * cheap and don't deliver any packets. Finally, if there are any data
2581 * signaling responses it schedules the NAPI handler. Returns 1 if it
2582 * schedules NAPI, 0 if all new responses were pure.
2584 * The caller must ascertain NAPI is not already running.
2586 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2588 struct sge_qset *qs = rspq_to_qset(q);
2589 struct rsp_desc *r = &q->desc[q->cidx];
2591 if (!is_new_response(r, q))
2594 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2595 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2596 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2599 napi_schedule(&qs->napi);
2604 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2605 * (i.e., response queue serviced in hard interrupt).
2607 static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2609 struct sge_qset *qs = cookie;
2610 struct adapter *adap = qs->adap;
2611 struct sge_rspq *q = &qs->rspq;
2613 spin_lock(&q->lock);
2614 if (process_responses(adap, qs, -1) == 0)
2615 q->unhandled_irqs++;
2616 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2617 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2618 spin_unlock(&q->lock);
2623 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2624 * (i.e., response queue serviced by NAPI polling).
2626 static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2628 struct sge_qset *qs = cookie;
2629 struct sge_rspq *q = &qs->rspq;
2631 spin_lock(&q->lock);
2633 if (handle_responses(qs->adap, q) < 0)
2634 q->unhandled_irqs++;
2635 spin_unlock(&q->lock);
2640 * The non-NAPI MSI interrupt handler. This needs to handle data events from
2641 * SGE response queues as well as error and other async events as they all use
2642 * the same MSI vector. We use one SGE response queue per port in this mode
2643 * and protect all response queues with queue 0's lock.
2645 static irqreturn_t t3_intr_msi(int irq, void *cookie)
2647 int new_packets = 0;
2648 struct adapter *adap = cookie;
2649 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2651 spin_lock(&q->lock);
2653 if (process_responses(adap, &adap->sge.qs[0], -1)) {
2654 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2655 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2659 if (adap->params.nports == 2 &&
2660 process_responses(adap, &adap->sge.qs[1], -1)) {
2661 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2663 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2664 V_NEWTIMER(q1->next_holdoff) |
2665 V_NEWINDEX(q1->cidx));
2669 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2670 q->unhandled_irqs++;
2672 spin_unlock(&q->lock);
2676 static int rspq_check_napi(struct sge_qset *qs)
2678 struct sge_rspq *q = &qs->rspq;
2680 if (!napi_is_scheduled(&qs->napi) &&
2681 is_new_response(&q->desc[q->cidx], q)) {
2682 napi_schedule(&qs->napi);
2689 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2690 * by NAPI polling). Handles data events from SGE response queues as well as
2691 * error and other async events as they all use the same MSI vector. We use
2692 * one SGE response queue per port in this mode and protect all response
2693 * queues with queue 0's lock.
2695 static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2698 struct adapter *adap = cookie;
2699 struct sge_rspq *q = &adap->sge.qs[0].rspq;
2701 spin_lock(&q->lock);
2703 new_packets = rspq_check_napi(&adap->sge.qs[0]);
2704 if (adap->params.nports == 2)
2705 new_packets += rspq_check_napi(&adap->sge.qs[1]);
2706 if (!new_packets && t3_slow_intr_handler(adap) == 0)
2707 q->unhandled_irqs++;
2709 spin_unlock(&q->lock);
2714 * A helper function that processes responses and issues GTS.
2716 static inline int process_responses_gts(struct adapter *adap,
2717 struct sge_rspq *rq)
2721 work = process_responses(adap, rspq_to_qset(rq), -1);
2722 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2723 V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2728 * The legacy INTx interrupt handler. This needs to handle data events from
2729 * SGE response queues as well as error and other async events as they all use
2730 * the same interrupt pin. We use one SGE response queue per port in this mode
2731 * and protect all response queues with queue 0's lock.
2733 static irqreturn_t t3_intr(int irq, void *cookie)
2735 int work_done, w0, w1;
2736 struct adapter *adap = cookie;
2737 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2738 struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2740 spin_lock(&q0->lock);
2742 w0 = is_new_response(&q0->desc[q0->cidx], q0);
2743 w1 = adap->params.nports == 2 &&
2744 is_new_response(&q1->desc[q1->cidx], q1);
2746 if (likely(w0 | w1)) {
2747 t3_write_reg(adap, A_PL_CLI, 0);
2748 t3_read_reg(adap, A_PL_CLI); /* flush */
2751 process_responses_gts(adap, q0);
2754 process_responses_gts(adap, q1);
2756 work_done = w0 | w1;
2758 work_done = t3_slow_intr_handler(adap);
2760 spin_unlock(&q0->lock);
2761 return IRQ_RETVAL(work_done != 0);
2765 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2766 * Handles data events from SGE response queues as well as error and other
2767 * async events as they all use the same interrupt pin. We use one SGE
2768 * response queue per port in this mode and protect all response queues with queue 0's lock.
2771 static irqreturn_t t3b_intr(int irq, void *cookie)
2774 struct adapter *adap = cookie;
2775 struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2777 t3_write_reg(adap, A_PL_CLI, 0);
2778 map = t3_read_reg(adap, A_SG_DATA_INTR);
2780 if (unlikely(!map)) /* shared interrupt, most likely */
2783 spin_lock(&q0->lock);
2785 if (unlikely(map & F_ERRINTR))
2786 t3_slow_intr_handler(adap);
2788 if (likely(map & 1))
2789 process_responses_gts(adap, q0);
2792 process_responses_gts(adap, &adap->sge.qs[1].rspq);
2794 spin_unlock(&q0->lock);
2799 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2800 * Handles data events from SGE response queues as well as error and other
2801 * async events as they all use the same interrupt pin. We use one SGE
2802 * response queue per port in this mode and protect all response queues with queue 0's lock.
2805 static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2808 struct adapter *adap = cookie;
2809 struct sge_qset *qs0 = &adap->sge.qs[0];
2810 struct sge_rspq *q0 = &qs0->rspq;
2812 t3_write_reg(adap, A_PL_CLI, 0);
2813 map = t3_read_reg(adap, A_SG_DATA_INTR);
2815 if (unlikely(!map)) /* shared interrupt, most likely */
2818 spin_lock(&q0->lock);
2820 if (unlikely(map & F_ERRINTR))
2821 t3_slow_intr_handler(adap);
2823 if (likely(map & 1))
2824 napi_schedule(&qs0->napi);
2827 napi_schedule(&adap->sge.qs[1].napi);
2829 spin_unlock(&q0->lock);
2834 * t3_intr_handler - select the top-level interrupt handler
2835 * @adap: the adapter
2836 * @polling: whether using NAPI to service response queues
2838 * Selects the top-level interrupt handler based on the type of interrupts
2839 * (MSI-X, MSI, or legacy) and whether NAPI will be used to service the response queues.
2842 irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2844 if (adap->flags & USING_MSIX)
2845 return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2846 if (adap->flags & USING_MSI)
2847 return polling ? t3_intr_msi_napi : t3_intr_msi;
2848 if (adap->params.rev > 0)
2849 return polling ? t3b_intr_napi : t3b_intr;
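/*
 * Usage sketch (illustrative, not part of this file): the top-level driver
 * passes the handler selected here to request_irq() when bringing the
 * adapter up, roughly as below. The surrounding fields (e.g. adap->name)
 * are assumptions of this example; only t3_intr_handler() and request_irq()
 * are taken as given.
 *
 *	err = request_irq(adap->pdev->irq,
 *			  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
 *			  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
 *			  adap->name, adap);
 */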
2853 #define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2854 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2855 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2856 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2858 #define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2859 #define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2863 * t3_sge_err_intr_handler - SGE async event interrupt handler
2864 * @adapter: the adapter
2866 * Interrupt handler for SGE asynchronous (non-data) events.
2868 void t3_sge_err_intr_handler(struct adapter *adapter)
2870 unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2873 if (status & SGE_PARERR)
2874 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2875 status & SGE_PARERR);
2876 if (status & SGE_FRAMINGERR)
2877 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2878 status & SGE_FRAMINGERR);
2880 if (status & F_RSPQCREDITOVERFOW)
2881 CH_ALERT(adapter, "SGE response queue credit overflow\n");
2883 if (status & F_RSPQDISABLED) {
2884 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2887 "packet delivered to disabled response queue "
2888 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2891 if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2892 queue_work(cxgb3_wq, &adapter->db_drop_task);
2894 if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2895 queue_work(cxgb3_wq, &adapter->db_full_task);
2897 if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2898 queue_work(cxgb3_wq, &adapter->db_empty_task);
2900 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
2901 if (status & SGE_FATALERR)
2902 t3_fatal_err(adapter);
2906 * sge_timer_tx - perform periodic maintenance of an SGE qset
2907 * @t: a timer list containing the SGE queue set to maintain
2909 * Runs periodically from a timer to perform maintenance of an SGE queue
2910 * set. Its single task is to clean up any completed Tx descriptors that
2912 * may still be pending.
2913 * Normal descriptor cleanup happens when new packets are added to a Tx
2914 * queue so this timer is relatively infrequent and does any cleanup only
2915 * if the Tx queue has not seen any new packets in a while. We make a
2916 * best effort attempt to reclaim descriptors, in that we don't wait
2917 * around if we cannot get a queue's lock (which most likely is because
2918 * someone else is queueing new packets and so will also handle the clean
2919 * up). Since control queues use immediate data exclusively we don't
2920 * bother cleaning them up here.
2923 static void sge_timer_tx(struct timer_list *t)
2925 struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
2926 struct port_info *pi = netdev_priv(qs->netdev);
2927 struct adapter *adap = pi->adapter;
2928 unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2929 unsigned long next_period;
2931 if (__netif_tx_trylock(qs->tx_q)) {
2932 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
2933 TX_RECLAIM_TIMER_CHUNK);
2934 __netif_tx_unlock(qs->tx_q);
2937 if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2938 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2939 TX_RECLAIM_TIMER_CHUNK);
2940 spin_unlock(&qs->txq[TXQ_OFLD].lock);
2943 next_period = TX_RECLAIM_PERIOD >>
2944 (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
2945 TX_RECLAIM_TIMER_CHUNK);
2946 mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
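/*
 * Worked example of the scaling above: with TX_RECLAIM_PERIOD = HZ/4 and
 * TX_RECLAIM_TIMER_CHUNK = 64, the larger of the two per-queue counts is
 * turned into a right shift of the base period, so one outstanding chunk
 * halves the next period to HZ/8, two chunks quarter it to HZ/16, and a
 * quiet queue set stays at the full HZ/4 interval.
 */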
2950 * sge_timer_rx - perform periodic maintenance of an SGE qset
2951 * @t: the timer list containing the SGE queue set to maintain
2953 * a) Replenishes Rx queues that have run out due to memory shortage.
2954 * Normally new Rx buffers are added when existing ones are consumed but
2955 * when out of memory a queue can become empty. We try to add only a few
2956 * buffers here, the queue will be replenished fully as these new buffers
2957 * are used up if memory shortage has subsided.
2959 * b) Returns coalesced response queue credits in case a response queue is starved.
2963 static void sge_timer_rx(struct timer_list *t)
2966 struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
2967 struct port_info *pi = netdev_priv(qs->netdev);
2968 struct adapter *adap = pi->adapter;
2971 lock = adap->params.rev > 0 ?
2972 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2974 if (!spin_trylock_irq(lock))
2977 if (napi_is_scheduled(&qs->napi))
2980 if (adap->params.rev < 4) {
2981 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2983 if (status & (1 << qs->rspq.cntxt_id)) {
2985 if (qs->rspq.credits) {
2987 refill_rspq(adap, &qs->rspq, 1);
2988 qs->rspq.restarted++;
2989 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2990 1 << qs->rspq.cntxt_id);
2995 if (qs->fl[0].credits < qs->fl[0].size)
2996 __refill_fl(adap, &qs->fl[0]);
2997 if (qs->fl[1].credits < qs->fl[1].size)
2998 __refill_fl(adap, &qs->fl[1]);
3001 spin_unlock_irq(lock);
3003 mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3007 * t3_update_qset_coalesce - update coalescing settings for a queue set
3008 * @qs: the SGE queue set
3009 * @p: new queue set parameters
3011 * Update the coalescing settings for an SGE queue set. Nothing is done
3012 * if the queue set is not initialized yet.
3014 void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
3016 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */
3017 qs->rspq.polling = p->polling;
3018 qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
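/*
 * Usage sketch (illustrative, not part of this file): an ethtool
 * set_coalesce handler might apply a new holdoff value to every queue set
 * of a port roughly as below; 'pi', 'adapter' and 'coalesce_usecs' are
 * assumed to come from that caller.
 *
 *	for (i = 0; i < pi->nqsets; i++) {
 *		struct qset_params *qsp = &adapter->params.sge.qset[i];
 *		struct sge_qset *qs = &adapter->sge.qs[i];
 *
 *		qsp->coalesce_usecs = coalesce_usecs;
 *		t3_update_qset_coalesce(qs, qsp);
 *	}
 */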
3022 * t3_sge_alloc_qset - initialize an SGE queue set
3023 * @adapter: the adapter
3024 * @id: the queue set id
3025 * @nports: how many Ethernet ports will be using this queue set
3026 * @irq_vec_idx: the IRQ vector index for response queue interrupts
3027 * @p: configuration parameters for this queue set
3028 * @ntxq: number of Tx queues for the queue set
3029 * @dev: net device associated with this queue set
3030 * @netdevq: net device TX queue associated with this queue set
3032 * Allocate resources and initialize an SGE queue set. A queue set
3033 * comprises a response queue, two Rx free-buffer queues, and up to 3
3034 * Tx queues. The Tx queues are assigned roles in the order Ethernet
3035 * queue, offload queue, and control queue.
3037 int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3038 int irq_vec_idx, const struct qset_params *p,
3039 int ntxq, struct net_device *dev,
3040 struct netdev_queue *netdevq)
3042 int i, avail, ret = -ENOMEM;
3043 struct sge_qset *q = &adapter->sge.qs[id];
3045 init_qset_cntxt(q, id);
3046 timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0);
3047 timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0);
3049 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
3050 sizeof(struct rx_desc),
3051 sizeof(struct rx_sw_desc),
3052 &q->fl[0].phys_addr, &q->fl[0].sdesc);
3056 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
3057 sizeof(struct rx_desc),
3058 sizeof(struct rx_sw_desc),
3059 &q->fl[1].phys_addr, &q->fl[1].sdesc);
3063 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
3064 sizeof(struct rsp_desc), 0,
3065 &q->rspq.phys_addr, NULL);
3069 for (i = 0; i < ntxq; ++i) {
3071 * The control queue always uses immediate data so does not
3072 * need to keep track of any sk_buffs.
3074 size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3076 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3077 sizeof(struct tx_desc), sz,
3078 &q->txq[i].phys_addr,
3080 if (!q->txq[i].desc)
3084 q->txq[i].size = p->txq_size[i];
3085 spin_lock_init(&q->txq[i].lock);
3086 skb_queue_head_init(&q->txq[i].sendq);
3089 INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq);
3090 INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq);
3092 q->fl[0].gen = q->fl[1].gen = 1;
3093 q->fl[0].size = p->fl_size;
3094 q->fl[1].size = p->jumbo_size;
3097 q->rspq.size = p->rspq_size;
3098 spin_lock_init(&q->rspq.lock);
3099 skb_queue_head_init(&q->rspq.rx_queue);
3101 q->txq[TXQ_ETH].stop_thres = nports *
3102 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
3104 #if FL0_PG_CHUNK_SIZE > 0
3105 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3107 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3109 #if FL1_PG_CHUNK_SIZE > 0
3110 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3112 q->fl[1].buf_size = is_offload(adapter) ?
3113 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3114 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3117 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3118 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3119 q->fl[0].order = FL0_PG_ORDER;
3120 q->fl[1].order = FL1_PG_ORDER;
3121 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3122 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
3124 spin_lock_irq(&adapter->sge.reg_lock);
3126 /* FL threshold comparison uses < */
3127 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3128 q->rspq.phys_addr, q->rspq.size,
3129 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3133 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3134 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3135 q->fl[i].phys_addr, q->fl[i].size,
3136 q->fl[i].buf_size - SGE_PG_RSVD,
3137 p->cong_thres, 1, 0);
3142 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3143 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3144 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3150 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3151 USE_GTS, SGE_CNTXT_OFLD, id,
3152 q->txq[TXQ_OFLD].phys_addr,
3153 q->txq[TXQ_OFLD].size, 0, 1, 0);
3159 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3161 q->txq[TXQ_CTRL].phys_addr,
3162 q->txq[TXQ_CTRL].size,
3163 q->txq[TXQ_CTRL].token, 1, 0);
3168 spin_unlock_irq(&adapter->sge.reg_lock);
3173 t3_update_qset_coalesce(q, p);
3175 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3176 GFP_KERNEL | __GFP_COMP);
3178 CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3182 if (avail < q->fl[0].size)
3183 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3186 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3187 GFP_KERNEL | __GFP_COMP);
3188 if (avail < q->fl[1].size)
3189 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3191 refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3193 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3194 V_NEWTIMER(q->rspq.holdoff_tmr));
3199 spin_unlock_irq(&adapter->sge.reg_lock);
3201 t3_free_qset(adapter, q);
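/*
 * Usage sketch (illustrative, not part of this file): the top-level driver
 * typically allocates one queue set per Rx queue of each port, roughly:
 *
 *	for_each_port(adap, i) {
 *		struct net_device *dev = adap->port[i];
 *		struct port_info *pi = netdev_priv(dev);
 *
 *		for (j = 0; j < pi->nqsets; ++j, ++qset_idx)
 *			err = t3_sge_alloc_qset(adap, qset_idx, 1, irq_idx,
 *					&adap->params.sge.qset[qset_idx],
 *					SGE_TXQ_PER_SET, dev,
 *					netdev_get_tx_queue(dev, j));
 *	}
 *
 * for_each_port(), irq_idx and qset_idx are assumptions of this sketch; the
 * t3_sge_alloc_qset() parameters follow the kernel-doc above.
 */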
3206 * t3_start_sge_timers - start SGE timer callbacks
3207 * @adap: the adapter
3209 * Starts each SGE queue set's timer callbacks
3211 void t3_start_sge_timers(struct adapter *adap)
3215 for (i = 0; i < SGE_QSETS; ++i) {
3216 struct sge_qset *q = &adap->sge.qs[i];
3218 if (q->tx_reclaim_timer.function)
3219 mod_timer(&q->tx_reclaim_timer,
3220 jiffies + TX_RECLAIM_PERIOD);
3222 if (q->rx_reclaim_timer.function)
3223 mod_timer(&q->rx_reclaim_timer,
3224 jiffies + RX_RECLAIM_PERIOD);
3229 * t3_stop_sge_timers - stop SGE timer callbacks
3230 * @adap: the adapter
3232 * Stops each SGE queue set's timer callbacks
3234 void t3_stop_sge_timers(struct adapter *adap)
3238 for (i = 0; i < SGE_QSETS; ++i) {
3239 struct sge_qset *q = &adap->sge.qs[i];
3241 if (q->tx_reclaim_timer.function)
3242 del_timer_sync(&q->tx_reclaim_timer);
3243 if (q->rx_reclaim_timer.function)
3244 del_timer_sync(&q->rx_reclaim_timer);
3249 * t3_free_sge_resources - free SGE resources
3250 * @adap: the adapter
3252 * Frees resources used by the SGE queue sets.
3254 void t3_free_sge_resources(struct adapter *adap)
3258 for (i = 0; i < SGE_QSETS; ++i)
3259 t3_free_qset(adap, &adap->sge.qs[i]);
3263 * t3_sge_start - enable SGE
3264 * @adap: the adapter
3266 * Enables the SGE for DMAs. This is the last step in starting packet transfers.
3269 void t3_sge_start(struct adapter *adap)
3271 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3275 * t3_sge_stop_dma - Disable SGE DMA engine operation
3276 * @adap: the adapter
3278 * Can be invoked from interrupt context e.g. error handler.
3280 * Note that this function cannot cancel the queue-restart work items, as
3281 * it cannot wait if called from interrupt context; however, those work
3282 * items will have no effect since the doorbells are disabled. The
3283 * driver will call t3_sge_stop() later from process context, at
3284 * which time the work items will be stopped if they are still running.
3286 void t3_sge_stop_dma(struct adapter *adap)
3288 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3292 * t3_sge_stop - disable SGE operation completely
3293 * @adap: the adapter
3295 * Called from process context. Disables the DMA engine and any
3296 * pending queue-restart work items.
3298 void t3_sge_stop(struct adapter *adap)
3302 t3_sge_stop_dma(adap);
3304 for (i = 0; i < SGE_QSETS; ++i) {
3305 struct sge_qset *qs = &adap->sge.qs[i];
3307 cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task);
3308 cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task);
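/*
 * Illustrative shutdown ordering based on the comments above: an error
 * handler in interrupt context may only do
 *
 *	t3_sge_stop_dma(adap);
 *
 * and the later process-context teardown completes the stop with
 *
 *	t3_sge_stop(adap);
 *	t3_stop_sge_timers(adap);
 */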
3313 * t3_sge_init - initialize SGE
3314 * @adap: the adapter
3315 * @p: the SGE parameters
3317 * Performs SGE initialization needed every time after a chip reset.
3318 * We do not initialize any of the queue sets here; instead, the driver
3319 * top-level must request those individually. We also do not enable DMA
3320 * here; that should be done after the queues have been set up.
3322 void t3_sge_init(struct adapter *adap, struct sge_params *p)
3324 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3326 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3327 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3328 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3329 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3330 #if SGE_NUM_GENBITS == 1
3331 ctrl |= F_EGRGENCTRL;
3333 if (adap->params.rev > 0) {
3334 if (!(adap->flags & (USING_MSIX | USING_MSI)))
3335 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3337 t3_write_reg(adap, A_SG_CONTROL, ctrl);
3338 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3339 V_LORCQDRBTHRSH(512));
3340 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3341 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3342 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3343 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3344 adap->params.rev < T3_REV_C ? 1000 : 500);
3345 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3346 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3347 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3348 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3349 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
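/*
 * Note on units: A_SG_TIMER_TICK is programmed above to
 * core_ticks_per_usec()/10 core clocks, i.e. one SGE timer tick per 0.1us,
 * which matches the 0.1us holdoff units used elsewhere in this file
 * (holdoff_tmr = coalesce_usecs * 10, NOMEM_INTR_DELAY).
 */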
3353 * t3_sge_prep - one-time SGE initialization
3354 * @adap: the associated adapter
3355 * @p: SGE parameters
3357 * Performs one-time initialization of SGE SW state. Includes determining
3358 * defaults for the assorted SGE parameters, which admins can change until
3359 * they are used to initialize the SGE.
3361 void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3365 p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3366 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3368 for (i = 0; i < SGE_QSETS; ++i) {
3369 struct qset_params *q = p->qset + i;
3371 q->polling = adap->params.rev > 0;
3372 q->coalesce_usecs = 5;
3373 q->rspq_size = 1024;
3375 q->jumbo_size = 512;
3376 q->txq_size[TXQ_ETH] = 1024;
3377 q->txq_size[TXQ_OFLD] = 1024;
3378 q->txq_size[TXQ_CTRL] = 256;
3382 spin_lock_init(&adap->sge.reg_lock);