2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
16 * - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <linux/prefetch.h>
43 #include <linux/export.h>
47 #include <net/busy_poll.h>
48 #ifdef CONFIG_CHELSIO_T4_FCOE
49 #include <scsi/fc/fc_fcoe.h>
50 #endif /* CONFIG_CHELSIO_T4_FCOE */
53 #include "t4_values.h"
56 #include "cxgb4_ptp.h"
57 #include "cxgb4_uld.h"
60 * Rx buffer size. We use largish buffers if possible but settle for single
61 * pages under memory shortage.
#if PAGE_SHIFT >= 16
# define FL_PG_ORDER 0
#else
# define FL_PG_ORDER (16 - PAGE_SHIFT)
#endif
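/* With the common 4KB PAGE_SIZE (an assumption for illustration), this
* yields order-4, i.e. 64KB, Free List buffers; on systems with 64KB or
* larger pages a single page already suffices and FL_PG_ORDER is 0.
*/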
69 /* RX_PULL_LEN should be <= RX_COPY_THRES */
70 #define RX_COPY_THRES 256
71 #define RX_PULL_LEN 128
74 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
75 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
77 #define RX_PKT_SKB_LEN 512
80 * Max number of Tx descriptors we clean up at a time. Should be modest as
81 * freeing skbs isn't cheap and it happens while holding locks. We just need
* to free packets faster than they arrive; we eventually catch up and keep
83 * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES.
85 #define MAX_TX_RECLAIM 16
88 * Max number of Rx buffers we replenish at a time. Again keep this modest,
89 * allocating buffers isn't cheap either.
91 #define MAX_RX_REFILL 16U
94 * Period of the Rx queue check timer. This timer is infrequent as it has
95 * something to do only when the system experiences severe memory shortage.
97 #define RX_QCHECK_PERIOD (HZ / 2)
100 * Period of the Tx queue check timer.
102 #define TX_QCHECK_PERIOD (HZ / 2)
105 * Max number of Tx descriptors to be reclaimed by the Tx timer.
107 #define MAX_TIMER_TX_RECLAIM 100
110 * Timer index used when backing off due to memory shortage.
112 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
115 * Suspension threshold for non-Ethernet Tx queues. We require enough room
116 * for a full sized WR.
118 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
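/* Illustration only: with the usual SGE_MAX_WR_LEN of 512 bytes and 64-byte
* Tx descriptors (assumed values), TXQ_STOP_THRES comes to 8 descriptors,
* which is why MAX_TX_RECLAIM above must be at least 16.
*/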
* Max Tx descriptor space we allow for an Ethernet packet to be inlined
* into a WR.
124 #define MAX_IMM_TX_PKT_LEN 256
127 * Max size of a WR sent through a control Tx queue.
129 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
131 struct rx_sw_desc { /* SW state per Rx descriptor */
* Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
* buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
* We could easily support more but there doesn't seem to be much need for
* that.
142 #define FL_MTU_SMALL 1500
143 #define FL_MTU_LARGE 9000
145 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
148 struct sge *s = &adapter->sge;
150 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
153 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
154 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
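/* Hypothetical example: with a 2-byte ingress packet shift and the minimum
* 32-byte Free List alignment, FL_MTU_SMALL_BUFSIZE() works out to
* ALIGN(2 + 14 + 4 + 1500, 32) = 1536 bytes.
*/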
157 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
158 * these to specify the buffer size as an index into the SGE Free List Buffer
159 * Size register array. We also use bit 4, when the buffer has been unmapped
160 * for DMA, but this is of course never sent to the hardware and is only used
161 * to prevent double unmappings. All of the above requires that the Free List
162 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
* 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
164 * Free List Buffer alignment is 32 bytes, this works out for us ...
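*
* For example, a hypothetical large-MTU buffer mapped at bus address 0x12340
* would be stored as dma_addr = 0x12340 | RX_LARGE_MTU_BUF = 0x12343;
* masking with ~RX_BUF_FLAGS recovers the bus address and
* (dma_addr & RX_BUF_SIZE) the Free List Buffer Size index.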
167 RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
169 RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
172 * XXX We shouldn't depend on being able to use these indices.
173 * XXX Especially when some other Master PF has initialized the
174 * XXX adapter or we use the Firmware Configuration File. We
175 * XXX should really search through the Host Buffer Size register
176 * XXX array for the appropriately sized buffer indices.
178 RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */
181 RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
182 RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
185 static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
186 #define MIN_NAPI_WORK 1
188 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
190 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
193 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
195 return !(d->dma_addr & RX_UNMAPPED_BUF);
199 * txq_avail - return the number of available slots in a Tx queue
* Returns the number of descriptors in a Tx queue available to write new
* packet descriptors.
205 static inline unsigned int txq_avail(const struct sge_txq *q)
207 return q->size - 1 - q->in_use;
211 * fl_cap - return the capacity of a free-buffer list
214 * Returns the capacity of a free-buffer list. The capacity is less than
215 * the size because one descriptor needs to be left unpopulated, otherwise
216 * HW will think the FL is empty.
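*
* For example, a Free List created with 1024 buffer slots can hold at most
* 1016 buffers, since one 8-buffer descriptor is always left unpopulated.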
218 static inline unsigned int fl_cap(const struct sge_fl *fl)
220 return fl->size - 8; /* 1 descriptor = 8 buffers */
224 * fl_starving - return whether a Free List is starving.
225 * @adapter: pointer to the adapter
228 * Tests specified Free List to see whether the number of buffers
* available to the hardware has fallen below our "starvation"
* threshold.
232 static inline bool fl_starving(const struct adapter *adapter,
233 const struct sge_fl *fl)
235 const struct sge *s = &adapter->sge;
237 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
240 int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
243 const skb_frag_t *fp, *end;
244 const struct skb_shared_info *si;
246 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
247 if (dma_mapping_error(dev, *addr))
250 si = skb_shinfo(skb);
251 end = &si->frags[si->nr_frags];
253 for (fp = si->frags; fp < end; fp++) {
254 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
256 if (dma_mapping_error(dev, *addr))
262 while (fp-- > si->frags)
263 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
265 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
269 EXPORT_SYMBOL(cxgb4_map_skb);
271 #ifdef CONFIG_NEED_DMA_MAP_STATE
272 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
273 const dma_addr_t *addr)
275 const skb_frag_t *fp, *end;
276 const struct skb_shared_info *si;
278 dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
280 si = skb_shinfo(skb);
281 end = &si->frags[si->nr_frags];
282 for (fp = si->frags; fp < end; fp++)
283 dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
287 * deferred_unmap_destructor - unmap a packet when it is freed
290 * This is the packet destructor used for Tx packets that need to remain
* mapped until they are freed rather than until their Tx descriptors are
* freed.
294 static void deferred_unmap_destructor(struct sk_buff *skb)
296 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
300 static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
301 const struct ulptx_sgl *sgl, const struct sge_txq *q)
303 const struct ulptx_sge_pair *p;
304 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
306 if (likely(skb_headlen(skb)))
307 dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
310 dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
316 * the complexity below is because of the possibility of a wrap-around
317 * in the middle of an SGL
319 for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
320 if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
321 unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
322 ntohl(p->len[0]), DMA_TO_DEVICE);
323 dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
324 ntohl(p->len[1]), DMA_TO_DEVICE);
326 } else if ((u8 *)p == (u8 *)q->stat) {
327 p = (const struct ulptx_sge_pair *)q->desc;
329 } else if ((u8 *)p + 8 == (u8 *)q->stat) {
330 const __be64 *addr = (const __be64 *)q->desc;
332 dma_unmap_page(dev, be64_to_cpu(addr[0]),
333 ntohl(p->len[0]), DMA_TO_DEVICE);
334 dma_unmap_page(dev, be64_to_cpu(addr[1]),
335 ntohl(p->len[1]), DMA_TO_DEVICE);
336 p = (const struct ulptx_sge_pair *)&addr[2];
338 const __be64 *addr = (const __be64 *)q->desc;
340 dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
341 ntohl(p->len[0]), DMA_TO_DEVICE);
342 dma_unmap_page(dev, be64_to_cpu(addr[0]),
343 ntohl(p->len[1]), DMA_TO_DEVICE);
344 p = (const struct ulptx_sge_pair *)&addr[1];
350 if ((u8 *)p == (u8 *)q->stat)
351 p = (const struct ulptx_sge_pair *)q->desc;
352 addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
353 *(const __be64 *)q->desc;
354 dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
360 * free_tx_desc - reclaims Tx descriptors and their buffers
361 * @adapter: the adapter
362 * @q: the Tx queue to reclaim descriptors from
363 * @n: the number of descriptors to reclaim
364 * @unmap: whether the buffers should be unmapped for DMA
366 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
367 * Tx buffers. Called with the Tx queue lock held.
369 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
370 unsigned int n, bool unmap)
372 struct tx_sw_desc *d;
373 unsigned int cidx = q->cidx;
374 struct device *dev = adap->pdev_dev;
378 if (d->skb) { /* an SGL is present */
380 unmap_sgl(dev, d->skb, d->sgl, q);
381 dev_consume_skb_any(d->skb);
385 if (++cidx == q->size) {
394 * Return the number of reclaimable descriptors in a Tx queue.
396 static inline int reclaimable(const struct sge_txq *q)
int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
hw_cidx -= q->cidx;
return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
404 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
406 * @q: the Tx queue to reclaim completed descriptors from
407 * @unmap: whether the buffers should be unmapped for DMA
409 * Reclaims Tx descriptors that the SGE has indicated it has processed,
410 * and frees the associated buffers if possible. Called with the Tx
413 inline void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
416 int avail = reclaimable(q);
420 * Limit the amount of clean up work we do at a time to keep
421 * the Tx lock hold time O(1).
423 if (avail > MAX_TX_RECLAIM)
424 avail = MAX_TX_RECLAIM;
426 free_tx_desc(adap, q, avail, unmap);
430 EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
432 static inline int get_buf_size(struct adapter *adapter,
433 const struct rx_sw_desc *d)
435 struct sge *s = &adapter->sge;
436 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
439 switch (rx_buf_size_idx) {
440 case RX_SMALL_PG_BUF:
441 buf_size = PAGE_SIZE;
444 case RX_LARGE_PG_BUF:
445 buf_size = PAGE_SIZE << s->fl_pg_order;
448 case RX_SMALL_MTU_BUF:
449 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
452 case RX_LARGE_MTU_BUF:
453 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
464 * free_rx_bufs - free the Rx buffers on an SGE free list
466 * @q: the SGE free list to free buffers from
467 * @n: how many buffers to free
469 * Release the next @n buffers on an SGE free-buffer Rx queue. The
470 * buffers must be made inaccessible to HW before calling this function.
472 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
475 struct rx_sw_desc *d = &q->sdesc[q->cidx];
477 if (is_buf_mapped(d))
478 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
479 get_buf_size(adap, d),
483 if (++q->cidx == q->size)
490 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
492 * @q: the SGE free list
494 * Unmap the current buffer on an SGE free-buffer Rx queue. The
495 * buffer must be made inaccessible to HW before calling this function.
497 * This is similar to @free_rx_bufs above but does not free the buffer.
498 * Do note that the FL still loses any further access to the buffer.
500 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
502 struct rx_sw_desc *d = &q->sdesc[q->cidx];
504 if (is_buf_mapped(d))
505 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
506 get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
508 if (++q->cidx == q->size)
513 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
515 if (q->pend_cred >= 8) {
516 u32 val = adap->params.arch.sge_fl_db;
518 if (is_t4(adap->params.chip))
519 val |= PIDX_V(q->pend_cred / 8);
521 val |= PIDX_T5_V(q->pend_cred / 8);
523 /* Make sure all memory writes to the Free List queue are
524 * committed before we tell the hardware about them.
528 /* If we don't have access to the new User Doorbell (T5+), use
529 * the old doorbell mechanism; otherwise use the new BAR2
532 if (unlikely(q->bar2_addr == NULL)) {
533 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
534 val | QID_V(q->cntxt_id));
536 writel(val | QID_V(q->bar2_qid),
537 q->bar2_addr + SGE_UDB_KDOORBELL);
539 /* This Write memory Barrier will force the write to
540 * the User Doorbell area to be flushed.
548 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
552 sd->dma_addr = mapping; /* includes size low bits */
556 * refill_fl - refill an SGE Rx buffer ring
558 * @q: the ring to refill
559 * @n: the number of new buffers to allocate
560 * @gfp: the gfp flags for the allocations
562 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
563 * allocated with the supplied gfp flags. The caller must assure that
564 * @n does not exceed the queue's capacity. If afterwards the queue is
565 * found critically low mark it as starving in the bitmap of starving FLs.
567 * Returns the number of buffers allocated.
569 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
572 struct sge *s = &adap->sge;
575 unsigned int cred = q->avail;
576 __be64 *d = &q->desc[q->pidx];
577 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
580 #ifdef CONFIG_DEBUG_FS
581 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
586 node = dev_to_node(adap->pdev_dev);
588 if (s->fl_pg_order == 0)
589 goto alloc_small_pages;
592 * Prefer large buffers
595 pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
597 q->large_alloc_failed++;
598 break; /* fall back to single pages */
601 mapping = dma_map_page(adap->pdev_dev, pg, 0,
602 PAGE_SIZE << s->fl_pg_order,
604 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
605 __free_pages(pg, s->fl_pg_order);
607 goto out; /* do not try small pages for this error */
609 mapping |= RX_LARGE_PG_BUF;
610 *d++ = cpu_to_be64(mapping);
612 set_rx_sw_desc(sd, pg, mapping);
616 if (++q->pidx == q->size) {
626 pg = alloc_pages_node(node, gfp, 0);
632 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
634 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
639 *d++ = cpu_to_be64(mapping);
641 set_rx_sw_desc(sd, pg, mapping);
645 if (++q->pidx == q->size) {
652 out: cred = q->avail - cred;
653 q->pend_cred += cred;
656 if (unlikely(fl_starving(adap, q))) {
659 set_bit(q->cntxt_id - adap->sge.egr_start,
660 adap->sge.starving_fl);
666 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
668 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
673 * alloc_ring - allocate resources for an SGE descriptor ring
674 * @dev: the PCI device's core device
675 * @nelem: the number of descriptors
676 * @elem_size: the size of each descriptor
677 * @sw_size: the size of the SW state associated with each ring element
678 * @phys: the physical address of the allocated ring
679 * @metadata: address of the array holding the SW state for the ring
680 * @stat_size: extra space in HW ring for status information
681 * @node: preferred node for memory allocations
683 * Allocates resources for an SGE descriptor ring, such as Tx queues,
684 * free buffer lists, or response queues. Each SGE ring requires
685 * space for its HW descriptors plus, optionally, space for the SW state
686 * associated with each HW entry (the metadata). The function returns
687 * three values: the virtual address for the HW ring (the return value
* of the function), the bus address of the HW ring, and the address
* of the SW ring.
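*
* As an illustrative sketch (names as used elsewhere in this driver), a
* Free List ring might be set up roughly as:
*
*	fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
*			      sizeof(struct rx_sw_desc), &fl->addr,
*			      &fl->sdesc, s->stat_len, node);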
691 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
692 size_t sw_size, dma_addr_t *phys, void *metadata,
693 size_t stat_size, int node)
695 size_t len = nelem * elem_size + stat_size;
697 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
702 s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
705 dma_free_coherent(dev, len, p, *phys);
710 *(void **)metadata = s;
716 * sgl_len - calculates the size of an SGL of the given capacity
717 * @n: the number of SGL entries
719 * Calculates the number of flits needed for a scatter/gather list that
720 * can hold the given number of entries.
722 static inline unsigned int sgl_len(unsigned int n)
724 /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
725 * addresses. The DSGL Work Request starts off with a 32-bit DSGL
726 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
727 * repeated sequences of { Length[i], Length[i+1], Address[i],
728 * Address[i+1] } (this ensures that all addresses are on 64-bit
729 * boundaries). If N is even, then Length[N+1] should be set to 0 and
730 * Address[N+1] is omitted.
732 * The following calculation incorporates all of the above. It's
733 * somewhat hard to follow but, briefly: the "+2" accounts for the
734 * first two flits which include the DSGL header, Length0 and
735 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
736 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
* finally the "+((n-1)&1)" adds the one remaining flit needed if
* (n-1) is odd.
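*
* As an illustrative check: for an SGL with n = 3 entries, the decrement
* below makes this (3*2)/2 + (2&1) + 2 = 5 flits: two flits for the DSGL
* header plus Length0/Address0 and three flits for the remaining
* { Length[1], Length[2], Address[1], Address[2] } pair.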
*/
n--;
return (3 * n) / 2 + (n & 1) + 2;
745 * flits_to_desc - returns the num of Tx descriptors for the given flits
746 * @n: the number of flits
748 * Returns the number of Tx descriptors needed for the supplied number
751 static inline unsigned int flits_to_desc(unsigned int n)
753 BUG_ON(n > SGE_MAX_WR_LEN / 8);
754 return DIV_ROUND_UP(n, 8);
758 * is_eth_imm - can an Ethernet packet be sent as immediate data?
761 * Returns whether an Ethernet packet is small enough to fit as
762 * immediate data. Return value corresponds to headroom required.
764 static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
768 if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
769 chip_ver > CHELSIO_T5) {
770 hdrlen = sizeof(struct cpl_tx_tnl_lso);
771 hdrlen += sizeof(struct cpl_tx_pkt_core);
773 hdrlen = skb_shinfo(skb)->gso_size ?
774 sizeof(struct cpl_tx_pkt_lso_core) : 0;
775 hdrlen += sizeof(struct cpl_tx_pkt);
777 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
783 * calc_tx_flits - calculate the number of flits for a packet Tx WR
786 * Returns the number of flits needed for a Tx WR for the given Ethernet
787 * packet, including the needed WR and CPL headers.
789 static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
790 unsigned int chip_ver)
793 int hdrlen = is_eth_imm(skb, chip_ver);
795 /* If the skb is small enough, we can pump it out as a work request
796 * with only immediate data. In that case we just have to have the
797 * TX Packet header plus the skb data in the Work Request.
801 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
803 /* Otherwise, we're going to have to construct a Scatter gather list
804 * of the skb body and fragments. We also include the flits necessary
805 * for the TX Packet Work Request and CPL. We always have a firmware
806 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
807 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
808 * message or, if we're doing a Large Send Offload, an LSO CPL message
809 * with an embedded TX Packet Write CPL message.
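*
* For instance, a hypothetical non-GSO packet with two page fragments needs
* sgl_len(3) = 5 flits for the SGL plus the WR and CPL header flits added
* below (4 flits, assuming 16-byte fw_eth_tx_pkt_wr and cpl_tx_pkt_core
* structures), i.e. 9 flits in total.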
811 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
812 if (skb_shinfo(skb)->gso_size) {
813 if (skb->encapsulation && chip_ver > CHELSIO_T5)
814 hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
815 sizeof(struct cpl_tx_tnl_lso);
817 hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
818 sizeof(struct cpl_tx_pkt_lso_core);
820 hdrlen += sizeof(struct cpl_tx_pkt_core);
821 flits += (hdrlen / sizeof(__be64));
823 flits += (sizeof(struct fw_eth_tx_pkt_wr) +
824 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
830 * calc_tx_descs - calculate the number of Tx descriptors for a packet
833 * Returns the number of Tx descriptors needed for the given Ethernet
834 * packet, including the needed WR and CPL headers.
836 static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
837 unsigned int chip_ver)
839 return flits_to_desc(calc_tx_flits(skb, chip_ver));
843 * cxgb4_write_sgl - populate a scatter/gather list for a packet
845 * @q: the Tx queue we are writing into
846 * @sgl: starting location for writing the SGL
847 * @end: points right after the end of the SGL
848 * @start: start offset into skb main-body data to include in the SGL
849 * @addr: the list of bus addresses for the SGL elements
851 * Generates a gather list for the buffers that make up a packet.
852 * The caller must provide adequate space for the SGL that will be written.
853 * The SGL includes all of the packet's page fragments and the data in its
854 * main body except for the first @start bytes. @sgl must be 16-byte
855 * aligned and within a Tx descriptor with available space. @end points
856 * right after the end of the SGL but does not account for any potential
857 * wrap around, i.e., @end > @sgl.
859 void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
860 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
861 const dma_addr_t *addr)
864 struct ulptx_sge_pair *to;
865 const struct skb_shared_info *si = skb_shinfo(skb);
866 unsigned int nfrags = si->nr_frags;
867 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
869 len = skb_headlen(skb) - start;
871 sgl->len0 = htonl(len);
872 sgl->addr0 = cpu_to_be64(addr[0] + start);
875 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
876 sgl->addr0 = cpu_to_be64(addr[1]);
879 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
880 ULPTX_NSGE_V(nfrags));
881 if (likely(--nfrags == 0))
884 * Most of the complexity below deals with the possibility we hit the
885 * end of the queue in the middle of writing the SGL. For this case
886 * only we create the SGL in a temporary buffer and then copy it.
888 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
890 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
891 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
892 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
893 to->addr[0] = cpu_to_be64(addr[i]);
894 to->addr[1] = cpu_to_be64(addr[++i]);
897 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
898 to->len[1] = cpu_to_be32(0);
899 to->addr[0] = cpu_to_be64(addr[i + 1]);
901 if (unlikely((u8 *)end > (u8 *)q->stat)) {
902 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
905 memcpy(sgl->sge, buf, part0);
906 part1 = (u8 *)end - (u8 *)q->stat;
907 memcpy(q->desc, (u8 *)buf + part0, part1);
908 end = (void *)q->desc + part1;
910 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
913 EXPORT_SYMBOL(cxgb4_write_sgl);
/* This function copies a 64-byte coalesced Work Request to memory-mapped
* BAR2 space. For a coalesced WR, the SGE fetches data from the FIFO
* instead of from host memory.
*/
919 static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
932 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
935 * @n: number of new descriptors to give to HW
* Ring the doorbell for a Tx queue.
939 inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
941 /* Make sure that all writes to the TX Descriptors are committed
942 * before we tell the hardware about them.
946 /* If we don't have access to the new User Doorbell (T5+), use the old
947 * doorbell mechanism; otherwise use the new BAR2 mechanism.
949 if (unlikely(q->bar2_addr == NULL)) {
953 /* For T4 we need to participate in the Doorbell Recovery
956 spin_lock_irqsave(&q->db_lock, flags);
958 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
959 QID_V(q->cntxt_id) | val);
962 q->db_pidx = q->pidx;
963 spin_unlock_irqrestore(&q->db_lock, flags);
965 u32 val = PIDX_T5_V(n);
967 /* T4 and later chips share the same PIDX field offset within
968 * the doorbell, but T5 and later shrank the field in order to
969 * gain a bit for Doorbell Priority. The field was absurdly
970 * large in the first place (14 bits) so we just use the T5
971 * and later limits and warn if a Queue ID is too large.
973 WARN_ON(val & DBPRIO_F);
975 /* If we're only writing a single TX Descriptor and we can use
976 * Inferred QID registers, we can use the Write Combining
977 * Gather Buffer; otherwise we use the simple doorbell.
979 if (n == 1 && q->bar2_qid == 0) {
983 u64 *wr = (u64 *)&q->desc[index];
985 cxgb_pio_copy((u64 __iomem *)
986 (q->bar2_addr + SGE_UDB_WCDOORBELL),
989 writel(val | QID_V(q->bar2_qid),
990 q->bar2_addr + SGE_UDB_KDOORBELL);
993 /* This Write Memory Barrier will force the write to the User
994 * Doorbell area to be flushed. This is needed to prevent
995 * writes on different CPUs for the same queue from hitting
996 * the adapter out of order. This is required when some Work
997 * Requests take the Write Combine Gather Buffer path (user
998 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
999 * take the traditional path where we simply increment the
1000 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
1001 * hardware DMA read the actual Work Request.
1006 EXPORT_SYMBOL(cxgb4_ring_tx_db);
1009 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
1011 * @q: the Tx queue where the packet will be inlined
1012 * @pos: starting position in the Tx queue where to inline the packet
1014 * Inline a packet's contents directly into Tx descriptors, starting at
1015 * the given position within the Tx DMA ring.
1016 * Most of the complexity of this operation is dealing with wrap arounds
1017 * in the middle of the packet we want to inline.
1019 void cxgb4_inline_tx_skb(const struct sk_buff *skb,
1020 const struct sge_txq *q, void *pos)
1022 int left = (void *)q->stat - pos;
1025 if (likely(skb->len <= left)) {
1026 if (likely(!skb->data_len))
1027 skb_copy_from_linear_data(skb, pos, skb->len);
1029 skb_copy_bits(skb, 0, pos, skb->len);
1032 skb_copy_bits(skb, 0, pos, left);
1033 skb_copy_bits(skb, left, q->desc, skb->len - left);
1034 pos = (void *)q->desc + (skb->len - left);
1037 /* 0-pad to multiple of 16 */
1038 p = PTR_ALIGN(pos, 8);
1039 if ((uintptr_t)p & 8)
1042 EXPORT_SYMBOL(cxgb4_inline_tx_skb);
1044 static void *inline_tx_skb_header(const struct sk_buff *skb,
1045 const struct sge_txq *q, void *pos,
1049 int left = (void *)q->stat - pos;
1051 if (likely(length <= left)) {
1052 memcpy(pos, skb->data, length);
1055 memcpy(pos, skb->data, left);
1056 memcpy(q->desc, skb->data + left, length - left);
1057 pos = (void *)q->desc + (length - left);
1059 /* 0-pad to multiple of 16 */
1060 p = PTR_ALIGN(pos, 8);
1061 if ((uintptr_t)p & 8) {
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
1072 static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1075 bool inner_hdr_csum = false;
1078 if (skb->encapsulation &&
1079 (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
1080 inner_hdr_csum = true;
1082 if (inner_hdr_csum) {
1083 ver = inner_ip_hdr(skb)->version;
1084 proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
1085 inner_ipv6_hdr(skb)->nexthdr;
1087 ver = ip_hdr(skb)->version;
1088 proto = (ver == 4) ? ip_hdr(skb)->protocol :
1089 ipv6_hdr(skb)->nexthdr;
1093 if (proto == IPPROTO_TCP)
1094 csum_type = TX_CSUM_TCPIP;
1095 else if (proto == IPPROTO_UDP)
1096 csum_type = TX_CSUM_UDPIP;
1099 * unknown protocol, disable HW csum
1100 * and hope a bad packet is detected
1102 return TXPKT_L4CSUM_DIS_F;
1106 * this doesn't work with extension headers
1108 if (proto == IPPROTO_TCP)
1109 csum_type = TX_CSUM_TCPIP6;
1110 else if (proto == IPPROTO_UDP)
1111 csum_type = TX_CSUM_UDPIP6;
1116 if (likely(csum_type >= TX_CSUM_TCPIP)) {
1117 int eth_hdr_len, l4_len;
1120 if (inner_hdr_csum) {
1121 /* This allows checksum offload for all encapsulated
1122 * packets like GRE etc..
1124 l4_len = skb_inner_network_header_len(skb);
1125 eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
1127 l4_len = skb_network_header_len(skb);
1128 eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1130 hdr_len = TXPKT_IPHDR_LEN_V(l4_len);
1132 if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1133 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1135 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1136 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1138 int start = skb_transport_offset(skb);
1140 return TXPKT_CSUM_TYPE_V(csum_type) |
1141 TXPKT_CSUM_START_V(start) |
1142 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
1146 static void eth_txq_stop(struct sge_eth_txq *q)
1148 netif_tx_stop_queue(q->txq);
1152 static inline void txq_advance(struct sge_txq *q, unsigned int n)
1156 if (q->pidx >= q->size)
1160 #ifdef CONFIG_CHELSIO_T4_FCOE
1162 cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1163 const struct port_info *pi, u64 *cntrl)
1165 const struct cxgb_fcoe *fcoe = &pi->fcoe;
1167 if (!(fcoe->flags & CXGB_FCOE_ENABLED))
1170 if (skb->protocol != htons(ETH_P_FCOE))
1173 skb_reset_mac_header(skb);
1174 skb->mac_len = sizeof(struct ethhdr);
1176 skb_set_network_header(skb, skb->mac_len);
1177 skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
1179 if (!cxgb_fcoe_sof_eof_supported(adap, skb))
1182 /* FC CRC offload */
1183 *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
1184 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
1185 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
1186 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
1187 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
1190 #endif /* CONFIG_CHELSIO_T4_FCOE */
1192 /* Returns tunnel type if hardware supports offloading of the same.
1193 * It is called only for T5 and onwards.
1195 enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
1198 enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1199 struct port_info *pi = netdev_priv(skb->dev);
1200 struct adapter *adapter = pi->adapter;
1202 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1203 skb->inner_protocol != htons(ETH_P_TEB))
1206 switch (vlan_get_protocol(skb)) {
1207 case htons(ETH_P_IP):
1208 l4_hdr = ip_hdr(skb)->protocol;
1210 case htons(ETH_P_IPV6):
1211 l4_hdr = ipv6_hdr(skb)->nexthdr;
1219 if (adapter->vxlan_port == udp_hdr(skb)->dest)
1220 tnl_type = TX_TNL_TYPE_VXLAN;
1221 else if (adapter->geneve_port == udp_hdr(skb)->dest)
1222 tnl_type = TX_TNL_TYPE_GENEVE;
1231 static inline void t6_fill_tnl_lso(struct sk_buff *skb,
1232 struct cpl_tx_tnl_lso *tnl_lso,
1233 enum cpl_tx_tnl_lso_type tnl_type)
1236 int in_eth_xtra_len;
1237 int l3hdr_len = skb_network_header_len(skb);
1238 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1239 const struct skb_shared_info *ssi = skb_shinfo(skb);
1240 bool v6 = (ip_hdr(skb)->version == 6);
1242 val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
1243 CPL_TX_TNL_LSO_FIRST_F |
1244 CPL_TX_TNL_LSO_LAST_F |
1245 (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
1246 CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
1247 CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
1248 (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
1249 CPL_TX_TNL_LSO_IPLENSETOUT_F |
1250 (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
1251 tnl_lso->op_to_IpIdSplitOut = htonl(val);
1253 tnl_lso->IpIdOffsetOut = 0;
1255 /* Get the tunnel header length */
1256 val = skb_inner_mac_header(skb) - skb_mac_header(skb);
1257 in_eth_xtra_len = skb_inner_network_header(skb) -
1258 skb_inner_mac_header(skb) - ETH_HLEN;
1261 case TX_TNL_TYPE_VXLAN:
1262 case TX_TNL_TYPE_GENEVE:
1263 tnl_lso->UdpLenSetOut_to_TnlHdrLen =
1264 htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
1265 CPL_TX_TNL_LSO_UDPLENSETOUT_F);
1268 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
1272 tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
1273 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
1274 CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
1278 val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
1279 CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
1280 CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
1281 CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
1282 tnl_lso->Flow_to_TcpHdrLen = htonl(val);
1284 tnl_lso->IpIdOffset = htons(0);
1286 tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
1287 tnl_lso->TCPSeqOffset = htonl(0);
1288 tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
1292 * t4_eth_xmit - add a packet to an Ethernet Tx queue
1294 * @dev: the egress net device
1296 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1298 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1300 u32 wr_mid, ctrl0, op;
1301 u64 cntrl, *end, *sgl;
1303 unsigned int flits, ndesc;
1304 struct adapter *adap;
1305 struct sge_eth_txq *q;
1306 const struct port_info *pi;
1307 struct fw_eth_tx_pkt_wr *wr;
1308 struct cpl_tx_pkt_core *cpl;
1309 const struct skb_shared_info *ssi;
1310 dma_addr_t addr[MAX_SKB_FRAGS + 1];
1311 bool immediate = false;
1312 int len, max_pkt_len;
1313 bool ptp_enabled = is_ptp_enabled(skb, dev);
1314 unsigned int chip_ver;
1315 enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1317 #ifdef CONFIG_CHELSIO_T4_FCOE
1319 #endif /* CONFIG_CHELSIO_T4_FCOE */
1322 * The chip min packet length is 10 octets but play safe and reject
1323 * anything shorter than an Ethernet header.
1325 if (unlikely(skb->len < ETH_HLEN)) {
1326 out_free: dev_kfree_skb_any(skb);
1327 return NETDEV_TX_OK;
1330 /* Discard the packet if the length is greater than mtu */
1331 max_pkt_len = ETH_HLEN + dev->mtu;
1332 if (skb_vlan_tagged(skb))
1333 max_pkt_len += VLAN_HLEN;
1334 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1337 pi = netdev_priv(dev);
1339 ssi = skb_shinfo(skb);
1340 #ifdef CONFIG_CHELSIO_IPSEC_INLINE
1341 if (xfrm_offload(skb) && !ssi->gso_size)
1342 return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
1343 #endif /* CHELSIO_IPSEC_INLINE */
1345 qidx = skb_get_queue_mapping(skb);
1347 spin_lock(&adap->ptp_lock);
1348 if (!(adap->ptp_tx_skb)) {
1349 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1350 adap->ptp_tx_skb = skb_get(skb);
1352 spin_unlock(&adap->ptp_lock);
1355 q = &adap->sge.ptptxq;
1357 q = &adap->sge.ethtxq[qidx + pi->first_qset];
1359 skb_tx_timestamp(skb);
1361 cxgb4_reclaim_completed_tx(adap, &q->q, true);
1362 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1364 #ifdef CONFIG_CHELSIO_T4_FCOE
1365 err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1366 if (unlikely(err == -ENOTSUPP)) {
1368 spin_unlock(&adap->ptp_lock);
1371 #endif /* CONFIG_CHELSIO_T4_FCOE */
1373 chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
1374 flits = calc_tx_flits(skb, chip_ver);
1375 ndesc = flits_to_desc(flits);
1376 credits = txq_avail(&q->q) - ndesc;
1378 if (unlikely(credits < 0)) {
1380 dev_err(adap->pdev_dev,
1381 "%s: Tx ring %u full while queue awake!\n",
1384 spin_unlock(&adap->ptp_lock);
1385 return NETDEV_TX_BUSY;
1388 if (is_eth_imm(skb, chip_ver))
1391 if (skb->encapsulation && chip_ver > CHELSIO_T5)
1392 tnl_type = cxgb_encap_offload_supported(skb);
1395 unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
1398 spin_unlock(&adap->ptp_lock);
1402 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1403 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1405 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1408 wr = (void *)&q->q.desc[q->q.pidx];
1409 wr->equiq_to_len16 = htonl(wr_mid);
1410 wr->r3 = cpu_to_be64(0);
1411 end = (u64 *)wr + flits;
1413 len = immediate ? skb->len : 0;
1414 if (ssi->gso_size) {
1415 struct cpl_tx_pkt_lso *lso = (void *)wr;
1416 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1417 int l3hdr_len = skb_network_header_len(skb);
1418 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1419 struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
1422 len += sizeof(*tnl_lso);
1424 len += sizeof(*lso);
1426 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1427 FW_WR_IMMDLEN_V(len));
1429 struct iphdr *iph = ip_hdr(skb);
1431 t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
1432 cpl = (void *)(tnl_lso + 1);
1433 /* Driver is expected to compute partial checksum that
1434 * does not include the IP Total Length.
1436 if (iph->version == 4) {
1439 iph->check = (u16)(~ip_fast_csum((u8 *)iph,
1442 if (skb->ip_summed == CHECKSUM_PARTIAL)
1443 cntrl = hwcsum(adap->params.chip, skb);
1445 lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1446 LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
1448 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1449 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1450 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1451 lso->c.ipid_ofst = htons(0);
1452 lso->c.mss = htons(ssi->gso_size);
1453 lso->c.seqno_offset = htonl(0);
1454 if (is_t4(adap->params.chip))
1455 lso->c.len = htonl(skb->len);
1458 htonl(LSO_T5_XFER_SIZE_V(skb->len));
1459 cpl = (void *)(lso + 1);
1461 if (CHELSIO_CHIP_VERSION(adap->params.chip)
1463 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1465 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1467 cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1468 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1469 TXPKT_IPHDR_LEN_V(l3hdr_len);
1471 sgl = (u64 *)(cpl + 1); /* sgl start here */
1472 if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
1473 /* If current position is already at the end of the
1474 * txq, reset the current to point to start of the queue
1475 * and update the end ptr as well.
1477 if (sgl == (u64 *)q->q.stat) {
1478 int left = (u8 *)end - (u8 *)q->q.stat;
1480 end = (void *)q->q.desc + left;
1481 sgl = (void *)q->q.desc;
1485 q->tx_cso += ssi->gso_segs;
1487 len += sizeof(*cpl);
1489 op = FW_PTP_TX_PKT_WR;
1491 op = FW_ETH_TX_PKT_WR;
1492 wr->op_immdlen = htonl(FW_WR_OP_V(op) |
1493 FW_WR_IMMDLEN_V(len));
1494 cpl = (void *)(wr + 1);
1495 sgl = (u64 *)(cpl + 1);
1496 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1497 cntrl = hwcsum(adap->params.chip, skb) |
1503 if (skb_vlan_tag_present(skb)) {
1505 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1506 #ifdef CONFIG_CHELSIO_T4_FCOE
1507 if (skb->protocol == htons(ETH_P_FCOE))
1508 cntrl |= TXPKT_VLAN_V(
1509 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
1510 #endif /* CONFIG_CHELSIO_T4_FCOE */
1513 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
1514 TXPKT_PF_V(adap->pf);
1516 ctrl0 |= TXPKT_TSTAMP_F;
1517 #ifdef CONFIG_CHELSIO_T4_DCB
1518 if (is_t4(adap->params.chip))
1519 ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
1521 ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
1523 cpl->ctrl0 = htonl(ctrl0);
1524 cpl->pack = htons(0);
1525 cpl->len = htons(skb->len);
1526 cpl->ctrl1 = cpu_to_be64(cntrl);
1529 cxgb4_inline_tx_skb(skb, &q->q, sgl);
1530 dev_consume_skb_any(skb);
1534 cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);
1537 last_desc = q->q.pidx + ndesc - 1;
1538 if (last_desc >= q->q.size)
1539 last_desc -= q->q.size;
1540 q->q.sdesc[last_desc].skb = skb;
1541 q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1544 txq_advance(&q->q, ndesc);
1546 cxgb4_ring_tx_db(adap, &q->q, ndesc);
1548 spin_unlock(&adap->ptp_lock);
1549 return NETDEV_TX_OK;
1553 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1554 * @q: the SGE control Tx queue
1556 * This is a variant of cxgb4_reclaim_completed_tx() that is used
1557 * for Tx queues that send only immediate data (presently just
1558 * the control queues) and thus do not have any sk_buffs to release.
1560 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1562 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
int reclaim = hw_cidx - q->cidx;

if (reclaim < 0)
        reclaim += q->size;

q->in_use -= reclaim;
q->cidx = hw_cidx;
1573 * is_imm - check whether a packet can be sent as immediate data
1576 * Returns true if a packet can be sent as a WR with immediate data.
1578 static inline int is_imm(const struct sk_buff *skb)
1580 return skb->len <= MAX_CTRL_WR_LEN;
1584 * ctrlq_check_stop - check if a control queue is full and should stop
1586 * @wr: most recent WR written to the queue
1588 * Check if a control queue has become full and should be stopped.
1589 * We clean up control queue descriptors very lazily, only when we are out.
1590 * If the queue is still full after reclaiming any completed descriptors
1591 * we suspend it and have the last WR wake it up.
1593 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1595 reclaim_completed_tx_imm(&q->q);
1596 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1597 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1604 * ctrl_xmit - send a packet through an SGE control Tx queue
1605 * @q: the control queue
1608 * Send a packet through an SGE control Tx queue. Packets sent through
1609 * a control queue must fit entirely as immediate data.
1611 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1614 struct fw_wr_hdr *wr;
1616 if (unlikely(!is_imm(skb))) {
1619 return NET_XMIT_DROP;
1622 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1623 spin_lock(&q->sendq.lock);
1625 if (unlikely(q->full)) {
1626 skb->priority = ndesc; /* save for restart */
1627 __skb_queue_tail(&q->sendq, skb);
1628 spin_unlock(&q->sendq.lock);
1632 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1633 cxgb4_inline_tx_skb(skb, &q->q, wr);
1635 txq_advance(&q->q, ndesc);
1636 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1637 ctrlq_check_stop(q, wr);
1639 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
1640 spin_unlock(&q->sendq.lock);
1643 return NET_XMIT_SUCCESS;
1647 * restart_ctrlq - restart a suspended control queue
1648 * @data: the control queue to restart
1650 * Resumes transmission on a suspended Tx control queue.
1652 static void restart_ctrlq(unsigned long data)
1654 struct sk_buff *skb;
1655 unsigned int written = 0;
1656 struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1658 spin_lock(&q->sendq.lock);
1659 reclaim_completed_tx_imm(&q->q);
1660 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
1662 while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1663 struct fw_wr_hdr *wr;
1664 unsigned int ndesc = skb->priority; /* previously saved */
1667 /* Write descriptors and free skbs outside the lock to limit
1668 * wait times. q->full is still set so new skbs will be queued.
1670 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1671 txq_advance(&q->q, ndesc);
1672 spin_unlock(&q->sendq.lock);
1674 cxgb4_inline_tx_skb(skb, &q->q, wr);
1677 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1678 unsigned long old = q->q.stops;
1680 ctrlq_check_stop(q, wr);
1681 if (q->q.stops != old) { /* suspended anew */
1682 spin_lock(&q->sendq.lock);
1687 cxgb4_ring_tx_db(q->adap, &q->q, written);
1690 spin_lock(&q->sendq.lock);
1695 cxgb4_ring_tx_db(q->adap, &q->q, written);
1696 spin_unlock(&q->sendq.lock);
1700 * t4_mgmt_tx - send a management message
1701 * @adap: the adapter
1702 * @skb: the packet containing the management message
1704 * Send a management message through control queue 0.
1706 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1711 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1717 * is_ofld_imm - check whether a packet can be sent as immediate data
1720 * Returns true if a packet can be sent as an offload WR with immediate
1721 * data. We currently use the same limit as for Ethernet packets.
1723 static inline int is_ofld_imm(const struct sk_buff *skb)
1725 struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
1726 unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
1728 if (opcode == FW_CRYPTO_LOOKASIDE_WR)
1729 return skb->len <= SGE_MAX_WR_LEN;
1731 return skb->len <= MAX_IMM_TX_PKT_LEN;
1735 * calc_tx_flits_ofld - calculate # of flits for an offload packet
1738 * Returns the number of flits needed for the given offload packet.
* These packets are already fully constructed and no additional headers
* will be added.
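*
* For example, a hypothetical offload WR with 40 bytes of headers and two
* page fragments, whose data tail extends past the transport header, needs
* 40/8 + sgl_len(3) = 5 + 5 = 10 flits.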
1742 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1744 unsigned int flits, cnt;
1746 if (is_ofld_imm(skb))
1747 return DIV_ROUND_UP(skb->len, 8);
1749 flits = skb_transport_offset(skb) / 8U; /* headers */
1750 cnt = skb_shinfo(skb)->nr_frags;
1751 if (skb_tail_pointer(skb) != skb_transport_header(skb))
1753 return flits + sgl_len(cnt);
1757 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
1758 * @adap: the adapter
1759 * @q: the queue to stop
1761 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
* inability to map packets. A periodic timer attempts to restart
* queues so marked.
1765 static void txq_stop_maperr(struct sge_uld_txq *q)
1769 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1770 q->adap->sge.txq_maperr);
1774 * ofldtxq_stop - stop an offload Tx queue that has become full
1775 * @q: the queue to stop
1776 * @wr: the Work Request causing the queue to become full
1778 * Stops an offload Tx queue that has become full and modifies the packet
1779 * being written to request a wakeup.
1781 static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
1783 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1789 * service_ofldq - service/restart a suspended offload queue
1790 * @q: the offload queue
1792 * Services an offload Tx queue by moving packets from its Pending Send
1793 * Queue to the Hardware TX ring. The function starts and ends with the
1794 * Send Queue locked, but drops the lock while putting the skb at the
1795 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock
1796 * allows more skbs to be added to the Send Queue by other threads.
1797 * The packet being processed at the head of the Pending Send Queue is
1798 * left on the queue in case we experience DMA Mapping errors, etc.
1799 * and need to give up and restart later.
1801 * service_ofldq() can be thought of as a task which opportunistically
1802 * uses other threads execution contexts. We use the Offload Queue
1803 * boolean "service_ofldq_running" to make sure that only one instance
1804 * is ever running at a time ...
1806 static void service_ofldq(struct sge_uld_txq *q)
1808 u64 *pos, *before, *end;
1810 struct sk_buff *skb;
1811 struct sge_txq *txq;
1813 unsigned int written = 0;
1814 unsigned int flits, ndesc;
1816 /* If another thread is currently in service_ofldq() processing the
1817 * Pending Send Queue then there's nothing to do. Otherwise, flag
1818 * that we're doing the work and continue. Examining/modifying
1819 * the Offload Queue boolean "service_ofldq_running" must be done
1820 * while holding the Pending Send Queue Lock.
1822 if (q->service_ofldq_running)
1824 q->service_ofldq_running = true;
1826 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1827 /* We drop the lock while we're working with the skb at the
1828 * head of the Pending Send Queue. This allows more skbs to
1829 * be added to the Pending Send Queue while we're working on
1830 * this one. We don't need to lock to guard the TX Ring
1831 * updates because only one thread of execution is ever
1832 * allowed into service_ofldq() at a time.
1834 spin_unlock(&q->sendq.lock);
1836 cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
1838 flits = skb->priority; /* previously saved */
1839 ndesc = flits_to_desc(flits);
1840 credits = txq_avail(&q->q) - ndesc;
1841 BUG_ON(credits < 0);
1842 if (unlikely(credits < TXQ_STOP_THRES))
1843 ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);
1845 pos = (u64 *)&q->q.desc[q->q.pidx];
1846 if (is_ofld_imm(skb))
1847 cxgb4_inline_tx_skb(skb, &q->q, pos);
1848 else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
1849 (dma_addr_t *)skb->head)) {
1851 spin_lock(&q->sendq.lock);
1854 int last_desc, hdr_len = skb_transport_offset(skb);
1856 /* The WR headers may not fit within one descriptor.
1857 * So we need to deal with wrap-around here.
1859 before = (u64 *)pos;
1860 end = (u64 *)pos + flits;
1862 pos = (void *)inline_tx_skb_header(skb, &q->q,
1865 if (before > (u64 *)pos) {
1866 left = (u8 *)end - (u8 *)txq->stat;
1867 end = (void *)txq->desc + left;
1870 /* If current position is already at the end of the
1871 * ofld queue, reset the current to point to
1872 * start of the queue and update the end ptr as well.
1874 if (pos == (u64 *)txq->stat) {
1875 left = (u8 *)end - (u8 *)txq->stat;
1876 end = (void *)txq->desc + left;
1877 pos = (void *)txq->desc;
1880 cxgb4_write_sgl(skb, &q->q, (void *)pos,
1882 (dma_addr_t *)skb->head);
1883 #ifdef CONFIG_NEED_DMA_MAP_STATE
1884 skb->dev = q->adap->port[0];
1885 skb->destructor = deferred_unmap_destructor;
1887 last_desc = q->q.pidx + ndesc - 1;
1888 if (last_desc >= q->q.size)
1889 last_desc -= q->q.size;
1890 q->q.sdesc[last_desc].skb = skb;
1893 txq_advance(&q->q, ndesc);
1895 if (unlikely(written > 32)) {
1896 cxgb4_ring_tx_db(q->adap, &q->q, written);
1900 /* Reacquire the Pending Send Queue Lock so we can unlink the
1901 * skb we've just successfully transferred to the TX Ring and
1902 * loop for the next skb which may be at the head of the
1903 * Pending Send Queue.
1905 spin_lock(&q->sendq.lock);
1906 __skb_unlink(skb, &q->sendq);
1907 if (is_ofld_imm(skb))
1910 if (likely(written))
1911 cxgb4_ring_tx_db(q->adap, &q->q, written);
/* Indicate that no thread is processing the Pending Send Queue
1916 q->service_ofldq_running = false;
1920 * ofld_xmit - send a packet through an offload queue
1921 * @q: the Tx offload queue
1924 * Send an offload packet through an SGE offload queue.
1926 static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
1928 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
1929 spin_lock(&q->sendq.lock);
1931 /* Queue the new skb onto the Offload Queue's Pending Send Queue. If
1932 * that results in this new skb being the only one on the queue, start
1933 * servicing it. If there are other skbs already on the list, then
1934 * either the queue is currently being processed or it's been stopped
1935 * for some reason and it'll be restarted at a later time. Restart
1936 * paths are triggered by events like experiencing a DMA Mapping Error
1937 * or filling the Hardware TX Ring.
1939 __skb_queue_tail(&q->sendq, skb);
1940 if (q->sendq.qlen == 1)
1943 spin_unlock(&q->sendq.lock);
1944 return NET_XMIT_SUCCESS;
1948 * restart_ofldq - restart a suspended offload queue
1949 * @data: the offload queue to restart
1951 * Resumes transmission on a suspended Tx offload queue.
1953 static void restart_ofldq(unsigned long data)
1955 struct sge_uld_txq *q = (struct sge_uld_txq *)data;
1957 spin_lock(&q->sendq.lock);
1958 q->full = 0; /* the queue actually is completely empty now */
1960 spin_unlock(&q->sendq.lock);
1964 * skb_txq - return the Tx queue an offload packet should use
1967 * Returns the Tx queue an offload packet should use as indicated by bits
1968 * 1-15 in the packet's queue_mapping.
1970 static inline unsigned int skb_txq(const struct sk_buff *skb)
1972 return skb->queue_mapping >> 1;
1976 * is_ctrl_pkt - return whether an offload packet is a control packet
1979 * Returns whether an offload packet should use an OFLD or a CTRL
1980 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
1982 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1984 return skb->queue_mapping & 1;
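/* Taken together (an illustration derived from the two helpers above, not a
* helper provided here), a sender encodes its target as
* skb->queue_mapping = (queue_idx << 1) | is_ctrl.
*/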
1987 static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
1988 unsigned int tx_uld_type)
1990 struct sge_uld_txq_info *txq_info;
1991 struct sge_uld_txq *txq;
1992 unsigned int idx = skb_txq(skb);
1994 if (unlikely(is_ctrl_pkt(skb))) {
1995 /* Single ctrl queue is a requirement for LE workaround path */
1996 if (adap->tids.nsftids)
1998 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
2001 txq_info = adap->sge.uld_txq_info[tx_uld_type];
2002 if (unlikely(!txq_info)) {
2004 return NET_XMIT_DROP;
2007 txq = &txq_info->uldtxq[idx];
2008 return ofld_xmit(txq, skb);
2012 * t4_ofld_send - send an offload packet
2013 * @adap: the adapter
2016 * Sends an offload packet. We use the packet queue_mapping to select the
2017 * appropriate Tx queue as follows: bit 0 indicates whether the packet
2018 * should be sent as regular or control, bits 1-15 select the queue.
2020 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
2025 ret = uld_send(adap, skb, CXGB4_TX_OFLD);
2031 * cxgb4_ofld_send - send an offload packet
2032 * @dev: the net device
2035 * Sends an offload packet. This is an exported version of @t4_ofld_send,
2036 * intended for ULDs.
2038 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
2040 return t4_ofld_send(netdev2adap(dev), skb);
2042 EXPORT_SYMBOL(cxgb4_ofld_send);
2044 static void *inline_tx_header(const void *src,
2045 const struct sge_txq *q,
2046 void *pos, int length)
2048 int left = (void *)q->stat - pos;
2051 if (likely(length <= left)) {
2052 memcpy(pos, src, length);
2055 memcpy(pos, src, left);
2056 memcpy(q->desc, src + left, length - left);
2057 pos = (void *)q->desc + (length - left);
2059 /* 0-pad to multiple of 16 */
2060 p = PTR_ALIGN(pos, 8);
2061 if ((uintptr_t)p & 8) {
2069 * ofld_xmit_direct - copy a WR into offload queue
2070 * @q: the Tx offload queue
2071 * @src: location of WR
2074 * Copy an immediate WR into an uncontended SGE offload queue.
2076 static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
2083 /* Use the lower limit as the cut-off */
2084 if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) {
2086 return NET_XMIT_DROP;
2089 /* Don't return NET_XMIT_CN here as the current
2090 * implementation doesn't queue the request
* using an skb when the following conditions are not met
2093 if (!spin_trylock(&q->sendq.lock))
2094 return NET_XMIT_DROP;
2096 if (q->full || !skb_queue_empty(&q->sendq) ||
2097 q->service_ofldq_running) {
2098 spin_unlock(&q->sendq.lock);
2099 return NET_XMIT_DROP;
2101 ndesc = flits_to_desc(DIV_ROUND_UP(len, 8));
2102 credits = txq_avail(&q->q) - ndesc;
2103 pos = (u64 *)&q->q.desc[q->q.pidx];
2105 /* ofldtxq_stop modifies WR header in-situ */
2106 inline_tx_header(src, &q->q, pos, len);
2107 if (unlikely(credits < TXQ_STOP_THRES))
2108 ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
2109 txq_advance(&q->q, ndesc);
2110 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
2112 spin_unlock(&q->sendq.lock);
2113 return NET_XMIT_SUCCESS;
2116 int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
2117 const void *src, unsigned int len)
2119 struct sge_uld_txq_info *txq_info;
2120 struct sge_uld_txq *txq;
2121 struct adapter *adap;
2124 adap = netdev2adap(dev);
2127 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2128 if (unlikely(!txq_info)) {
2131 return NET_XMIT_DROP;
2133 txq = &txq_info->uldtxq[idx];
2135 ret = ofld_xmit_direct(txq, src, len);
2137 return net_xmit_eval(ret);
2139 EXPORT_SYMBOL(cxgb4_immdata_send);
2142 * t4_crypto_send - send crypto packet
2143 * @adap: the adapter
* Sends a crypto packet. We use the packet queue_mapping to select the
2147 * appropriate Tx queue as follows: bit 0 indicates whether the packet
2148 * should be sent as regular or control, bits 1-15 select the queue.
2150 static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
2155 ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
2161 * cxgb4_crypto_send - send crypto packet
2162 * @dev: the net device
* Sends a crypto packet. This is an exported version of @t4_crypto_send,
2166 * intended for ULDs.
2168 int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
2170 return t4_crypto_send(netdev2adap(dev), skb);
2172 EXPORT_SYMBOL(cxgb4_crypto_send);
2174 static inline void copy_frags(struct sk_buff *skb,
2175 const struct pkt_gl *gl, unsigned int offset)
2179 /* usually there's just one frag */
2180 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
2181 gl->frags[0].offset + offset,
2182 gl->frags[0].size - offset);
2183 skb_shinfo(skb)->nr_frags = gl->nfrags;
2184 for (i = 1; i < gl->nfrags; i++)
2185 __skb_fill_page_desc(skb, i, gl->frags[i].page,
2186 gl->frags[i].offset,
2189 /* get a reference to the last page, we don't own it */
2190 get_page(gl->frags[gl->nfrags - 1].page);
2194 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
2195 * @gl: the gather list
2196 * @skb_len: size of sk_buff main body if it carries fragments
2197 * @pull_len: amount of data to move to the sk_buff's main body
2199 * Builds an sk_buff from the given packet gather list. Returns the
2200 * sk_buff or %NULL if sk_buff allocation failed.
2202 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
2203 unsigned int skb_len, unsigned int pull_len)
2205 struct sk_buff *skb;
2208 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
2209 * size, which is expected since buffers are at least PAGE_SIZEd.
2210 * In this case packets up to RX_COPY_THRES have only one fragment.
2212 if (gl->tot_len <= RX_COPY_THRES) {
2213 skb = dev_alloc_skb(gl->tot_len);
2216 __skb_put(skb, gl->tot_len);
2217 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
2219 skb = dev_alloc_skb(skb_len);
2222 __skb_put(skb, pull_len);
2223 skb_copy_to_linear_data(skb, gl->va, pull_len);
2225 copy_frags(skb, gl, pull_len);
2226 skb->len = gl->tot_len;
2227 skb->data_len = skb->len - pull_len;
2228 skb->truesize += skb->data_len;
2232 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
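/*
 * Illustrative sketch, not part of the original source: a hypothetical ULD
 * Rx handler that turns a gather list into an sk_buff. It reuses the
 * Ethernet path's RX_PKT_SKB_LEN/RX_PULL_LEN sizes; a real ULD may pick its
 * own, and the actual protocol processing is omitted here.
 */
static int example_uld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl)
{
struct sk_buff *skb;

skb = cxgb4_pktgl_to_skb(gl, RX_PKT_SKB_LEN, RX_PULL_LEN);
if (unlikely(!skb))
return -ENOMEM;

/* headers are now linear (first RX_PULL_LEN bytes), rest is in frags */
kfree_skb(skb); /* a real handler would process and deliver the skb */
return 0;
}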
2235 * t4_pktgl_free - free a packet gather list
2236 * @gl: the gather list
2238 * Releases the pages of a packet gather list. We do not own the last
2239 * page on the list and do not free it.
2241 static void t4_pktgl_free(const struct pkt_gl *gl)
2244 const struct page_frag *p;
2246 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
2251 * Process an MPS trace packet. Give it an unused protocol number so it won't
2252 * be delivered to anyone and send it to the stack for capture.
2254 static noinline int handle_trace_pkt(struct adapter *adap,
2255 const struct pkt_gl *gl)
2257 struct sk_buff *skb;
2259 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
2260 if (unlikely(!skb)) {
2265 if (is_t4(adap->params.chip))
2266 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
2268 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
2270 skb_reset_mac_header(skb);
2271 skb->protocol = htons(0xffff);
2272 skb->dev = adap->port[0];
2273 netif_receive_skb(skb);
2278 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
2279 * @adap: the adapter
2280 * @hwtstamps: time stamp structure to update
2281 * @sgetstamp: 60-bit IQE timestamp
2283 * Every ingress queue entry carries a 60-bit timestamp in Core Clock ticks;
2284 * convert it to ktime_t and store it in the skb's shared hwtstamps structure
2286 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
2287 struct skb_shared_hwtstamps *hwtstamps,
2291 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
2293 ns = div_u64(tmp, adap->params.vpd.cclk);
2295 memset(hwtstamps, 0, sizeof(*hwtstamps));
2296 hwtstamps->hwtstamp = ns_to_ktime(ns);
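/*
 * Worked example, added for illustration: assuming vpd.cclk holds the core
 * clock in kHz (as the driver's microsecond conversions do), one tick is
 * 10^6 / cclk ns. With a hypothetical 500 MHz core clock (cclk = 500000),
 * sgetstamp = 1500 ticks gives (1500 * 1000000 + 250000) / 500000 = 3000 ns;
 * the "+ cclk / 2" term above merely rounds to the nearest nanosecond.
 */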
2299 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
2300 const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
2302 struct adapter *adapter = rxq->rspq.adap;
2303 struct sge *s = &adapter->sge;
2304 struct port_info *pi;
2306 struct sk_buff *skb;
2308 skb = napi_get_frags(&rxq->rspq.napi);
2309 if (unlikely(!skb)) {
2311 rxq->stats.rx_drops++;
2315 copy_frags(skb, gl, s->pktshift);
2317 skb->csum_level = 1;
2318 skb->len = gl->tot_len - s->pktshift;
2319 skb->data_len = skb->len;
2320 skb->truesize += skb->data_len;
2321 skb->ip_summed = CHECKSUM_UNNECESSARY;
2322 skb_record_rx_queue(skb, rxq->rspq.idx);
2323 pi = netdev_priv(skb->dev);
2325 cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
2327 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
2328 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
2331 if (unlikely(pkt->vlan_ex)) {
2332 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2333 rxq->stats.vlan_ex++;
2335 ret = napi_gro_frags(&rxq->rspq.napi);
2336 if (ret == GRO_HELD)
2337 rxq->stats.lro_pkts++;
2338 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
2339 rxq->stats.lro_merged++;
2341 rxq->stats.rx_cso++;
2351 * t4_systim_to_hwstamp - read hardware time stamp
2352 * @adap: the adapter
2355 * Read the timestamp from an MPS packet and insert it into the skb, which
2356 * is then forwarded to the PTP application
2358 static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
2359 struct sk_buff *skb)
2361 struct skb_shared_hwtstamps *hwtstamps;
2362 struct cpl_rx_mps_pkt *cpl = NULL;
2363 unsigned char *data;
2366 cpl = (struct cpl_rx_mps_pkt *)skb->data;
2367 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
2368 X_CPL_RX_MPS_PKT_TYPE_PTP))
2369 return RX_PTP_PKT_ERR;
2371 data = skb->data + sizeof(*cpl);
2372 skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
2373 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
2374 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
2375 return RX_PTP_PKT_ERR;
2377 hwtstamps = skb_hwtstamps(skb);
2378 memset(hwtstamps, 0, sizeof(*hwtstamps));
2379 hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
2381 return RX_PTP_PKT_SUC;
2385 * t4_rx_hststamp - Recv PTP Event Message
2386 * @adap: the adapter
2387 * @rsp: the response queue descriptor holding the RX_PKT message
2390 * If PTP is enabled and this is an MPS packet, read the HW timestamp
2392 static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
2393 struct sge_eth_rxq *rxq, struct sk_buff *skb)
2397 if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
2398 !is_t4(adapter->params.chip))) {
2399 ret = t4_systim_to_hwstamp(adapter, skb);
2400 if (ret == RX_PTP_PKT_ERR) {
2402 rxq->stats.rx_drops++;
2406 return RX_NON_PTP_PKT;
2410 * t4_tx_hststamp - Loopback PTP Transmit Event Message
2411 * @adap: the adapter
2413 * @dev: the ingress net device
2415 * Read hardware timestamp for the loopback PTP Tx event message
2417 static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
2418 struct net_device *dev)
2420 struct port_info *pi = netdev_priv(dev);
2422 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
2423 cxgb4_ptp_read_hwstamp(adapter, pi);
2431 * t4_ethrx_handler - process an ingress ethernet packet
2432 * @q: the response queue that received the packet
2433 * @rsp: the response queue descriptor holding the RX_PKT message
2434 * @si: the gather list of packet fragments
2436 * Process an ingress ethernet packet and deliver it to the stack.
2438 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
2439 const struct pkt_gl *si)
2442 struct sk_buff *skb;
2443 const struct cpl_rx_pkt *pkt;
2444 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
2445 struct adapter *adapter = q->adap;
2446 struct sge *s = &q->adap->sge;
2447 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
2448 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
2449 u16 err_vec, tnl_hdr_len = 0;
2450 struct port_info *pi;
2453 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
2454 return handle_trace_pkt(q->adap, si);
2456 pkt = (const struct cpl_rx_pkt *)rsp;
2457 /* Compressed error vector is enabled for T6 only */
2458 if (q->adap->params.tp.rx_pkt_encap) {
2459 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
2460 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec));
2462 err_vec = be16_to_cpu(pkt->err_vec);
2465 csum_ok = pkt->csum_calc && !err_vec &&
2466 (q->netdev->features & NETIF_F_RXCSUM);
2467 if (((pkt->l2info & htonl(RXF_TCP_F)) ||
2469 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
2470 do_gro(rxq, si, pkt, tnl_hdr_len);
2474 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
2475 if (unlikely(!skb)) {
2477 rxq->stats.rx_drops++;
2480 pi = netdev_priv(q->netdev);
2482 /* Handle PTP Event Rx packet */
2483 if (unlikely(pi->ptp_enable)) {
2484 ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
2485 if (ret == RX_PTP_PKT_ERR)
2489 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */
2491 /* Handle the PTP Event Tx Loopback packet */
2492 if (unlikely(pi->ptp_enable && !ret &&
2493 (pkt->l2info & htonl(RXF_UDP_F)) &&
2494 cxgb4_ptp_is_ptp_rx(skb))) {
2495 if (!t4_tx_hststamp(adapter, skb, q->netdev))
2499 skb->protocol = eth_type_trans(skb, q->netdev);
2500 skb_record_rx_queue(skb, q->idx);
2501 if (skb->dev->features & NETIF_F_RXHASH)
2502 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
2508 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
2510 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
2511 if (!pkt->ip_frag) {
2512 skb->ip_summed = CHECKSUM_UNNECESSARY;
2513 rxq->stats.rx_cso++;
2514 } else if (pkt->l2info & htonl(RXF_IP_F)) {
2515 __sum16 c = (__force __sum16)pkt->csum;
2516 skb->csum = csum_unfold(c);
2519 skb->ip_summed = CHECKSUM_UNNECESSARY;
2520 skb->csum_level = 1;
2522 skb->ip_summed = CHECKSUM_COMPLETE;
2524 rxq->stats.rx_cso++;
2527 skb_checksum_none_assert(skb);
2528 #ifdef CONFIG_CHELSIO_T4_FCOE
2529 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
2530 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
2532 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
2533 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
2534 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
2535 if (q->adap->params.tp.rx_pkt_encap)
2537 T6_COMPR_RXERR_SUM_F;
2539 csum_ok = err_vec & RXERR_CSUM_F;
2541 skb->ip_summed = CHECKSUM_UNNECESSARY;
2545 #undef CPL_RX_PKT_FLAGS
2546 #endif /* CONFIG_CHELSIO_T4_FCOE */
2549 if (unlikely(pkt->vlan_ex)) {
2550 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
2551 rxq->stats.vlan_ex++;
2553 skb_mark_napi_id(skb, &q->napi);
2554 netif_receive_skb(skb);
2559 * restore_rx_bufs - put back a packet's Rx buffers
2560 * @si: the packet gather list
2561 * @q: the SGE free list
2562 * @frags: number of FL buffers to restore
2564 * Puts back on an FL the Rx buffers associated with @si. The buffers
2565 * have already been unmapped and are left unmapped; we mark them so to
2566 * prevent further unmapping attempts.
2568 * This function undoes a series of unmap_rx_buf() calls when we find out
2569 * that the current packet can't be processed right away after all and we
2570 * need to come back to it later. This is a very rare event and there's
2571 * no effort to make this particularly efficient.
2573 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
2576 struct rx_sw_desc *d;
2580 q->cidx = q->size - 1;
2583 d = &q->sdesc[q->cidx];
2584 d->page = si->frags[frags].page;
2585 d->dma_addr |= RX_UNMAPPED_BUF;
2591 * is_new_response - check if a response is newly written
2592 * @r: the response descriptor
2593 * @q: the response queue
2595 * Returns true if a response descriptor contains a yet unprocessed response.
2598 static inline bool is_new_response(const struct rsp_ctrl *r,
2599 const struct sge_rspq *q)
2601 return (r->type_gen >> RSPD_GEN_S) == q->gen;
2605 * rspq_next - advance to the next entry in a response queue
2608 * Updates the state of a response queue to advance it to the next entry.
2610 static inline void rspq_next(struct sge_rspq *q)
2612 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
2613 if (unlikely(++q->cidx == q->size)) {
2614 q->cidx = 0;
2615 q->gen ^= 1;
2616 q->cur_desc = q->desc;
2621 * process_responses - process responses from an SGE response queue
2622 * @q: the ingress queue to process
2623 * @budget: how many responses can be processed in this round
2625 * Process responses from an SGE response queue up to the supplied budget.
2626 * Responses include received packets as well as control messages from FW
2629 * Additionally choose the interrupt holdoff time for the next interrupt
2630 * on this queue. If the system is under memory shortage use a fairly
2631 * long delay to help recovery.
2633 static int process_responses(struct sge_rspq *q, int budget)
2636 int budget_left = budget;
2637 const struct rsp_ctrl *rc;
2638 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
2639 struct adapter *adapter = q->adap;
2640 struct sge *s = &adapter->sge;
2642 while (likely(budget_left)) {
2643 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2644 if (!is_new_response(rc, q)) {
2645 if (q->flush_handler)
2646 q->flush_handler(q);
2651 rsp_type = RSPD_TYPE_G(rc->type_gen);
2652 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
2653 struct page_frag *fp;
2655 const struct rx_sw_desc *rsd;
2656 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
2658 if (len & RSPD_NEWBUF_F) {
2659 if (likely(q->offset > 0)) {
2660 free_rx_bufs(q->adap, &rxq->fl, 1);
2663 len = RSPD_LEN_G(len);
2667 /* gather packet fragments */
2668 for (frags = 0, fp = si.frags; ; frags++, fp++) {
2669 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
2670 bufsz = get_buf_size(adapter, rsd);
2671 fp->page = rsd->page;
2672 fp->offset = q->offset;
2673 fp->size = min(bufsz, len);
2677 unmap_rx_buf(q->adap, &rxq->fl);
2680 si.sgetstamp = SGE_TIMESTAMP_G(
2681 be64_to_cpu(rc->last_flit));
2683 * Last buffer remains mapped so explicitly make it
2684 * coherent for CPU access.
2686 dma_sync_single_for_cpu(q->adap->pdev_dev,
2688 fp->size, DMA_FROM_DEVICE);
2690 si.va = page_address(si.frags[0].page) +
2694 si.nfrags = frags + 1;
2695 ret = q->handler(q, q->cur_desc, &si);
2696 if (likely(ret == 0))
2697 q->offset += ALIGN(fp->size, s->fl_align);
2699 restore_rx_bufs(&si, &rxq->fl, frags);
2700 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
2701 ret = q->handler(q, q->cur_desc, NULL);
2703 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
2706 if (unlikely(ret)) {
2707 /* couldn't process descriptor, back off for recovery */
2708 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
2716 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
2717 __refill_fl(q->adap, &rxq->fl);
2718 return budget - budget_left;
2722 * napi_rx_handler - the NAPI handler for Rx processing
2723 * @napi: the napi instance
2724 * @budget: how many packets we can process in this round
2726 * Handler for new data events when using NAPI. This does not need any
2727 * locking or protection from interrupts as data interrupts are off at
2728 * this point and other adapter interrupts do not interfere (the latter
2729 * is not a concern at all with MSI-X as non-data interrupts then have
2730 * a separate handler).
2732 static int napi_rx_handler(struct napi_struct *napi, int budget)
2734 unsigned int params;
2735 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
2739 work_done = process_responses(q, budget);
2740 if (likely(work_done < budget)) {
2743 napi_complete_done(napi, work_done);
2744 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
2746 if (q->adaptive_rx) {
2747 if (work_done > max(timer_pkt_quota[timer_index],
2749 timer_index = (timer_index + 1);
2751 timer_index = timer_index - 1;
2753 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
2754 q->next_intr_params =
2755 QINTR_TIMER_IDX_V(timer_index) |
2757 params = q->next_intr_params;
2759 params = q->next_intr_params;
2760 q->next_intr_params = q->intr_params;
2763 params = QINTR_TIMER_IDX_V(7);
2765 val = CIDXINC_V(work_done) | SEINTARM_V(params);
2767 /* If we don't have access to the new User GTS (T5+), use the old
2768 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2770 if (unlikely(q->bar2_addr == NULL)) {
2771 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2772 val | INGRESSQID_V((u32)q->cntxt_id));
2774 writel(val | INGRESSQID_V(q->bar2_qid),
2775 q->bar2_addr + SGE_UDB_GTS);
2782 * The MSI-X interrupt handler for an SGE response queue.
2784 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
2786 struct sge_rspq *q = cookie;
2788 napi_schedule(&q->napi);
2793 * Process the indirect interrupt entries in the interrupt queue and kick off
2794 * NAPI for each queue that has generated an entry.
2796 static unsigned int process_intrq(struct adapter *adap)
2798 unsigned int credits;
2799 const struct rsp_ctrl *rc;
2800 struct sge_rspq *q = &adap->sge.intrq;
2803 spin_lock(&adap->sge.intrq_lock);
2804 for (credits = 0; ; credits++) {
2805 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2806 if (!is_new_response(rc, q))
2810 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
2811 unsigned int qid = ntohl(rc->pldbuflen_qid);
2813 qid -= adap->sge.ingr_start;
2814 napi_schedule(&adap->sge.ingr_map[qid]->napi);
2820 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
2822 /* If we don't have access to the new User GTS (T5+), use the old
2823 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2825 if (unlikely(q->bar2_addr == NULL)) {
2826 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
2827 val | INGRESSQID_V(q->cntxt_id));
2829 writel(val | INGRESSQID_V(q->bar2_qid),
2830 q->bar2_addr + SGE_UDB_GTS);
2833 spin_unlock(&adap->sge.intrq_lock);
2838 * The MSI interrupt handler, which handles data events from SGE response queues
2839 * as well as error and other async events as they all use the same MSI vector.
2841 static irqreturn_t t4_intr_msi(int irq, void *cookie)
2843 struct adapter *adap = cookie;
2845 if (adap->flags & MASTER_PF)
2846 t4_slow_intr_handler(adap);
2847 process_intrq(adap);
2852 * Interrupt handler for legacy INTx interrupts.
2853 * Handles data events from SGE response queues as well as error and other
2854 * async events as they all use the same interrupt line.
2856 static irqreturn_t t4_intr_intx(int irq, void *cookie)
2858 struct adapter *adap = cookie;
2860 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
2861 if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
2862 process_intrq(adap))
2864 return IRQ_NONE; /* probably shared interrupt */
2868 * t4_intr_handler - select the top-level interrupt handler
2869 * @adap: the adapter
2871 * Selects the top-level interrupt handler based on the type of interrupts
2872 * (MSI-X, MSI, or INTx).
2874 irq_handler_t t4_intr_handler(struct adapter *adap)
2876 if (adap->flags & USING_MSIX)
2877 return t4_sge_intr_msix;
2878 if (adap->flags & USING_MSI)
2879 return t4_intr_msi;
2880 return t4_intr_intx;
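/*
 * Illustrative sketch, not part of the original source: how the handler
 * returned by t4_intr_handler() might be wired up for MSI/INTx. The call
 * site and flags here are hypothetical; the adapter pointer is passed as the
 * cookie because t4_intr_msi()/t4_intr_intx() expect it.
 */
static int example_request_line_irq(struct adapter *adap)
{
return request_irq(adap->pdev->irq, t4_intr_handler(adap), 0,
"cxgb4-example", adap);
}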
2883 static void sge_rx_timer_cb(struct timer_list *t)
2887 struct adapter *adap = from_timer(adap, t, sge.rx_timer);
2888 struct sge *s = &adap->sge;
2890 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2891 for (m = s->starving_fl[i]; m; m &= m - 1) {
2892 struct sge_eth_rxq *rxq;
2893 unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2894 struct sge_fl *fl = s->egr_map[id];
2896 clear_bit(id, s->starving_fl);
2897 smp_mb__after_atomic();
2899 if (fl_starving(adap, fl)) {
2900 rxq = container_of(fl, struct sge_eth_rxq, fl);
2901 if (napi_reschedule(&rxq->rspq.napi))
2904 set_bit(id, s->starving_fl);
2907 /* The remainder of the SGE RX Timer Callback routine is dedicated to
2908 * global Master PF activities like checking for chip ingress stalls,
2911 if (!(adap->flags & MASTER_PF))
2914 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
2917 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2920 static void sge_tx_timer_cb(struct timer_list *t)
2923 unsigned int i, budget;
2924 struct adapter *adap = from_timer(adap, t, sge.tx_timer);
2925 struct sge *s = &adap->sge;
2927 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2928 for (m = s->txq_maperr[i]; m; m &= m - 1) {
2929 unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2930 struct sge_uld_txq *txq = s->egr_map[id];
2932 clear_bit(id, s->txq_maperr);
2933 tasklet_schedule(&txq->qresume_tsk);
2936 if (!is_t4(adap->params.chip)) {
2937 struct sge_eth_txq *q = &s->ptptxq;
2940 spin_lock(&adap->ptp_lock);
2941 avail = reclaimable(&q->q);
2944 free_tx_desc(adap, &q->q, avail, false);
2945 q->q.in_use -= avail;
2947 spin_unlock(&adap->ptp_lock);
2950 budget = MAX_TIMER_TX_RECLAIM;
2951 i = s->ethtxq_rover;
2953 struct sge_eth_txq *q = &s->ethtxq[i];
2956 time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2957 __netif_tx_trylock(q->txq)) {
2958 int avail = reclaimable(&q->q);
2964 free_tx_desc(adap, &q->q, avail, true);
2965 q->q.in_use -= avail;
2968 __netif_tx_unlock(q->txq);
2971 if (++i >= s->ethqsets)
2973 } while (budget && i != s->ethtxq_rover);
2974 s->ethtxq_rover = i;
2975 mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2979 * bar2_address - return the BAR2 address for an SGE Queue's Registers
2980 * @adapter: the adapter
2981 * @qid: the SGE Queue ID
2982 * @qtype: the SGE Queue Type (Egress or Ingress)
2983 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2985 * Returns the BAR2 address for the SGE Queue Registers associated with
2986 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
2987 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2988 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2989 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
2991 static void __iomem *bar2_address(struct adapter *adapter,
2993 enum t4_bar2_qtype qtype,
2994 unsigned int *pbar2_qid)
2999 ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
3000 &bar2_qoffset, pbar2_qid);
3004 return adapter->bar2 + bar2_qoffset;
3007 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
3008 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
3010 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
3011 struct net_device *dev, int intr_idx,
3012 struct sge_fl *fl, rspq_handler_t hnd,
3013 rspq_flush_handler_t flush_hnd, int cong)
3017 struct sge *s = &adap->sge;
3018 struct port_info *pi = netdev_priv(dev);
3019 int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
3021 /* Size needs to be a multiple of 16, including status entry. */
3022 iq->size = roundup(iq->size, 16);
3024 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
3025 &iq->phys_addr, NULL, 0,
3026 dev_to_node(adap->pdev_dev));
3030 memset(&c, 0, sizeof(c));
3031 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
3032 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3033 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
3034 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
3036 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
3037 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
3038 FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
3039 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
3040 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
3042 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
3043 FW_IQ_CMD_IQGTSMODE_F |
3044 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
3045 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
3046 c.iqsize = htons(iq->size);
3047 c.iqaddr = cpu_to_be64(iq->phys_addr);
3049 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
3052 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
3054 /* Allocate the ring for the hardware free list (with space
3055 * for its status page) along with the associated software
3056 * descriptor ring. The free list size needs to be a multiple
3057 * of the Egress Queue Unit and at least 2 Egress Units larger
3058 * than the SGE's Egress Congestion Threshold
3059 * (fl_starve_thres - 1).
3061 if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
3062 fl->size = s->fl_starve_thres - 1 + 2 * 8;
3063 fl->size = roundup(fl->size, 8);
3064 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
3065 sizeof(struct rx_sw_desc), &fl->addr,
3066 &fl->sdesc, s->stat_len,
3067 dev_to_node(adap->pdev_dev));
3071 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
3072 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
3073 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
3074 FW_IQ_CMD_FL0DATARO_V(relaxed) |
3075 FW_IQ_CMD_FL0PADEN_F);
3077 c.iqns_to_fl0congen |=
3078 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
3079 FW_IQ_CMD_FL0CONGCIF_F |
3080 FW_IQ_CMD_FL0CONGEN_F);
3081 /* In T6, for egress queue type FL there is internal overhead
3082 * of 16B for header going into FLM module. Hence the maximum
3083 * allowed burst size is 448 bytes. For T4/T5, the hardware
3084 * doesn't coalesce fetch requests if more than 64 bytes of
3085 * Free List pointers are provided, so we use a 128-byte Fetch
3086 * Burst Minimum there (T6 implements coalescing so we can use
3087 * the smaller 64-byte value there).
3089 c.fl0dcaen_to_fl0cidxfthresh =
3090 htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ?
3091 FETCHBURSTMIN_128B_X :
3092 FETCHBURSTMIN_64B_X) |
3093 FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
3094 FETCHBURSTMAX_512B_X :
3095 FETCHBURSTMAX_256B_X));
3096 c.fl0size = htons(flsz);
3097 c.fl0addr = cpu_to_be64(fl->addr);
3100 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3104 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
3105 iq->cur_desc = iq->desc;
3108 iq->next_intr_params = iq->intr_params;
3109 iq->cntxt_id = ntohs(c.iqid);
3110 iq->abs_id = ntohs(c.physiqid);
3111 iq->bar2_addr = bar2_address(adap,
3113 T4_BAR2_QTYPE_INGRESS,
3115 iq->size--; /* subtract status entry */
3118 iq->flush_handler = flush_hnd;
3120 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
3121 skb_queue_head_init(&iq->lro_mgr.lroq);
3123 /* set offset to -1 to distinguish ingress queues without FL */
3124 iq->offset = fl ? 0 : -1;
3126 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
3129 fl->cntxt_id = ntohs(c.fl0id);
3130 fl->avail = fl->pend_cred = 0;
3131 fl->pidx = fl->cidx = 0;
3132 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
3133 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
3135 /* Note, we must initialize the BAR2 Free List User Doorbell
3136 * information before refilling the Free List!
3138 fl->bar2_addr = bar2_address(adap,
3140 T4_BAR2_QTYPE_EGRESS,
3142 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
3145 /* For T5 and later we attempt to set up the Congestion Manager values
3146 * of the new RX Ethernet Queue. This should really be handled by
3147 * firmware because it's more complex than any host driver wants to
3148 * get involved with, it's different per chip, and this is almost
3149 * certainly wrong. Firmware would be wrong as well, but it would be
3150 * a lot easier to fix in one place ... For now we do something very
3151 * simple (and hopefully less wrong).
3153 if (!is_t4(adap->params.chip) && cong >= 0) {
3154 u32 param, val, ch_map = 0;
3156 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
3158 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
3159 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
3160 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
3162 val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
3165 CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
3166 for (i = 0; i < 4; i++) {
3167 if (cong & (1 << i))
3168 ch_map |= 1 << (i << cng_ch_bits_log);
3170 val |= CONMCTXT_CNGCHMAP_V(ch_map);
3172 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
3175 dev_warn(adap->pdev_dev, "Failed to set Congestion"
3176 " Manager Context for Ingress Queue %d: %d\n",
3177 iq->cntxt_id, -ret);
3186 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
3187 iq->desc, iq->phys_addr);
3190 if (fl && fl->desc) {
3193 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
3194 fl->desc, fl->addr);
3200 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
3203 q->bar2_addr = bar2_address(adap,
3205 T4_BAR2_QTYPE_EGRESS,
3208 q->cidx = q->pidx = 0;
3209 q->stops = q->restarts = 0;
3210 q->stat = (void *)&q->desc[q->size];
3211 spin_lock_init(&q->db_lock);
3212 adap->sge.egr_map[id - adap->sge.egr_start] = q;
3215 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
3216 struct net_device *dev, struct netdev_queue *netdevq,
3220 struct fw_eq_eth_cmd c;
3221 struct sge *s = &adap->sge;
3222 struct port_info *pi = netdev_priv(dev);
3224 /* Add status entries */
3225 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3227 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
3228 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
3229 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
3230 netdev_queue_numa_node_read(netdevq));
3234 memset(&c, 0, sizeof(c));
3235 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
3236 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3237 FW_EQ_ETH_CMD_PFN_V(adap->pf) |
3238 FW_EQ_ETH_CMD_VFN_V(0));
3239 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
3240 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
3241 c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
3242 FW_EQ_ETH_CMD_VIID_V(pi->viid));
3243 c.fetchszm_to_iqid =
3244 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
3245 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
3246 FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
3248 htonl(FW_EQ_ETH_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
3249 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
3250 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
3251 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
3252 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3254 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3256 kfree(txq->q.sdesc);
3257 txq->q.sdesc = NULL;
3258 dma_free_coherent(adap->pdev_dev,
3259 nentries * sizeof(struct tx_desc),
3260 txq->q.desc, txq->q.phys_addr);
3265 txq->q.q_type = CXGB4_TXQ_ETH;
3266 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
3268 txq->tso = txq->tx_cso = txq->vlan_ins = 0;
3269 txq->mapping_err = 0;
3273 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
3274 struct net_device *dev, unsigned int iqid,
3275 unsigned int cmplqid)
3278 struct fw_eq_ctrl_cmd c;
3279 struct sge *s = &adap->sge;
3280 struct port_info *pi = netdev_priv(dev);
3282 /* Add status entries */
3283 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3285 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
3286 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
3287 NULL, 0, dev_to_node(adap->pdev_dev));
3291 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
3292 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3293 FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
3294 FW_EQ_CTRL_CMD_VFN_V(0));
3295 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
3296 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
3297 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
3298 c.physeqid_pkd = htonl(0);
3299 c.fetchszm_to_iqid =
3300 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
3301 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
3302 FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
3304 htonl(FW_EQ_CTRL_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
3305 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
3306 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
3307 FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
3308 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3310 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3312 dma_free_coherent(adap->pdev_dev,
3313 nentries * sizeof(struct tx_desc),
3314 txq->q.desc, txq->q.phys_addr);
3319 txq->q.q_type = CXGB4_TXQ_CTRL;
3320 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
3322 skb_queue_head_init(&txq->sendq);
3323 tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
3328 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
3329 unsigned int cmplqid)
3333 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
3334 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
3335 FW_PARAMS_PARAM_YZ_V(eqid));
3337 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val);
3340 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
3341 struct net_device *dev, unsigned int iqid,
3342 unsigned int uld_type)
3345 struct fw_eq_ofld_cmd c;
3346 struct sge *s = &adap->sge;
3347 struct port_info *pi = netdev_priv(dev);
3348 int cmd = FW_EQ_OFLD_CMD;
3350 /* Add status entries */
3351 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
3353 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
3354 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
3355 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
3360 memset(&c, 0, sizeof(c));
3361 if (unlikely(uld_type == CXGB4_TX_CRYPTO))
3362 cmd = FW_EQ_CTRL_CMD;
3363 c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
3364 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
3365 FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
3366 FW_EQ_OFLD_CMD_VFN_V(0));
3367 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
3368 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
3369 c.fetchszm_to_iqid =
3370 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
3371 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
3372 FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
3374 htonl(FW_EQ_OFLD_CMD_FBMIN_V(FETCHBURSTMIN_64B_X) |
3375 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
3376 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
3377 FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
3378 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
3380 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
3382 kfree(txq->q.sdesc);
3383 txq->q.sdesc = NULL;
3384 dma_free_coherent(adap->pdev_dev,
3385 nentries * sizeof(struct tx_desc),
3386 txq->q.desc, txq->q.phys_addr);
3391 txq->q.q_type = CXGB4_TXQ_ULD;
3392 init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
3394 skb_queue_head_init(&txq->sendq);
3395 tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
3397 txq->mapping_err = 0;
3401 void free_txq(struct adapter *adap, struct sge_txq *q)
3403 struct sge *s = &adap->sge;
3405 dma_free_coherent(adap->pdev_dev,
3406 q->size * sizeof(struct tx_desc) + s->stat_len,
3407 q->desc, q->phys_addr);
3413 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
3416 struct sge *s = &adap->sge;
3417 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
3419 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
3420 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
3421 rq->cntxt_id, fl_id, 0xffff);
3422 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
3423 rq->desc, rq->phys_addr);
3424 netif_napi_del(&rq->napi);
3426 rq->cntxt_id = rq->abs_id = 0;
3430 free_rx_bufs(adap, fl, fl->avail);
3431 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
3432 fl->desc, fl->addr);
3441 * t4_free_ofld_rxqs - free a block of consecutive Rx queues
3442 * @adap: the adapter
3443 * @n: number of queues
3444 * @q: pointer to first queue
3446 * Release the resources of a consecutive block of offload Rx queues.
3448 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
3450 for ( ; n; n--, q++)
3452 free_rspq_fl(adap, &q->rspq,
3453 q->fl.size ? &q->fl : NULL);
3457 * t4_free_sge_resources - free SGE resources
3458 * @adap: the adapter
3460 * Frees resources used by the SGE queue sets.
3462 void t4_free_sge_resources(struct adapter *adap)
3465 struct sge_eth_rxq *eq;
3466 struct sge_eth_txq *etq;
3468 /* stop all Rx queues in order to start them draining */
3469 for (i = 0; i < adap->sge.ethqsets; i++) {
3470 eq = &adap->sge.ethrxq[i];
3472 t4_iq_stop(adap, adap->mbox, adap->pf, 0,
3473 FW_IQ_TYPE_FL_INT_CAP,
3475 eq->fl.size ? eq->fl.cntxt_id : 0xffff,
3479 /* clean up Ethernet Tx/Rx queues */
3480 for (i = 0; i < adap->sge.ethqsets; i++) {
3481 eq = &adap->sge.ethrxq[i];
3483 free_rspq_fl(adap, &eq->rspq,
3484 eq->fl.size ? &eq->fl : NULL);
3486 etq = &adap->sge.ethtxq[i];
3488 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
3490 __netif_tx_lock_bh(etq->txq);
3491 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
3492 __netif_tx_unlock_bh(etq->txq);
3493 kfree(etq->q.sdesc);
3494 free_txq(adap, &etq->q);
3498 /* clean up control Tx queues */
3499 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
3500 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
3503 tasklet_kill(&cq->qresume_tsk);
3504 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
3506 __skb_queue_purge(&cq->sendq);
3507 free_txq(adap, &cq->q);
3511 if (adap->sge.fw_evtq.desc)
3512 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
3514 if (adap->sge.intrq.desc)
3515 free_rspq_fl(adap, &adap->sge.intrq, NULL);
3517 if (!is_t4(adap->params.chip)) {
3518 etq = &adap->sge.ptptxq;
3520 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
3522 spin_lock_bh(&adap->ptp_lock);
3523 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
3524 spin_unlock_bh(&adap->ptp_lock);
3525 kfree(etq->q.sdesc);
3526 free_txq(adap, &etq->q);
3530 /* clear the reverse egress queue map */
3531 memset(adap->sge.egr_map, 0,
3532 adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
3535 void t4_sge_start(struct adapter *adap)
3537 adap->sge.ethtxq_rover = 0;
3538 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
3539 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
3543 * t4_sge_stop - disable SGE operation
3544 * @adap: the adapter
3546 * Stop tasklets and timers associated with the DMA engine. Note that
3547 * this is effective only if measures have been taken to disable any HW
3548 * events that may restart them.
3550 void t4_sge_stop(struct adapter *adap)
3553 struct sge *s = &adap->sge;
3555 if (in_interrupt()) /* actions below require waiting */
3558 if (s->rx_timer.function)
3559 del_timer_sync(&s->rx_timer);
3560 if (s->tx_timer.function)
3561 del_timer_sync(&s->tx_timer);
3563 if (is_offload(adap)) {
3564 struct sge_uld_txq_info *txq_info;
3566 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
3568 struct sge_uld_txq *txq = txq_info->uldtxq;
3570 for_each_ofldtxq(&adap->sge, i) {
3572 tasklet_kill(&txq->qresume_tsk);
3577 if (is_pci_uld(adap)) {
3578 struct sge_uld_txq_info *txq_info;
3580 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
3582 struct sge_uld_txq *txq = txq_info->uldtxq;
3584 for_each_ofldtxq(&adap->sge, i) {
3586 tasklet_kill(&txq->qresume_tsk);
3591 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
3592 struct sge_ctrl_txq *cq = &s->ctrlq[i];
3595 tasklet_kill(&cq->qresume_tsk);
3600 * t4_sge_init_soft - grab core SGE values needed by SGE code
3601 * @adap: the adapter
3603 * Grab the SGE operating parameters that we need to do our job and make
3604 * sure we can live with them.
3607 static int t4_sge_init_soft(struct adapter *adap)
3609 struct sge *s = &adap->sge;
3610 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
3611 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
3612 u32 ingress_rx_threshold;
3615 * Verify that CPL messages are going to the Ingress Queue for
3616 * process_responses() and that only packet data is going to the Free Lists.
3619 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
3620 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
3621 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
3626 * Validate the Host Buffer Register Array indices that we want to
3629 * XXX Note that we should really read through the Host Buffer Size
3630 * XXX register array and find the indices of the Buffer Sizes which
3631 * XXX meet our needs!
3633 #define READ_FL_BUF(x) \
3634 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
3636 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
3637 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
3638 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
3639 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
3641 /* We only bother using the Large Page logic if the Large Page Buffer
3642 * is larger than our Page Size Buffer.
3644 if (fl_large_pg <= fl_small_pg)
3649 /* The Page Size Buffer must be exactly equal to our Page Size and the
3650 * Large Page Size Buffer should be 0 (per above) or a power of 2.
3652 if (fl_small_pg != PAGE_SIZE ||
3653 (fl_large_pg & (fl_large_pg-1)) != 0) {
3654 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
3655 fl_small_pg, fl_large_pg);
3659 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
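/* Added note, illustrative only: with 4 KB pages (PAGE_SHIFT = 12) and a
 * 64 KB large page buffer this works out to fl_pg_order = 16 - 12 = 4,
 * i.e. 16-page free list buffers.
 */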
3661 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
3662 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
3663 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
3664 fl_small_mtu, fl_large_mtu);
3669 * Retrieve our RX interrupt holdoff timer values and counter
3670 * threshold values from the SGE parameters.
3672 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
3673 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
3674 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
3675 s->timer_val[0] = core_ticks_to_us(adap,
3676 TIMERVALUE0_G(timer_value_0_and_1));
3677 s->timer_val[1] = core_ticks_to_us(adap,
3678 TIMERVALUE1_G(timer_value_0_and_1));
3679 s->timer_val[2] = core_ticks_to_us(adap,
3680 TIMERVALUE2_G(timer_value_2_and_3));
3681 s->timer_val[3] = core_ticks_to_us(adap,
3682 TIMERVALUE3_G(timer_value_2_and_3));
3683 s->timer_val[4] = core_ticks_to_us(adap,
3684 TIMERVALUE4_G(timer_value_4_and_5));
3685 s->timer_val[5] = core_ticks_to_us(adap,
3686 TIMERVALUE5_G(timer_value_4_and_5));
3688 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
3689 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
3690 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
3691 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
3692 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
3698 * t4_sge_init - initialize SGE
3699 * @adap: the adapter
3701 * Perform low-level SGE code initialization needed every time after a
3704 int t4_sge_init(struct adapter *adap)
3706 struct sge *s = &adap->sge;
3707 u32 sge_control, sge_conm_ctrl;
3708 int ret, egress_threshold;
3711 * Ingress Padding Boundary and Egress Status Page Size are set up by
3712 * t4_fixup_host_params().
3714 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
3715 s->pktshift = PKTSHIFT_G(sge_control);
3716 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
3718 s->fl_align = t4_fl_pkt_align(adap);
3719 ret = t4_sge_init_soft(adap);
3724 * A FL with <= fl_starve_thres buffers is starving and a periodic
3725 * timer will attempt to refill it. This needs to be larger than the
3726 * SGE's Egress Congestion Threshold. If it isn't, then we can get
3727 * stuck waiting for new packets while the SGE is waiting for us to
3728 * give it more Free List entries. (Note that the SGE's Egress
3729 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
3730 * there was only a single field to control this. For T5 there's the
3731 * original field which now only applies to Unpacked Mode Free List
3732 * buffers and a new field which only applies to Packed Mode Free List
3735 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
3736 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
3738 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
3741 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3744 egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3747 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
3748 CHELSIO_CHIP_VERSION(adap->params.chip));
3751 s->fl_starve_thres = 2*egress_threshold + 1;
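/* Added note, illustrative only: the threshold read above is in units of
 * 2 Free List pointers, so e.g. egress_threshold = 64 means 128 pointers
 * and fl_starve_thres = 2 * 64 + 1 = 129 buffers.
 */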
3753 t4_idma_monitor_init(adap, &s->idma_monitor);
3755 /* Set up timers used for recurring callbacks to process RX and TX
3756 * administrative tasks.
3758 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
3759 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
3761 spin_lock_init(&s->intrq_lock);