2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/skbuff.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/if_vlan.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/jiffies.h>
42 #include <linux/prefetch.h>
43 #include <linux/export.h>
47 #include <net/busy_poll.h>
48 #ifdef CONFIG_CHELSIO_T4_FCOE
49 #include <scsi/fc/fc_fcoe.h>
50 #endif /* CONFIG_CHELSIO_T4_FCOE */
53 #include "t4_values.h"
56 #include "cxgb4_ptp.h"
57 #include "cxgb4_uld.h"
58 #include "cxgb4_tc_mqprio.h"
62 * Rx buffer size. We use largish buffers if possible but settle for single
63 * pages under memory shortage.
66 # define FL_PG_ORDER 0
68 # define FL_PG_ORDER (16 - PAGE_SHIFT)
71 /* RX_PULL_LEN should be <= RX_COPY_THRES */
72 #define RX_COPY_THRES 256
73 #define RX_PULL_LEN 128
76 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
77 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
79 #define RX_PKT_SKB_LEN 512
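/* Illustrative use of the thresholds above (a sketch of the intended
 * copy-vs-fragment behaviour in the Rx packet handlers): a 90-byte ingress
 * packet (<= RX_COPY_THRES) is copied in full into a freshly allocated skb,
 * while a 1400-byte packet gets an skb with an RX_PKT_SKB_LEN linear area,
 * only RX_PULL_LEN bytes pulled into it, and the remainder left as a page
 * fragment.
 */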
82 * Max number of Tx descriptors we clean up at a time. Should be modest as
83 * freeing skbs isn't cheap and it happens while holding locks. We just need
84 * to free packets faster than they arrive, we eventually catch up and keep
85 * the amortized cost reasonable. Must be >= 2 * TXQ_STOP_THRES. It should
86 * also match the CIDX Flush Threshold.
88 #define MAX_TX_RECLAIM 32
91 * Max number of Rx buffers we replenish at a time. Again keep this modest,
92 * allocating buffers isn't cheap either.
94 #define MAX_RX_REFILL 16U
97 * Period of the Rx queue check timer. This timer is infrequent as it has
98 * something to do only when the system experiences severe memory shortage.
100 #define RX_QCHECK_PERIOD (HZ / 2)
103 * Period of the Tx queue check timer.
105 #define TX_QCHECK_PERIOD (HZ / 2)
108 * Max number of Tx descriptors to be reclaimed by the Tx timer.
110 #define MAX_TIMER_TX_RECLAIM 100
113 * Timer index used when backing off due to memory shortage.
115 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
118 * Suspension threshold for non-Ethernet Tx queues. We require enough room
119 * for a full sized WR.
121 #define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
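/* Compile-time cross-check of the MAX_TX_RECLAIM constraint documented
 * above (a sketch only; the driver does not currently carry this
 * assertion):
 *
 *	static_assert(MAX_TX_RECLAIM >= 2 * TXQ_STOP_THRES);
 */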
124 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
127 #define MAX_IMM_TX_PKT_LEN 256
130 * Max size of a WR sent through a control Tx queue.
132 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
134 struct rx_sw_desc { /* SW state per Rx descriptor */
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that.
145 #define FL_MTU_SMALL 1500
146 #define FL_MTU_LARGE 9000
148 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
151 struct sge *s = &adapter->sge;
153 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
156 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
157 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
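/* Worked example of fl_mtu_bufsize() (the pktshift/fl_align values here are
 * illustrative, not read from hardware): with s->pktshift = 2 and
 * s->fl_align = 64, FL_MTU_SMALL_BUFSIZE() = ALIGN(2 + 14 + 4 + 1500, 64)
 * = 1536 bytes and FL_MTU_LARGE_BUFSIZE() = ALIGN(9020, 64) = 9024 bytes.
 */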
160 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
161 * these to specify the buffer size as an index into the SGE Free List Buffer
162 * Size register array. We also use bit 4, when the buffer has been unmapped
163 * for DMA, but this is of course never sent to the hardware and is only used
164 * to prevent double unmappings. All of the above requires that the Free List
165 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
167 * Free List Buffer alignment is 32 bytes, this works out for us ...
170 RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
172 RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
175 * XXX We shouldn't depend on being able to use these indices.
176 * XXX Especially when some other Master PF has initialized the
177 * XXX adapter or we use the Firmware Configuration File. We
178 * XXX should really search through the Host Buffer Size register
179 * XXX array for the appropriately sized buffer indices.
181 RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (PAGE_SIZE << FL_PG_ORDER) page buffer */
184 RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
185 RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
188 static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
189 #define MIN_NAPI_WORK 1
191 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
193 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
196 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
198 return !(d->dma_addr & RX_UNMAPPED_BUF);
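/* Example of the dma_addr encoding described above (illustrative only):
 *
 *	sd->dma_addr = mapping | RX_LARGE_PG_BUF;   (record the size index)
 *	get_buf_addr(sd)  -> mapping                (flag bits masked off)
 *	is_buf_mapped(sd) -> true                   (RX_UNMAPPED_BUF clear)
 */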
202 * txq_avail - return the number of available slots in a Tx queue
205 * Returns the number of descriptors in a Tx queue available to write new
208 static inline unsigned int txq_avail(const struct sge_txq *q)
210 return q->size - 1 - q->in_use;
214 * fl_cap - return the capacity of a free-buffer list
217 * Returns the capacity of a free-buffer list. The capacity is less than
218 * the size because one descriptor needs to be left unpopulated, otherwise
219 * HW will think the FL is empty.
221 static inline unsigned int fl_cap(const struct sge_fl *fl)
223 return fl->size - 8; /* 1 descriptor = 8 buffers */
227 * fl_starving - return whether a Free List is starving.
228 * @adapter: pointer to the adapter
231 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
235 static inline bool fl_starving(const struct adapter *adapter,
236 const struct sge_fl *fl)
238 const struct sge *s = &adapter->sge;
240 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
243 int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
246 const skb_frag_t *fp, *end;
247 const struct skb_shared_info *si;
249 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
250 if (dma_mapping_error(dev, *addr))
253 si = skb_shinfo(skb);
254 end = &si->frags[si->nr_frags];
256 for (fp = si->frags; fp < end; fp++) {
257 *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
259 if (dma_mapping_error(dev, *addr))
265 while (fp-- > si->frags)
266 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
268 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
272 EXPORT_SYMBOL(cxgb4_map_skb);
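/* Typical caller pattern for cxgb4_map_skb()/unmap_skb() (a sketch only;
 * the transmit paths below wrap the resulting SGL in Work Request and CPL
 * headers):
 *
 *	dma_addr_t addr[MAX_SKB_FRAGS + 1];
 *
 *	if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0))
 *		goto out_free;			(nothing is left mapped)
 *	cxgb4_write_sgl(skb, &q->q, sgl, end, 0, addr);
 *	...
 *	unmap_skb(adap->pdev_dev, skb, addr);	(once the DMA has completed)
 */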
274 static void unmap_skb(struct device *dev, const struct sk_buff *skb,
275 const dma_addr_t *addr)
277 const skb_frag_t *fp, *end;
278 const struct skb_shared_info *si;
280 dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
282 si = skb_shinfo(skb);
283 end = &si->frags[si->nr_frags];
284 for (fp = si->frags; fp < end; fp++)
285 dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
288 #ifdef CONFIG_NEED_DMA_MAP_STATE
290 * deferred_unmap_destructor - unmap a packet when it is freed
293 * This is the packet destructor used for Tx packets that need to remain
294 * mapped until they are freed rather than until their Tx descriptors are
297 static void deferred_unmap_destructor(struct sk_buff *skb)
299 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
304 * free_tx_desc - reclaims Tx descriptors and their buffers
306 * @q: the Tx queue to reclaim descriptors from
307 * @n: the number of descriptors to reclaim
308 * @unmap: whether the buffers should be unmapped for DMA
310 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
311 * Tx buffers. Called with the Tx queue lock held.
313 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
314 unsigned int n, bool unmap)
316 unsigned int cidx = q->cidx;
317 struct tx_sw_desc *d;
321 if (d->skb) { /* an SGL is present */
322 if (unmap && d->addr[0]) {
323 unmap_skb(adap->pdev_dev, d->skb, d->addr);
324 memset(d->addr, 0, sizeof(d->addr));
326 dev_consume_skb_any(d->skb);
330 if (++cidx == q->size) {
339 * Return the number of reclaimable descriptors in a Tx queue.
341 static inline int reclaimable(const struct sge_txq *q)
	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));

	hw_cidx -= q->cidx;
	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
349 * reclaim_completed_tx - reclaims completed TX Descriptors
351 * @q: the Tx queue to reclaim completed descriptors from
352 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
353 * @unmap: whether the buffers should be unmapped for DMA
355 * Reclaims Tx Descriptors that the SGE has indicated it has processed,
 * and frees the associated buffers if possible.  If @maxreclaim == -1, then
 * we'll use a default maximum.  Called with the TX Queue locked.
359 static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
360 int maxreclaim, bool unmap)
362 int reclaim = reclaimable(q);
366 * Limit the amount of clean up work we do at a time to keep
367 * the Tx lock hold time O(1).
370 maxreclaim = MAX_TX_RECLAIM;
371 if (reclaim > maxreclaim)
372 reclaim = maxreclaim;
374 free_tx_desc(adap, q, reclaim, unmap);
375 q->in_use -= reclaim;
382 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
384 * @q: the Tx queue to reclaim completed descriptors from
385 * @unmap: whether the buffers should be unmapped for DMA
387 * Reclaims Tx descriptors that the SGE has indicated it has processed,
388 * and frees the associated buffers if possible. Called with the Tx
391 void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
394 (void)reclaim_completed_tx(adap, q, -1, unmap);
396 EXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
398 static inline int get_buf_size(struct adapter *adapter,
399 const struct rx_sw_desc *d)
401 struct sge *s = &adapter->sge;
402 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
405 switch (rx_buf_size_idx) {
406 case RX_SMALL_PG_BUF:
407 buf_size = PAGE_SIZE;
410 case RX_LARGE_PG_BUF:
411 buf_size = PAGE_SIZE << s->fl_pg_order;
414 case RX_SMALL_MTU_BUF:
415 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
418 case RX_LARGE_MTU_BUF:
419 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
430 * free_rx_bufs - free the Rx buffers on an SGE free list
432 * @q: the SGE free list to free buffers from
433 * @n: how many buffers to free
435 * Release the next @n buffers on an SGE free-buffer Rx queue. The
436 * buffers must be made inaccessible to HW before calling this function.
438 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
441 struct rx_sw_desc *d = &q->sdesc[q->cidx];
443 if (is_buf_mapped(d))
444 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
445 get_buf_size(adap, d),
449 if (++q->cidx == q->size)
456 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
458 * @q: the SGE free list
460 * Unmap the current buffer on an SGE free-buffer Rx queue. The
461 * buffer must be made inaccessible to HW before calling this function.
463 * This is similar to @free_rx_bufs above but does not free the buffer.
464 * Do note that the FL still loses any further access to the buffer.
466 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
468 struct rx_sw_desc *d = &q->sdesc[q->cidx];
470 if (is_buf_mapped(d))
471 dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
			       get_buf_size(adap, d), DMA_FROM_DEVICE);
474 if (++q->cidx == q->size)
479 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
481 if (q->pend_cred >= 8) {
482 u32 val = adap->params.arch.sge_fl_db;
484 if (is_t4(adap->params.chip))
485 val |= PIDX_V(q->pend_cred / 8);
		else
			val |= PIDX_T5_V(q->pend_cred / 8);
489 /* Make sure all memory writes to the Free List queue are
490 * committed before we tell the hardware about them.
494 /* If we don't have access to the new User Doorbell (T5+), use
495 * the old doorbell mechanism; otherwise use the new BAR2
498 if (unlikely(q->bar2_addr == NULL)) {
499 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
500 val | QID_V(q->cntxt_id));
502 writel(val | QID_V(q->bar2_qid),
503 q->bar2_addr + SGE_UDB_KDOORBELL);
505 /* This Write memory Barrier will force the write to
506 * the User Doorbell area to be flushed.
514 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
518 sd->dma_addr = mapping; /* includes size low bits */
522 * refill_fl - refill an SGE Rx buffer ring
524 * @q: the ring to refill
525 * @n: the number of new buffers to allocate
526 * @gfp: the gfp flags for the allocations
528 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
529 * allocated with the supplied gfp flags. The caller must assure that
530 * @n does not exceed the queue's capacity. If afterwards the queue is
531 * found critically low mark it as starving in the bitmap of starving FLs.
533 * Returns the number of buffers allocated.
535 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
538 struct sge *s = &adap->sge;
541 unsigned int cred = q->avail;
542 __be64 *d = &q->desc[q->pidx];
543 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
546 #ifdef CONFIG_DEBUG_FS
547 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
552 node = dev_to_node(adap->pdev_dev);
554 if (s->fl_pg_order == 0)
555 goto alloc_small_pages;
558 * Prefer large buffers
561 pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
563 q->large_alloc_failed++;
564 break; /* fall back to single pages */
567 mapping = dma_map_page(adap->pdev_dev, pg, 0,
568 PAGE_SIZE << s->fl_pg_order,
570 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
571 __free_pages(pg, s->fl_pg_order);
573 goto out; /* do not try small pages for this error */
575 mapping |= RX_LARGE_PG_BUF;
576 *d++ = cpu_to_be64(mapping);
578 set_rx_sw_desc(sd, pg, mapping);
582 if (++q->pidx == q->size) {
592 pg = alloc_pages_node(node, gfp, 0);
598 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
600 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
605 *d++ = cpu_to_be64(mapping);
607 set_rx_sw_desc(sd, pg, mapping);
611 if (++q->pidx == q->size) {
618 out: cred = q->avail - cred;
619 q->pend_cred += cred;
622 if (unlikely(fl_starving(adap, q))) {
625 set_bit(q->cntxt_id - adap->sge.egr_start,
626 adap->sge.starving_fl);
632 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
634 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
639 * alloc_ring - allocate resources for an SGE descriptor ring
640 * @dev: the PCI device's core device
641 * @nelem: the number of descriptors
642 * @elem_size: the size of each descriptor
643 * @sw_size: the size of the SW state associated with each ring element
644 * @phys: the physical address of the allocated ring
645 * @metadata: address of the array holding the SW state for the ring
646 * @stat_size: extra space in HW ring for status information
647 * @node: preferred node for memory allocations
649 * Allocates resources for an SGE descriptor ring, such as Tx queues,
650 * free buffer lists, or response queues. Each SGE ring requires
651 * space for its HW descriptors plus, optionally, space for the SW state
652 * associated with each HW entry (the metadata). The function returns
653 * three values: the virtual address for the HW ring (the return value
654 * of the function), the bus address of the HW ring, and the address
657 static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
658 size_t sw_size, dma_addr_t *phys, void *metadata,
659 size_t stat_size, int node)
661 size_t len = nelem * elem_size + stat_size;
663 void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
668 s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);
671 dma_free_coherent(dev, len, p, *phys);
676 *(void **)metadata = s;
681 * sgl_len - calculates the size of an SGL of the given capacity
682 * @n: the number of SGL entries
684 * Calculates the number of flits needed for a scatter/gather list that
685 * can hold the given number of entries.
687 static inline unsigned int sgl_len(unsigned int n)
689 /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
690 * addresses. The DSGL Work Request starts off with a 32-bit DSGL
691 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
692 * repeated sequences of { Length[i], Length[i+1], Address[i],
693 * Address[i+1] } (this ensures that all addresses are on 64-bit
694 * boundaries). If N is even, then Length[N+1] should be set to 0 and
695 * Address[N+1] is omitted.
697 * The following calculation incorporates all of the above. It's
698 * somewhat hard to follow but, briefly: the "+2" accounts for the
699 * first two flits which include the DSGL header, Length0 and
700 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
701 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
 * finally the "+((n-1)&1)" adds the one remaining flit needed if
 * (n-1) is odd.
 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
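/* Worked example of the flit count above (illustrative): an skb with a
 * linear part plus two page fragments needs n = 3 SGL entries.  After the
 * n-- this is (3*2)/2 + (2&1) + 2 = 5 flits: one for the DSGL header and
 * Length0, one for Address0, one for {Length1, Length2}, and one each for
 * Address1 and Address2.  flits_to_desc() below would round 5 flits up to
 * one 64-byte Tx descriptor (a full packet WR also needs header flits; see
 * calc_tx_flits()).
 */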
710 * flits_to_desc - returns the num of Tx descriptors for the given flits
711 * @n: the number of flits
713 * Returns the number of Tx descriptors needed for the supplied number
716 static inline unsigned int flits_to_desc(unsigned int n)
718 BUG_ON(n > SGE_MAX_WR_LEN / 8);
719 return DIV_ROUND_UP(n, 8);
723 * is_eth_imm - can an Ethernet packet be sent as immediate data?
725 * @chip_ver: chip version
727 * Returns whether an Ethernet packet is small enough to fit as
728 * immediate data. Return value corresponds to headroom required.
730 static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
734 if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
735 chip_ver > CHELSIO_T5) {
736 hdrlen = sizeof(struct cpl_tx_tnl_lso);
737 hdrlen += sizeof(struct cpl_tx_pkt_core);
738 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
741 hdrlen = skb_shinfo(skb)->gso_size ?
742 sizeof(struct cpl_tx_pkt_lso_core) : 0;
743 hdrlen += sizeof(struct cpl_tx_pkt);
745 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
751 * calc_tx_flits - calculate the number of flits for a packet Tx WR
753 * @chip_ver: chip version
755 * Returns the number of flits needed for a Tx WR for the given Ethernet
756 * packet, including the needed WR and CPL headers.
758 static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
759 unsigned int chip_ver)
762 int hdrlen = is_eth_imm(skb, chip_ver);
764 /* If the skb is small enough, we can pump it out as a work request
765 * with only immediate data. In that case we just have to have the
766 * TX Packet header plus the skb data in the Work Request.
770 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
772 /* Otherwise, we're going to have to construct a Scatter gather list
773 * of the skb body and fragments. We also include the flits necessary
774 * for the TX Packet Work Request and CPL. We always have a firmware
775 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
776 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
777 * message or, if we're doing a Large Send Offload, an LSO CPL message
778 * with an embedded TX Packet Write CPL message.
780 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
781 if (skb_shinfo(skb)->gso_size) {
782 if (skb->encapsulation && chip_ver > CHELSIO_T5) {
783 hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
784 sizeof(struct cpl_tx_tnl_lso);
785 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
788 pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
790 hdrlen = sizeof(struct fw_eth_tx_eo_wr) +
791 round_up(pkt_hdrlen, 16);
793 hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
794 sizeof(struct cpl_tx_pkt_lso_core);
797 hdrlen += sizeof(struct cpl_tx_pkt_core);
798 flits += (hdrlen / sizeof(__be64));
800 flits += (sizeof(struct fw_eth_tx_pkt_wr) +
801 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
807 * calc_tx_descs - calculate the number of Tx descriptors for a packet
809 * @chip_ver: chip version
811 * Returns the number of Tx descriptors needed for the given Ethernet
812 * packet, including the needed WR and CPL headers.
814 static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
815 unsigned int chip_ver)
817 return flits_to_desc(calc_tx_flits(skb, chip_ver));
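/* Illustrative sizing for the two helpers above (structure sizes are the
 * upstream ones and may differ across chip/firmware revisions): a plain,
 * non-offloaded skb with two page fragments needs sgl_len(3) = 5 flits for
 * the SGL plus (sizeof(struct fw_eth_tx_pkt_wr) +
 * sizeof(struct cpl_tx_pkt_core)) / 8 = (16 + 16) / 8 = 4 header flits,
 * i.e. 9 flits, which calc_tx_descs() turns into 2 Tx descriptors.
 */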
821 * cxgb4_write_sgl - populate a scatter/gather list for a packet
823 * @q: the Tx queue we are writing into
824 * @sgl: starting location for writing the SGL
825 * @end: points right after the end of the SGL
826 * @start: start offset into skb main-body data to include in the SGL
827 * @addr: the list of bus addresses for the SGL elements
829 * Generates a gather list for the buffers that make up a packet.
830 * The caller must provide adequate space for the SGL that will be written.
831 * The SGL includes all of the packet's page fragments and the data in its
832 * main body except for the first @start bytes. @sgl must be 16-byte
833 * aligned and within a Tx descriptor with available space. @end points
834 * right after the end of the SGL but does not account for any potential
835 * wrap around, i.e., @end > @sgl.
837 void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
838 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
839 const dma_addr_t *addr)
842 struct ulptx_sge_pair *to;
843 const struct skb_shared_info *si = skb_shinfo(skb);
844 unsigned int nfrags = si->nr_frags;
845 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
847 len = skb_headlen(skb) - start;
849 sgl->len0 = htonl(len);
850 sgl->addr0 = cpu_to_be64(addr[0] + start);
853 sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
854 sgl->addr0 = cpu_to_be64(addr[1]);
857 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
858 ULPTX_NSGE_V(nfrags));
859 if (likely(--nfrags == 0))
862 * Most of the complexity below deals with the possibility we hit the
863 * end of the queue in the middle of writing the SGL. For this case
864 * only we create the SGL in a temporary buffer and then copy it.
866 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
868 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
869 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
870 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
871 to->addr[0] = cpu_to_be64(addr[i]);
872 to->addr[1] = cpu_to_be64(addr[++i]);
875 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
876 to->len[1] = cpu_to_be32(0);
877 to->addr[0] = cpu_to_be64(addr[i + 1]);
879 if (unlikely((u8 *)end > (u8 *)q->stat)) {
880 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
883 memcpy(sgl->sge, buf, part0);
884 part1 = (u8 *)end - (u8 *)q->stat;
885 memcpy(q->desc, (u8 *)buf + part0, part1);
886 end = (void *)q->desc + part1;
888 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
891 EXPORT_SYMBOL(cxgb4_write_sgl);
893 /* cxgb4_write_partial_sgl - populate SGL for partial packet
895 * @q: the Tx queue we are writing into
896 * @sgl: starting location for writing the SGL
897 * @end: points right after the end of the SGL
898 * @addr: the list of bus addresses for the SGL elements
899 * @start: start offset in the SKB where partial data starts
900 * @len: length of data from @start to send out
902 * This API will handle sending out partial data of a skb if required.
903 * Unlike cxgb4_write_sgl, @start can be any offset into the skb data,
904 * and @len will decide how much data after @start offset to send out.
906 void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
907 struct ulptx_sgl *sgl, u64 *end,
908 const dma_addr_t *addr, u32 start, u32 len)
910 struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to;
911 u32 frag_size, skb_linear_data_len = skb_headlen(skb);
912 struct skb_shared_info *si = skb_shinfo(skb);
913 u8 i = 0, frag_idx = 0, nfrags = 0;
916 /* Fill the first SGL either from linear data or from partial
917 * frag based on @start.
919 if (unlikely(start < skb_linear_data_len)) {
920 frag_size = min(len, skb_linear_data_len - start);
921 sgl->len0 = htonl(frag_size);
922 sgl->addr0 = cpu_to_be64(addr[0] + start);
926 start -= skb_linear_data_len;
927 frag = &si->frags[frag_idx];
928 frag_size = skb_frag_size(frag);
929 /* find the first frag */
930 while (start >= frag_size) {
933 frag = &si->frags[frag_idx];
934 frag_size = skb_frag_size(frag);
937 frag_size = min(len, skb_frag_size(frag) - start);
938 sgl->len0 = cpu_to_be32(frag_size);
939 sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start);
945 /* If the entire partial data fit in one SGL, then send it out
951 /* Most of the complexity below deals with the possibility we hit the
952 * end of the queue in the middle of writing the SGL. For this case
953 * only we create the SGL in a temporary buffer and then copy it.
955 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
957 /* If the skb couldn't fit in first SGL completely, fill the
958 * rest of the frags in subsequent SGLs. Note that each SGL
959 * pair can store 2 frags.
962 frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
963 to->len[i & 1] = cpu_to_be32(frag_size);
964 to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]);
973 /* If we ended in an odd boundary, then set the second SGL's
974 * length in the pair to 0.
977 to->len[1] = cpu_to_be32(0);
979 /* Copy from temporary buffer to Tx ring, in case we hit the
980 * end of the queue in the middle of writing the SGL.
982 if (unlikely((u8 *)end > (u8 *)q->stat)) {
983 u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
986 memcpy(sgl->sge, buf, part0);
987 part1 = (u8 *)end - (u8 *)q->stat;
988 memcpy(q->desc, (u8 *)buf + part0, part1);
989 end = (void *)q->desc + part1;
992 /* 0-pad to multiple of 16 */
993 if ((uintptr_t)end & 8)
996 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
997 ULPTX_NSGE_V(nfrags));
999 EXPORT_SYMBOL(cxgb4_write_partial_sgl);
/* This function copies a 64-byte coalesced Work Request to memory-mapped
 * BAR2 space.  For coalesced WRs, the SGE fetches data from its FIFO
 * instead of from Host memory.
 */
1005 static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
1018 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
1019 * @adap: the adapter
1021 * @n: number of new descriptors to give to HW
 * Ring the doorbell for a Tx queue.
1025 inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
1027 /* Make sure that all writes to the TX Descriptors are committed
1028 * before we tell the hardware about them.
1032 /* If we don't have access to the new User Doorbell (T5+), use the old
1033 * doorbell mechanism; otherwise use the new BAR2 mechanism.
1035 if (unlikely(q->bar2_addr == NULL)) {
1036 u32 val = PIDX_V(n);
1037 unsigned long flags;
1039 /* For T4 we need to participate in the Doorbell Recovery
1042 spin_lock_irqsave(&q->db_lock, flags);
1043 if (!q->db_disabled)
1044 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
1045 QID_V(q->cntxt_id) | val);
1047 q->db_pidx_inc += n;
1048 q->db_pidx = q->pidx;
1049 spin_unlock_irqrestore(&q->db_lock, flags);
1051 u32 val = PIDX_T5_V(n);
1053 /* T4 and later chips share the same PIDX field offset within
1054 * the doorbell, but T5 and later shrank the field in order to
1055 * gain a bit for Doorbell Priority. The field was absurdly
1056 * large in the first place (14 bits) so we just use the T5
1057 * and later limits and warn if a Queue ID is too large.
1059 WARN_ON(val & DBPRIO_F);
1061 /* If we're only writing a single TX Descriptor and we can use
1062 * Inferred QID registers, we can use the Write Combining
1063 * Gather Buffer; otherwise we use the simple doorbell.
1065 if (n == 1 && q->bar2_qid == 0) {
1066 int index = (q->pidx
1069 u64 *wr = (u64 *)&q->desc[index];
1071 cxgb_pio_copy((u64 __iomem *)
1072 (q->bar2_addr + SGE_UDB_WCDOORBELL),
1075 writel(val | QID_V(q->bar2_qid),
1076 q->bar2_addr + SGE_UDB_KDOORBELL);
1079 /* This Write Memory Barrier will force the write to the User
1080 * Doorbell area to be flushed. This is needed to prevent
1081 * writes on different CPUs for the same queue from hitting
1082 * the adapter out of order. This is required when some Work
1083 * Requests take the Write Combine Gather Buffer path (user
1084 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
1085 * take the traditional path where we simply increment the
1086 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
1087 * hardware DMA read the actual Work Request.
1092 EXPORT_SYMBOL(cxgb4_ring_tx_db);
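/* Typical producer-side sequence around the exported helper above (a
 * sketch; it mirrors how the transmit routines below drive their queues):
 *
 *	(write the Work Request at &q->desc[q->pidx])
 *	txq_advance(q, ndesc);
 *	cxgb4_ring_tx_db(adap, q, ndesc);
 */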
1095 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
1097 * @q: the Tx queue where the packet will be inlined
1098 * @pos: starting position in the Tx queue where to inline the packet
1100 * Inline a packet's contents directly into Tx descriptors, starting at
1101 * the given position within the Tx DMA ring.
1102 * Most of the complexity of this operation is dealing with wrap arounds
1103 * in the middle of the packet we want to inline.
1105 void cxgb4_inline_tx_skb(const struct sk_buff *skb,
1106 const struct sge_txq *q, void *pos)
1108 int left = (void *)q->stat - pos;
1111 if (likely(skb->len <= left)) {
1112 if (likely(!skb->data_len))
1113 skb_copy_from_linear_data(skb, pos, skb->len);
1115 skb_copy_bits(skb, 0, pos, skb->len);
1118 skb_copy_bits(skb, 0, pos, left);
1119 skb_copy_bits(skb, left, q->desc, skb->len - left);
1120 pos = (void *)q->desc + (skb->len - left);
1123 /* 0-pad to multiple of 16 */
1124 p = PTR_ALIGN(pos, 8);
1125 if ((uintptr_t)p & 8)
1128 EXPORT_SYMBOL(cxgb4_inline_tx_skb);
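/* Worked example of the wrap handling above (illustrative): with a
 * 100-byte skb and only 40 bytes left before q->stat, the first 40 bytes
 * are copied at @pos and the remaining 60 continue at q->desc, after which
 * the write position is 0-padded up to a 16-byte boundary.
 */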
1130 static void *inline_tx_skb_header(const struct sk_buff *skb,
1131 const struct sge_txq *q, void *pos,
1135 int left = (void *)q->stat - pos;
1137 if (likely(length <= left)) {
1138 memcpy(pos, skb->data, length);
1141 memcpy(pos, skb->data, left);
1142 memcpy(q->desc, skb->data + left, length - left);
1143 pos = (void *)q->desc + (length - left);
1145 /* 0-pad to multiple of 16 */
1146 p = PTR_ALIGN(pos, 8);
1147 if ((uintptr_t)p & 8) {
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
1158 static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
1161 bool inner_hdr_csum = false;
1164 if (skb->encapsulation &&
1165 (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
1166 inner_hdr_csum = true;
1168 if (inner_hdr_csum) {
1169 ver = inner_ip_hdr(skb)->version;
1170 proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
1171 inner_ipv6_hdr(skb)->nexthdr;
1173 ver = ip_hdr(skb)->version;
1174 proto = (ver == 4) ? ip_hdr(skb)->protocol :
1175 ipv6_hdr(skb)->nexthdr;
1179 if (proto == IPPROTO_TCP)
1180 csum_type = TX_CSUM_TCPIP;
1181 else if (proto == IPPROTO_UDP)
1182 csum_type = TX_CSUM_UDPIP;
1185 * unknown protocol, disable HW csum
1186 * and hope a bad packet is detected
1188 return TXPKT_L4CSUM_DIS_F;
1192 * this doesn't work with extension headers
1194 if (proto == IPPROTO_TCP)
1195 csum_type = TX_CSUM_TCPIP6;
1196 else if (proto == IPPROTO_UDP)
1197 csum_type = TX_CSUM_UDPIP6;
1202 if (likely(csum_type >= TX_CSUM_TCPIP)) {
1203 int eth_hdr_len, l4_len;
1206 if (inner_hdr_csum) {
1207 /* This allows checksum offload for all encapsulated
1208 * packets like GRE etc..
1210 l4_len = skb_inner_network_header_len(skb);
1211 eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
1213 l4_len = skb_network_header_len(skb);
1214 eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
1216 hdr_len = TXPKT_IPHDR_LEN_V(l4_len);
1218 if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
1219 hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1221 hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
1222 return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
1224 int start = skb_transport_offset(skb);
1226 return TXPKT_CSUM_TYPE_V(csum_type) |
1227 TXPKT_CSUM_START_V(start) |
1228 TXPKT_CSUM_LOC_V(start + skb->csum_offset);
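/* Worked example of the checksum control word (illustrative, T6 and a
 * plain TCPv4 frame with a 14-byte Ethernet header and a 20-byte IP
 * header): eth_hdr_len = 14 - ETH_HLEN = 0 and l4_len = 20, so hwcsum()
 * returns TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP) | T6_TXPKT_ETHHDR_LEN_V(0) |
 * TXPKT_IPHDR_LEN_V(20).
 */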
1232 static void eth_txq_stop(struct sge_eth_txq *q)
1234 netif_tx_stop_queue(q->txq);
1238 static inline void txq_advance(struct sge_txq *q, unsigned int n)
1242 if (q->pidx >= q->size)
1246 #ifdef CONFIG_CHELSIO_T4_FCOE
1248 cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1249 const struct port_info *pi, u64 *cntrl)
1251 const struct cxgb_fcoe *fcoe = &pi->fcoe;
1253 if (!(fcoe->flags & CXGB_FCOE_ENABLED))
1256 if (skb->protocol != htons(ETH_P_FCOE))
1259 skb_reset_mac_header(skb);
1260 skb->mac_len = sizeof(struct ethhdr);
1262 skb_set_network_header(skb, skb->mac_len);
1263 skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
1265 if (!cxgb_fcoe_sof_eof_supported(adap, skb))
1268 /* FC CRC offload */
1269 *cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
1270 TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
1271 TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
1272 TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
1273 TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
1276 #endif /* CONFIG_CHELSIO_T4_FCOE */
/* Returns the tunnel type if the hardware supports offloading it.
 * It is called only for T5 and onwards.
 */
1281 enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
1284 enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1285 struct port_info *pi = netdev_priv(skb->dev);
1286 struct adapter *adapter = pi->adapter;
1288 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1289 skb->inner_protocol != htons(ETH_P_TEB))
1292 switch (vlan_get_protocol(skb)) {
1293 case htons(ETH_P_IP):
1294 l4_hdr = ip_hdr(skb)->protocol;
1296 case htons(ETH_P_IPV6):
1297 l4_hdr = ipv6_hdr(skb)->nexthdr;
1305 if (adapter->vxlan_port == udp_hdr(skb)->dest)
1306 tnl_type = TX_TNL_TYPE_VXLAN;
1307 else if (adapter->geneve_port == udp_hdr(skb)->dest)
1308 tnl_type = TX_TNL_TYPE_GENEVE;
1317 static inline void t6_fill_tnl_lso(struct sk_buff *skb,
1318 struct cpl_tx_tnl_lso *tnl_lso,
1319 enum cpl_tx_tnl_lso_type tnl_type)
1322 int in_eth_xtra_len;
1323 int l3hdr_len = skb_network_header_len(skb);
1324 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1325 const struct skb_shared_info *ssi = skb_shinfo(skb);
1326 bool v6 = (ip_hdr(skb)->version == 6);
1328 val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
1329 CPL_TX_TNL_LSO_FIRST_F |
1330 CPL_TX_TNL_LSO_LAST_F |
1331 (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
1332 CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
1333 CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
1334 (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
1335 CPL_TX_TNL_LSO_IPLENSETOUT_F |
1336 (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
1337 tnl_lso->op_to_IpIdSplitOut = htonl(val);
1339 tnl_lso->IpIdOffsetOut = 0;
1341 /* Get the tunnel header length */
1342 val = skb_inner_mac_header(skb) - skb_mac_header(skb);
1343 in_eth_xtra_len = skb_inner_network_header(skb) -
1344 skb_inner_mac_header(skb) - ETH_HLEN;
1347 case TX_TNL_TYPE_VXLAN:
1348 case TX_TNL_TYPE_GENEVE:
1349 tnl_lso->UdpLenSetOut_to_TnlHdrLen =
1350 htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
1351 CPL_TX_TNL_LSO_UDPLENSETOUT_F);
1354 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
1358 tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
1359 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
1360 CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
1364 val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
1365 CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
1366 CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
1367 CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
1368 tnl_lso->Flow_to_TcpHdrLen = htonl(val);
1370 tnl_lso->IpIdOffset = htons(0);
1372 tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
1373 tnl_lso->TCPSeqOffset = htonl(0);
1374 tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
1377 static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
1378 struct cpl_tx_pkt_lso_core *lso)
1380 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1381 int l3hdr_len = skb_network_header_len(skb);
1382 const struct skb_shared_info *ssi;
1385 ssi = skb_shinfo(skb);
1386 if (ssi->gso_type & SKB_GSO_TCPV6)
1389 lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1390 LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
1392 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1393 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1394 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1395 lso->ipid_ofst = htons(0);
1396 lso->mss = htons(ssi->gso_size);
1397 lso->seqno_offset = htonl(0);
1398 if (is_t4(adap->params.chip))
1399 lso->len = htonl(skb->len);
1401 lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
1403 return (void *)(lso + 1);
1407 * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
1408 * @adap: the adapter
1409 * @eq: the Ethernet TX Queue
1410 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
1412 * We're typically called here to update the state of an Ethernet TX
1413 * Queue with respect to the hardware's progress in consuming the TX
1414 * Work Requests that we've put on that Egress Queue. This happens
1415 * when we get Egress Queue Update messages and also prophylactically
1416 * in regular timer-based Ethernet TX Queue maintenance.
1418 int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
1421 unsigned int reclaimed, hw_cidx;
1422 struct sge_txq *q = &eq->q;
1425 if (!q->in_use || !__netif_tx_trylock(eq->txq))
1428 /* Reclaim pending completed TX Descriptors. */
1429 reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
1431 hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
1432 hw_in_use = q->pidx - hw_cidx;
	if (hw_in_use < 0)
		hw_in_use += q->size;
1436 /* If the TX Queue is currently stopped and there's now more than half
1437 * the queue available, restart it. Otherwise bail out since the rest
1438 * of what we want do here is with the possibility of shipping any
1439 * currently buffered Coalesced TX Work Request.
1441 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
1442 netif_tx_wake_queue(eq->txq);
1446 __netif_tx_unlock(eq->txq);
1450 static inline int cxgb4_validate_skb(struct sk_buff *skb,
1451 struct net_device *dev,
1456 /* The chip min packet length is 10 octets but some firmware
1457 * commands have a minimum packet length requirement. So, play
1458 * safe and reject anything shorter than @min_pkt_len.
1460 if (unlikely(skb->len < min_pkt_len))
1463 /* Discard the packet if the length is greater than mtu */
1464 max_pkt_len = ETH_HLEN + dev->mtu;
1466 if (skb_vlan_tagged(skb))
1467 max_pkt_len += VLAN_HLEN;
1469 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
1475 static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
1478 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
1479 wr->u.udpseg.ethlen = skb_network_offset(skb);
1480 wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
1481 wr->u.udpseg.udplen = sizeof(struct udphdr);
1482 wr->u.udpseg.rtplen = 0;
1483 wr->u.udpseg.r4 = 0;
1484 if (skb_shinfo(skb)->gso_size)
1485 wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1487 wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len);
1488 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
1489 wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);
1491 return (void *)(wr + 1);
1495 * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
1497 * @dev: the egress net device
1499 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1501 static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1503 enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
1504 bool ptp_enabled = is_ptp_enabled(skb, dev);
1505 unsigned int last_desc, flits, ndesc;
1506 u32 wr_mid, ctrl0, op, sgl_off = 0;
1507 const struct skb_shared_info *ssi;
1508 int len, qidx, credits, ret, left;
1509 struct tx_sw_desc *sgl_sdesc;
1510 struct fw_eth_tx_eo_wr *eowr;
1511 struct fw_eth_tx_pkt_wr *wr;
1512 struct cpl_tx_pkt_core *cpl;
1513 const struct port_info *pi;
1514 bool immediate = false;
1515 u64 cntrl, *end, *sgl;
1516 struct sge_eth_txq *q;
1517 unsigned int chip_ver;
1518 struct adapter *adap;
1520 ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
1524 pi = netdev_priv(dev);
1526 ssi = skb_shinfo(skb);
1527 #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
1528 if (xfrm_offload(skb) && !ssi->gso_size)
1529 return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev);
1530 #endif /* CHELSIO_IPSEC_INLINE */
1532 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
1533 if (cxgb4_is_ktls_skb(skb) &&
1534 (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
1535 return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
1536 #endif /* CHELSIO_TLS_DEVICE */
1538 qidx = skb_get_queue_mapping(skb);
1540 if (!(adap->ptp_tx_skb)) {
1541 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1542 adap->ptp_tx_skb = skb_get(skb);
1546 q = &adap->sge.ptptxq;
1548 q = &adap->sge.ethtxq[qidx + pi->first_qset];
1550 skb_tx_timestamp(skb);
1552 reclaim_completed_tx(adap, &q->q, -1, true);
1553 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
1555 #ifdef CONFIG_CHELSIO_T4_FCOE
1556 ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1557 if (unlikely(ret == -EOPNOTSUPP))
1559 #endif /* CONFIG_CHELSIO_T4_FCOE */
1561 chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
1562 flits = calc_tx_flits(skb, chip_ver);
1563 ndesc = flits_to_desc(flits);
1564 credits = txq_avail(&q->q) - ndesc;
1566 if (unlikely(credits < 0)) {
1568 dev_err(adap->pdev_dev,
1569 "%s: Tx ring %u full while queue awake!\n",
1571 return NETDEV_TX_BUSY;
1574 if (is_eth_imm(skb, chip_ver))
1577 if (skb->encapsulation && chip_ver > CHELSIO_T5)
1578 tnl_type = cxgb_encap_offload_supported(skb);
1580 last_desc = q->q.pidx + ndesc - 1;
1581 if (last_desc >= q->q.size)
1582 last_desc -= q->q.size;
1583 sgl_sdesc = &q->q.sdesc[last_desc];
1586 unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
1587 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1592 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1593 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1594 /* After we're done injecting the Work Request for this
1595 * packet, we'll be below our "stop threshold" so stop the TX
1596 * Queue now and schedule a request for an SGE Egress Queue
1597 * Update message. The queue will get started later on when
1598 * the firmware processes this Work Request and sends us an
1599 * Egress Queue Status Update message indicating that space
1603 if (chip_ver > CHELSIO_T5)
1604 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1607 wr = (void *)&q->q.desc[q->q.pidx];
1608 eowr = (void *)&q->q.desc[q->q.pidx];
1609 wr->equiq_to_len16 = htonl(wr_mid);
1610 wr->r3 = cpu_to_be64(0);
1611 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
1612 end = (u64 *)eowr + flits;
1614 end = (u64 *)wr + flits;
1616 len = immediate ? skb->len : 0;
1617 len += sizeof(*cpl);
1618 if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
1619 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1620 struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
1623 len += sizeof(*tnl_lso);
1625 len += sizeof(*lso);
1627 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1628 FW_WR_IMMDLEN_V(len));
1630 struct iphdr *iph = ip_hdr(skb);
1632 t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
1633 cpl = (void *)(tnl_lso + 1);
1634 /* Driver is expected to compute partial checksum that
1635 * does not include the IP Total Length.
1637 if (iph->version == 4) {
1640 iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
1642 if (skb->ip_summed == CHECKSUM_PARTIAL)
1643 cntrl = hwcsum(adap->params.chip, skb);
1645 cpl = write_tso_wr(adap, skb, lso);
1646 cntrl = hwcsum(adap->params.chip, skb);
1648 sgl = (u64 *)(cpl + 1); /* sgl start here */
1650 q->tx_cso += ssi->gso_segs;
1651 } else if (ssi->gso_size) {
1655 hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
1657 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
1658 FW_ETH_TX_EO_WR_IMMDLEN_V(len));
1659 cpl = write_eo_udp_wr(skb, eowr, hdrlen);
1660 cntrl = hwcsum(adap->params.chip, skb);
1662 start = (u64 *)(cpl + 1);
1663 sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
1665 if (unlikely(start > sgl)) {
1666 left = (u8 *)end - (u8 *)q->q.stat;
1667 end = (void *)q->q.desc + left;
1671 q->tx_cso += ssi->gso_segs;
1674 op = FW_PTP_TX_PKT_WR;
1676 op = FW_ETH_TX_PKT_WR;
1677 wr->op_immdlen = htonl(FW_WR_OP_V(op) |
1678 FW_WR_IMMDLEN_V(len));
1679 cpl = (void *)(wr + 1);
1680 sgl = (u64 *)(cpl + 1);
1681 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1682 cntrl = hwcsum(adap->params.chip, skb) |
1688 if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
1689 /* If current position is already at the end of the
1690 * txq, reset the current to point to start of the queue
1691 * and update the end ptr as well.
1693 left = (u8 *)end - (u8 *)q->q.stat;
1694 end = (void *)q->q.desc + left;
1695 sgl = (void *)q->q.desc;
1698 if (skb_vlan_tag_present(skb)) {
1700 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
1701 #ifdef CONFIG_CHELSIO_T4_FCOE
1702 if (skb->protocol == htons(ETH_P_FCOE))
1703 cntrl |= TXPKT_VLAN_V(
1704 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
1705 #endif /* CONFIG_CHELSIO_T4_FCOE */
1708 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
1709 TXPKT_PF_V(adap->pf);
1711 ctrl0 |= TXPKT_TSTAMP_F;
1712 #ifdef CONFIG_CHELSIO_T4_DCB
1713 if (is_t4(adap->params.chip))
1714 ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
1716 ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
1718 cpl->ctrl0 = htonl(ctrl0);
1719 cpl->pack = htons(0);
1720 cpl->len = htons(skb->len);
1721 cpl->ctrl1 = cpu_to_be64(cntrl);
1724 cxgb4_inline_tx_skb(skb, &q->q, sgl);
1725 dev_consume_skb_any(skb);
1727 cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
1730 sgl_sdesc->skb = skb;
1733 txq_advance(&q->q, ndesc);
1735 cxgb4_ring_tx_db(adap, &q->q, ndesc);
1736 return NETDEV_TX_OK;
1739 dev_kfree_skb_any(skb);
1740 return NETDEV_TX_OK;
1745 /* Egress Queue sizes, producer and consumer indices are all in units
1746 * of Egress Context Units bytes. Note that as far as the hardware is
1747 * concerned, the free list is an Egress Queue (the host produces free
1748 * buffers which the hardware consumes) and free list entries are
1749 * 64-bit PCI DMA addresses.
1751 EQ_UNIT = SGE_EQ_IDXSIZE,
1752 FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
1753 TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
1755 T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1756 sizeof(struct cpl_tx_pkt_lso_core) +
1757 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
1761 * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
1764 * Returns whether an Ethernet packet is small enough to fit completely as
1767 static inline int t4vf_is_eth_imm(const struct sk_buff *skb)
1769 /* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
1770 * which does not accommodate immediate data. We could dike out all
1771 * of the support code for immediate data but that would tie our hands
 * too much if we ever want to enhance the firmware.  It would also
1773 * create more differences between the PF and VF Drivers.
1779 * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
1782 * Returns the number of flits needed for a TX Work Request for the
1783 * given Ethernet packet, including the needed WR and CPL headers.
1785 static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
1789 /* If the skb is small enough, we can pump it out as a work request
1790 * with only immediate data. In that case we just have to have the
1791 * TX Packet header plus the skb data in the Work Request.
1793 if (t4vf_is_eth_imm(skb))
1794 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
1797 /* Otherwise, we're going to have to construct a Scatter gather list
1798 * of the skb body and fragments. We also include the flits necessary
1799 * for the TX Packet Work Request and CPL. We always have a firmware
1800 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
1801 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
1802 * message or, if we're doing a Large Send Offload, an LSO CPL message
1803 * with an embedded TX Packet Write CPL message.
1805 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
1806 if (skb_shinfo(skb)->gso_size)
1807 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1808 sizeof(struct cpl_tx_pkt_lso_core) +
1809 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
1811 flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
1812 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
1817 * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
1819 * @dev: the egress net device
1821 * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
1823 static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
1824 struct net_device *dev)
1826 unsigned int last_desc, flits, ndesc;
1827 const struct skb_shared_info *ssi;
1828 struct fw_eth_tx_pkt_vm_wr *wr;
1829 struct tx_sw_desc *sgl_sdesc;
1830 struct cpl_tx_pkt_core *cpl;
1831 const struct port_info *pi;
1832 struct sge_eth_txq *txq;
1833 struct adapter *adapter;
1834 int qidx, credits, ret;
1835 size_t fw_hdr_copy_len;
1836 unsigned int chip_ver;
1840 /* The chip minimum packet length is 10 octets but the firmware
1841 * command that we are using requires that we copy the Ethernet header
1842 * (including the VLAN tag) into the header so we reject anything
1843 * smaller than that ...
1845 fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
1846 sizeof(wr->ethtype) + sizeof(wr->vlantci);
1847 ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
1851 /* Figure out which TX Queue we're going to use. */
1852 pi = netdev_priv(dev);
1853 adapter = pi->adapter;
1854 qidx = skb_get_queue_mapping(skb);
1855 WARN_ON(qidx >= pi->nqsets);
1856 txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1858 /* Take this opportunity to reclaim any TX Descriptors whose DMA
1859 * transfers have completed.
1861 reclaim_completed_tx(adapter, &txq->q, -1, true);
1863 /* Calculate the number of flits and TX Descriptors we're going to
1864 * need along with how many TX Descriptors will be left over after
1865 * we inject our Work Request.
1867 flits = t4vf_calc_tx_flits(skb);
1868 ndesc = flits_to_desc(flits);
1869 credits = txq_avail(&txq->q) - ndesc;
1871 if (unlikely(credits < 0)) {
1872 /* Not enough room for this packet's Work Request. Stop the
1873 * TX Queue and return a "busy" condition. The queue will get
1874 * started later on when the firmware informs us that space
1878 dev_err(adapter->pdev_dev,
1879 "%s: TX ring %u full while queue awake!\n",
1881 return NETDEV_TX_BUSY;
1884 last_desc = txq->q.pidx + ndesc - 1;
1885 if (last_desc >= txq->q.size)
1886 last_desc -= txq->q.size;
1887 sgl_sdesc = &txq->q.sdesc[last_desc];
1889 if (!t4vf_is_eth_imm(skb) &&
1890 unlikely(cxgb4_map_skb(adapter->pdev_dev, skb,
1891 sgl_sdesc->addr) < 0)) {
1892 /* We need to map the skb into PCI DMA space (because it can't
1893 * be in-lined directly into the Work Request) and the mapping
1894 * operation failed. Record the error and drop the packet.
1896 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
1901 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
1902 wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1903 if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1904 /* After we're done injecting the Work Request for this
1905 * packet, we'll be below our "stop threshold" so stop the TX
1906 * Queue now and schedule a request for an SGE Egress Queue
1907 * Update message. The queue will get started later on when
1908 * the firmware processes this Work Request and sends us an
1909 * Egress Queue Status Update message indicating that space
1913 if (chip_ver > CHELSIO_T5)
1914 wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1917 /* Start filling in our Work Request. Note that we do _not_ handle
1918 * the WR Header wrapping around the TX Descriptor Ring. If our
1919 * maximum header size ever exceeds one TX Descriptor, we'll need to
1920 * do something else here.
1922 WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1923 wr = (void *)&txq->q.desc[txq->q.pidx];
1924 wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1925 wr->r3[0] = cpu_to_be32(0);
1926 wr->r3[1] = cpu_to_be32(0);
1927 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1928 end = (u64 *)wr + flits;
1930 /* If this is a Large Send Offload packet we'll put in an LSO CPL
1931 * message with an encapsulated TX Packet CPL message. Otherwise we
1932 * just use a TX Packet CPL message.
1934 ssi = skb_shinfo(skb);
1935 if (ssi->gso_size) {
1936 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1937 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1938 int l3hdr_len = skb_network_header_len(skb);
1939 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1942 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1943 FW_WR_IMMDLEN_V(sizeof(*lso) +
1945 /* Fill in the LSO CPL message. */
1947 cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
1951 LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
1952 LSO_IPHDR_LEN_V(l3hdr_len / 4) |
1953 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
1954 lso->ipid_ofst = cpu_to_be16(0);
1955 lso->mss = cpu_to_be16(ssi->gso_size);
1956 lso->seqno_offset = cpu_to_be32(0);
1957 if (is_t4(adapter->params.chip))
1958 lso->len = cpu_to_be32(skb->len);
1960 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
1962 /* Set up TX Packet CPL pointer, control word and perform
1965 cpl = (void *)(lso + 1);
1967 if (chip_ver <= CHELSIO_T5)
1968 cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1970 cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
1972 cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
1973 TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1974 TXPKT_IPHDR_LEN_V(l3hdr_len);
1976 txq->tx_cso += ssi->gso_segs;
1980 len = (t4vf_is_eth_imm(skb)
1981 ? skb->len + sizeof(*cpl)
1984 cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1985 FW_WR_IMMDLEN_V(len));
1987 /* Set up TX Packet CPL pointer, control word and perform
1990 cpl = (void *)(wr + 1);
1991 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1992 cntrl = hwcsum(adapter->params.chip, skb) |
1996 cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
2000 /* If there's a VLAN tag present, add that to the list of things to
2001 * do in this Work Request.
2003 if (skb_vlan_tag_present(skb)) {
2005 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
2008 /* Fill in the TX Packet CPL message header. */
2009 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
2010 TXPKT_INTF_V(pi->port_id) |
2012 cpl->pack = cpu_to_be16(0);
2013 cpl->len = cpu_to_be16(skb->len);
2014 cpl->ctrl1 = cpu_to_be64(cntrl);
2016 /* Fill in the body of the TX Packet CPL message with either in-lined
2017 * data or a Scatter/Gather List.
2019 if (t4vf_is_eth_imm(skb)) {
2020 /* In-line the packet's data and free the skb since we don't
2021 * need it any longer.
2023 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
2024 dev_consume_skb_any(skb);
2026 /* Write the skb's Scatter/Gather list into the TX Packet CPL
2027 * message and retain a pointer to the skb so we can free it
2028 * later when its DMA completes. (We store the skb pointer
2029 * in the Software Descriptor corresponding to the last TX
2030 * Descriptor used by the Work Request.)
2032 * The retained skb will be freed when the corresponding TX
2033 * Descriptors are reclaimed after their DMAs complete.
2034 * However, this could take quite a while since, in general,
2035 * the hardware is set up to be lazy about sending DMA
2036 * completion notifications to us and we mostly perform TX
2037 * reclaims in the transmit routine.
 * This is good for performance but means that we rely on new
2040 * TX packets arriving to run the destructors of completed
2041 * packets, which open up space in their sockets' send queues.
2042 * Sometimes we do not get such new packets causing TX to
2043 * stall. A single UDP transmitter is a good example of this
2044 * situation. We have a clean up timer that periodically
2045 * reclaims completed packets but it doesn't run often enough
2046 * (nor do we want it to) to prevent lengthy stalls. A
2047 * solution to this problem is to run the destructor early,
2048 * after the packet is queued but before it's DMAd. A con is
2049 * that we lie to socket memory accounting, but the amount of
2050 * extra memory is reasonable (limited by the number of TX
2051 * descriptors), the packets do actually get freed quickly by
2052 * new packets almost always, and for protocols like TCP that
2053 * wait for acks to really free up the data the extra memory
2054 * is even less. On the positive side we run the destructors
2055 * on the sending CPU rather than on a potentially different
2056 * completing CPU, usually a good thing.
2058 * Run the destructor before telling the DMA engine about the
2059 * packet to make sure it doesn't complete and get freed
2062 struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
2063 struct sge_txq *tq = &txq->q;
2065 /* If the Work Request header was an exact multiple of our TX
2066 * Descriptor length, then it's possible that the starting SGL
2067 * pointer lines up exactly with the end of our TX Descriptor
2068 * ring. If that's the case, wrap around to the beginning
2071 if (unlikely((void *)sgl == (void *)tq->stat)) {
2072 sgl = (void *)tq->desc;
2073 end = (void *)((void *)tq->desc +
2074 ((void *)end - (void *)tq->stat));
2077 cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr);
2079 sgl_sdesc->skb = skb;
2082 /* Advance our internal TX Queue state, tell the hardware about
2083 * the new TX descriptors and return success.
2085 txq_advance(&txq->q, ndesc);
2087 cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
2088 return NETDEV_TX_OK;
2091 /* An error of some sort happened. Free the TX skb and tell the
2092 * OS that we've "dealt" with the packet ...
2094 dev_kfree_skb_any(skb);
2095 return NETDEV_TX_OK;
2099 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
2100 * @q: the SGE control Tx queue
2102 * This is a variant of cxgb4_reclaim_completed_tx() that is used
2103 * for Tx queues that send only immediate data (presently just
2104 * the control queues) and thus do not have any sk_buffs to release.
2106 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
2108 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
2109 int reclaim = hw_cidx - q->cidx;
2114 q->in_use -= reclaim;
2118 static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
2128 void cxgb4_eosw_txq_free_desc(struct adapter *adap,
2129 struct sge_eosw_txq *eosw_txq, u32 ndesc)
2131 struct tx_sw_desc *d;
2133 d = &eosw_txq->desc[eosw_txq->last_cidx];
2137 unmap_skb(adap->pdev_dev, d->skb, d->addr);
2138 memset(d->addr, 0, sizeof(d->addr));
2140 dev_consume_skb_any(d->skb);
2143 eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
2145 d = &eosw_txq->desc[eosw_txq->last_cidx];
2149 static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n)
2151 eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
2152 eosw_txq->inuse += n;
2155 static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
2156 struct sk_buff *skb)
2158 if (eosw_txq->inuse == eosw_txq->ndesc)
2161 eosw_txq->desc[eosw_txq->pidx].skb = skb;
2165 static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq)
2167 return eosw_txq->desc[eosw_txq->last_pidx].skb;
2170 static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
2171 struct sk_buff *skb, u32 hdr_len)
2176 wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
2177 if (skb_shinfo(skb)->gso_size &&
2178 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
2179 wrlen += sizeof(struct cpl_tx_pkt_lso_core);
2181 wrlen += roundup(hdr_len, 16);
2183 /* Packet headers + WR + CPLs */
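/* One flit is 8 bytes (64 bits) of descriptor space. */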
2184 flits = DIV_ROUND_UP(wrlen, 8);
2186 if (skb_shinfo(skb)->nr_frags > 0) {
2187 if (skb_headlen(skb) - hdr_len)
2188 nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1);
2190 nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
2191 } else if (skb->len - hdr_len) {
2195 return flits + nsgl;
2198 static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
2199 struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
2200 u32 hdr_len, u32 wrlen)
2202 const struct skb_shared_info *ssi = skb_shinfo(skb);
2203 struct cpl_tx_pkt_core *cpl;
2204 u32 immd_len, wrlen16;
2208 ver = ip_hdr(skb)->version;
2209 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol;
2211 wrlen16 = DIV_ROUND_UP(wrlen, 16);
2212 immd_len = sizeof(struct cpl_tx_pkt_core);
2213 if (skb_shinfo(skb)->gso_size &&
2214 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
2215 immd_len += sizeof(struct cpl_tx_pkt_lso_core);
2216 immd_len += hdr_len;
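/* Request a firmware completion for the very first WR and then again
 * whenever roughly half of the queue's WR credits have been consumed
 * since the last completion request, so credits are returned promptly.
 */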
2218 if (!eosw_txq->ncompl ||
2219 (eosw_txq->last_compl + wrlen16) >=
2220 (adap->params.ofldq_wr_cred / 2)) {
2223 eosw_txq->last_compl = 0;
2226 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
2227 FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) |
2228 FW_WR_COMPL_V(compl));
2229 wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
2230 FW_WR_FLOWID_V(eosw_txq->hwtid));
2232 if (proto == IPPROTO_UDP) {
2233 cpl = write_eo_udp_wr(skb, wr, hdr_len);
2235 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
2236 wr->u.tcpseg.ethlen = skb_network_offset(skb);
2237 wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
2238 wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
2239 wr->u.tcpseg.tsclk_tsoff = 0;
2240 wr->u.tcpseg.r4 = 0;
2241 wr->u.tcpseg.r5 = 0;
2242 wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);
2244 if (ssi->gso_size) {
2245 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
2247 wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
2248 cpl = write_tso_wr(adap, skb, lso);
2250 wr->u.tcpseg.mss = cpu_to_be16(0xffff);
2251 cpl = (void *)(wr + 1);
2255 eosw_txq->cred -= wrlen16;
2256 eosw_txq->last_compl += wrlen16;
2260 static int ethofld_hard_xmit(struct net_device *dev,
2261 struct sge_eosw_txq *eosw_txq)
2263 struct port_info *pi = netdev2pinfo(dev);
2264 struct adapter *adap = netdev2adap(dev);
2265 u32 wrlen, wrlen16, hdr_len, data_len;
2266 enum sge_eosw_state next_state;
2267 u64 cntrl, *start, *end, *sgl;
2268 struct sge_eohw_txq *eohw_txq;
2269 struct cpl_tx_pkt_core *cpl;
2270 struct fw_eth_tx_eo_wr *wr;
2271 bool skip_eotx_wr = false;
2272 struct tx_sw_desc *d;
2273 struct sk_buff *skb;
2277 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
2278 spin_lock(&eohw_txq->lock);
2279 reclaim_completed_tx_imm(&eohw_txq->q);
2281 d = &eosw_txq->desc[eosw_txq->last_pidx];
2283 skb_tx_timestamp(skb);
2285 wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
2286 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
2287 eosw_txq->last_pidx == eosw_txq->flowc_idx)) {
2290 flits = DIV_ROUND_UP(hdr_len, 8);
2291 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND)
2292 next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY;
2294 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY;
2295 skip_eotx_wr = true;
2297 hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb));
2298 data_len = skb->len - hdr_len;
2299 flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
2301 ndesc = flits_to_desc(flits);
2303 wrlen16 = DIV_ROUND_UP(wrlen, 16);
2305 left = txq_avail(&eohw_txq->q) - ndesc;
2307 /* If there are no descriptors left in hardware queues or no
2308 * CPL credits left in software queues, then wait for them
2309 * to come back and retry again. Note that we always request a
2310 * credit update via interrupt whenever half of the credits have
2311 * been consumed, so the interrupt will eventually restore the
2312 * credits and invoke the Tx path again.
2314 if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) {
2319 if (unlikely(skip_eotx_wr)) {
2321 eosw_txq->state = next_state;
2322 eosw_txq->cred -= wrlen16;
2324 eosw_txq->last_compl = 0;
2325 goto write_wr_headers;
2328 cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
2329 cntrl = hwcsum(adap->params.chip, skb);
2330 if (skb_vlan_tag_present(skb))
2331 cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
2333 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
2334 TXPKT_INTF_V(pi->tx_chan) |
2335 TXPKT_PF_V(adap->pf));
2337 cpl->len = cpu_to_be16(skb->len);
2338 cpl->ctrl1 = cpu_to_be64(cntrl);
2340 start = (u64 *)(cpl + 1);
2343 sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
2346 ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr);
2347 if (unlikely(ret)) {
2348 memset(d->addr, 0, sizeof(d->addr));
2349 eohw_txq->mapping_err++;
2353 end = (u64 *)wr + flits;
2354 if (unlikely(start > sgl)) {
2355 left = (u8 *)end - (u8 *)eohw_txq->q.stat;
2356 end = (void *)eohw_txq->q.desc + left;
2359 if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) {
2360 /* If current position is already at the end of the
2361 * txq, reset the current to point to start of the queue
2362 * and update the end ptr as well.
2364 left = (u8 *)end - (u8 *)eohw_txq->q.stat;
2366 end = (void *)eohw_txq->q.desc + left;
2367 sgl = (void *)eohw_txq->q.desc;
2370 cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len,
2374 if (skb_shinfo(skb)->gso_size) {
2375 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
2379 eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs;
2380 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2384 if (skb_vlan_tag_present(skb))
2385 eohw_txq->vlan_ins++;
2387 txq_advance(&eohw_txq->q, ndesc);
2388 cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
2389 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
2392 spin_unlock(&eohw_txq->lock);
2396 static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
2398 struct sk_buff *skb;
2401 switch (eosw_txq->state) {
2402 case CXGB4_EO_STATE_ACTIVE:
2403 case CXGB4_EO_STATE_FLOWC_OPEN_SEND:
2404 case CXGB4_EO_STATE_FLOWC_CLOSE_SEND:
2405 pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
2407 pktcount += eosw_txq->ndesc;
2409 case CXGB4_EO_STATE_FLOWC_OPEN_REPLY:
2410 case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY:
2411 case CXGB4_EO_STATE_CLOSED:
2416 while (pktcount--) {
2417 skb = eosw_txq_peek(eosw_txq);
2419 eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
2424 ret = ethofld_hard_xmit(dev, eosw_txq);
2430 static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb,
2431 struct net_device *dev)
2433 struct cxgb4_tc_port_mqprio *tc_port_mqprio;
2434 struct port_info *pi = netdev2pinfo(dev);
2435 struct adapter *adap = netdev2adap(dev);
2436 struct sge_eosw_txq *eosw_txq;
2440 ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
2444 tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
2445 qid = skb_get_queue_mapping(skb) - pi->nqsets;
2446 eosw_txq = &tc_port_mqprio->eosw_txq[qid];
2447 spin_lock_bh(&eosw_txq->lock);
2448 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
2451 ret = eosw_txq_enqueue(eosw_txq, skb);
2455 /* SKB is queued for processing until credits are available.
2456 * So, call the destructor now and we'll free the skb later
2457 * after it has been successfully transmitted.
2461 eosw_txq_advance(eosw_txq, 1);
2462 ethofld_xmit(dev, eosw_txq);
2463 spin_unlock_bh(&eosw_txq->lock);
2464 return NETDEV_TX_OK;
2467 spin_unlock_bh(&eosw_txq->lock);
2469 dev_kfree_skb_any(skb);
2470 return NETDEV_TX_OK;
2473 netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
2475 struct port_info *pi = netdev_priv(dev);
2476 u16 qid = skb_get_queue_mapping(skb);
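/* Dispatch: VM-style work requests for ports flagged PRIV_FLAG_PORT_TX_VM,
 * ETHOFLD (mqprio) queues for mappings beyond the normal qsets, PTP
 * packets under the PTP lock, and the regular Ethernet Tx path otherwise.
 */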
2478 if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
2479 return cxgb4_vf_eth_xmit(skb, dev);
2481 if (unlikely(qid >= pi->nqsets))
2482 return cxgb4_ethofld_xmit(skb, dev);
2484 if (is_ptp_enabled(skb, dev)) {
2485 struct adapter *adap = netdev2adap(dev);
2488 spin_lock(&adap->ptp_lock);
2489 ret = cxgb4_eth_xmit(skb, dev);
2490 spin_unlock(&adap->ptp_lock);
2494 return cxgb4_eth_xmit(skb, dev);
2497 static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
2499 int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
2500 int pidx = eosw_txq->pidx;
2501 struct sk_buff *skb;
2507 pktcount += eosw_txq->ndesc;
2509 while (pktcount--) {
2512 pidx += eosw_txq->ndesc;
2514 skb = eosw_txq->desc[pidx].skb;
2516 dev_consume_skb_any(skb);
2517 eosw_txq->desc[pidx].skb = NULL;
2522 eosw_txq->pidx = eosw_txq->last_pidx + 1;
2526 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
2528 * @eotid: ETHOFLD tid to bind/unbind
2529 * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
2531 * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
2532 * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
2535 int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
2537 struct port_info *pi = netdev2pinfo(dev);
2538 struct adapter *adap = netdev2adap(dev);
2539 enum sge_eosw_state next_state;
2540 struct sge_eosw_txq *eosw_txq;
2541 u32 len, len16, nparams = 6;
2542 struct fw_flowc_wr *flowc;
2543 struct eotid_entry *entry;
2544 struct sge_ofld_rxq *rxq;
2545 struct sk_buff *skb;
2548 len = struct_size(flowc, mnemval, nparams);
2549 len16 = DIV_ROUND_UP(len, 16);
2551 entry = cxgb4_lookup_eotid(&adap->tids, eotid);
2555 eosw_txq = (struct sge_eosw_txq *)entry->data;
2559 skb = alloc_skb(len, GFP_KERNEL);
2563 spin_lock_bh(&eosw_txq->lock);
2564 if (tc != FW_SCHED_CLS_NONE) {
2565 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
2568 next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
2570 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
2573 next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
2576 flowc = __skb_put(skb, len);
2577 memset(flowc, 0, len);
2579 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
2580 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) |
2581 FW_WR_FLOWID_V(eosw_txq->hwtid));
2582 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
2583 FW_FLOWC_WR_NPARAMS_V(nparams) |
2585 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
2586 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
2587 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
2588 flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan);
2589 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
2590 flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan);
2591 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
2592 flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);
2593 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
2594 flowc->mnemval[4].val = cpu_to_be32(tc);
2595 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE;
2596 flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ?
2597 FW_FLOWC_MNEM_EOSTATE_CLOSING :
2598 FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
2600 /* Free up any pending skbs to ensure there's room for
2601 * termination FLOWC.
2603 if (tc == FW_SCHED_CLS_NONE)
2604 eosw_txq_flush_pending_skbs(eosw_txq);
2606 ret = eosw_txq_enqueue(eosw_txq, skb);
2608 dev_consume_skb_any(skb);
2612 eosw_txq->state = next_state;
2613 eosw_txq->flowc_idx = eosw_txq->pidx;
2614 eosw_txq_advance(eosw_txq, 1);
2615 ethofld_xmit(dev, eosw_txq);
2618 spin_unlock_bh(&eosw_txq->lock);
2623 * is_imm - check whether a packet can be sent as immediate data
2626 * Returns true if a packet can be sent as a WR with immediate data.
2628 static inline int is_imm(const struct sk_buff *skb)
2630 return skb->len <= MAX_CTRL_WR_LEN;
2634 * ctrlq_check_stop - check if a control queue is full and should stop
2636 * @wr: most recent WR written to the queue
2638 * Check if a control queue has become full and should be stopped.
2639 * We clean up control queue descriptors very lazily, only when we are out.
2640 * If the queue is still full after reclaiming any completed descriptors
2641 * we suspend it and have the last WR wake it up.
2643 static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
2645 reclaim_completed_tx_imm(&q->q);
2646 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
2647 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
2653 #define CXGB4_SELFTEST_LB_STR "CHELSIO_SELFTEST"
2655 int cxgb4_selftest_lb_pkt(struct net_device *netdev)
2657 struct port_info *pi = netdev_priv(netdev);
2658 struct adapter *adap = pi->adapter;
2659 struct cxgb4_ethtool_lb_test *lb;
2660 int ret, i = 0, pkt_len, credits;
2661 struct fw_eth_tx_pkt_wr *wr;
2662 struct cpl_tx_pkt_core *cpl;
2663 u32 ctrl0, ndesc, flits;
2664 struct sge_eth_txq *q;
2667 pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR);
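/* The self-test frame built below carries a broadcast destination MAC,
 * the port's own MAC as source and the CXGB4_SELFTEST_LB_STR marker
 * that the Rx path looks for to complete the loopback test.
 */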
2669 flits = DIV_ROUND_UP(pkt_len + sizeof(*cpl) + sizeof(*wr),
2671 ndesc = flits_to_desc(flits);
2673 lb = &pi->ethtool_lb;
2676 q = &adap->sge.ethtxq[pi->first_qset];
2677 __netif_tx_lock(q->txq, smp_processor_id());
2679 reclaim_completed_tx(adap, &q->q, -1, true);
2680 credits = txq_avail(&q->q) - ndesc;
2681 if (unlikely(credits < 0)) {
2682 __netif_tx_unlock(q->txq);
2686 wr = (void *)&q->q.desc[q->q.pidx];
2687 memset(wr, 0, sizeof(struct tx_desc));
2689 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
2690 FW_WR_IMMDLEN_V(pkt_len +
2692 wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)));
2693 wr->r3 = cpu_to_be64(0);
2695 cpl = (void *)(wr + 1);
2696 sgl = (u8 *)(cpl + 1);
2698 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) |
2699 TXPKT_INTF_V(pi->tx_chan + 4);
2701 cpl->ctrl0 = htonl(ctrl0);
2702 cpl->pack = htons(0);
2703 cpl->len = htons(pkt_len);
2704 cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F);
2706 eth_broadcast_addr(sgl);
2708 ether_addr_copy(&sgl[i], netdev->dev_addr);
2711 snprintf(&sgl[i], sizeof(CXGB4_SELFTEST_LB_STR), "%s",
2712 CXGB4_SELFTEST_LB_STR);
2714 init_completion(&lb->completion);
2715 txq_advance(&q->q, ndesc);
2716 cxgb4_ring_tx_db(adap, &q->q, ndesc);
2717 __netif_tx_unlock(q->txq);
2719 /* wait for the pkt to return */
2720 ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
2732 * ctrl_xmit - send a packet through an SGE control Tx queue
2733 * @q: the control queue
2736 * Send a packet through an SGE control Tx queue. Packets sent through
2737 * a control queue must fit entirely as immediate data.
2739 static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
2742 struct fw_wr_hdr *wr;
2744 if (unlikely(!is_imm(skb))) {
2747 return NET_XMIT_DROP;
2750 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
2751 spin_lock(&q->sendq.lock);
2753 if (unlikely(q->full)) {
2754 skb->priority = ndesc; /* save for restart */
2755 __skb_queue_tail(&q->sendq, skb);
2756 spin_unlock(&q->sendq.lock);
2760 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
2761 cxgb4_inline_tx_skb(skb, &q->q, wr);
2763 txq_advance(&q->q, ndesc);
2764 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
2765 ctrlq_check_stop(q, wr);
2767 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
2768 spin_unlock(&q->sendq.lock);
2771 return NET_XMIT_SUCCESS;
2775 * restart_ctrlq - restart a suspended control queue
2776 * @t: pointer to the tasklet associated with this handler
2778 * Resumes transmission on a suspended Tx control queue.
2780 static void restart_ctrlq(struct tasklet_struct *t)
2782 struct sk_buff *skb;
2783 unsigned int written = 0;
2784 struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
2786 spin_lock(&q->sendq.lock);
2787 reclaim_completed_tx_imm(&q->q);
2788 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
2790 while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
2791 struct fw_wr_hdr *wr;
2792 unsigned int ndesc = skb->priority; /* previously saved */
2795 /* Write descriptors and free skbs outside the lock to limit
2796 * wait times. q->full is still set so new skbs will be queued.
2798 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
2799 txq_advance(&q->q, ndesc);
2800 spin_unlock(&q->sendq.lock);
2802 cxgb4_inline_tx_skb(skb, &q->q, wr);
2805 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
2806 unsigned long old = q->q.stops;
2808 ctrlq_check_stop(q, wr);
2809 if (q->q.stops != old) { /* suspended anew */
2810 spin_lock(&q->sendq.lock);
2815 cxgb4_ring_tx_db(q->adap, &q->q, written);
2818 spin_lock(&q->sendq.lock);
2823 cxgb4_ring_tx_db(q->adap, &q->q, written);
2824 spin_unlock(&q->sendq.lock);
2828 * t4_mgmt_tx - send a management message
2829 * @adap: the adapter
2830 * @skb: the packet containing the management message
2832 * Send a management message through control queue 0.
2834 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
2839 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
2845 * is_ofld_imm - check whether a packet can be sent as immediate data
2848 * Returns true if a packet can be sent as an offload WR with immediate
2850 * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to its 8-bit length field.
2851 * FW_ULPTX_WR commands, however, have a 256-byte immediate-only payload limit.
2854 static inline int is_ofld_imm(const struct sk_buff *skb)
2856 struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
2857 unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
2859 if (unlikely(opcode == FW_ULPTX_WR))
2860 return skb->len <= MAX_IMM_ULPTX_WR_LEN;
2861 else if (opcode == FW_CRYPTO_LOOKASIDE_WR)
2862 return skb->len <= SGE_MAX_WR_LEN;
2864 return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
2868 * calc_tx_flits_ofld - calculate # of flits for an offload packet
2871 * Returns the number of flits needed for the given offload packet.
2872 * These packets are already fully constructed and no additional headers
2875 static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
2877 unsigned int flits, cnt;
2879 if (is_ofld_imm(skb))
2880 return DIV_ROUND_UP(skb->len, 8);
2882 flits = skb_transport_offset(skb) / 8U; /* headers */
2883 cnt = skb_shinfo(skb)->nr_frags;
2884 if (skb_tail_pointer(skb) != skb_transport_header(skb))
2886 return flits + sgl_len(cnt);
2890 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
2891 * @q: the queue to stop
2893 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
2894 * inability to map packets. A periodic timer attempts to restart
2897 static void txq_stop_maperr(struct sge_uld_txq *q)
2901 set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
2902 q->adap->sge.txq_maperr);
2906 * ofldtxq_stop - stop an offload Tx queue that has become full
2907 * @q: the queue to stop
2908 * @wr: the Work Request causing the queue to become full
2910 * Stops an offload Tx queue that has become full and modifies the packet
2911 * being written to request a wakeup.
2913 static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
2915 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
2921 * service_ofldq - service/restart a suspended offload queue
2922 * @q: the offload queue
2924 * Services an offload Tx queue by moving packets from its Pending Send
2925 * Queue to the Hardware TX ring. The function starts and ends with the
2926 * Send Queue locked, but drops the lock while putting the skb at the
2927 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock
2928 * allows more skbs to be added to the Send Queue by other threads.
2929 * The packet being processed at the head of the Pending Send Queue is
2930 * left on the queue in case we experience DMA Mapping errors, etc.
2931 * and need to give up and restart later.
2933 * service_ofldq() can be thought of as a task which opportunistically
2934 * uses other threads execution contexts. We use the Offload Queue
2935 * boolean "service_ofldq_running" to make sure that only one instance
2936 * is ever running at a time ...
2938 static void service_ofldq(struct sge_uld_txq *q)
2939 __must_hold(&q->sendq.lock)
2941 u64 *pos, *before, *end;
2943 struct sk_buff *skb;
2944 struct sge_txq *txq;
2946 unsigned int written = 0;
2947 unsigned int flits, ndesc;
2949 /* If another thread is currently in service_ofldq() processing the
2950 * Pending Send Queue then there's nothing to do. Otherwise, flag
2951 * that we're doing the work and continue. Examining/modifying
2952 * the Offload Queue boolean "service_ofldq_running" must be done
2953 * while holding the Pending Send Queue Lock.
2955 if (q->service_ofldq_running)
2957 q->service_ofldq_running = true;
2959 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
2960 /* We drop the lock while we're working with the skb at the
2961 * head of the Pending Send Queue. This allows more skbs to
2962 * be added to the Pending Send Queue while we're working on
2963 * this one. We don't need to lock to guard the TX Ring
2964 * updates because only one thread of execution is ever
2965 * allowed into service_ofldq() at a time.
2967 spin_unlock(&q->sendq.lock);
2969 cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
2971 flits = skb->priority; /* previously saved */
2972 ndesc = flits_to_desc(flits);
2973 credits = txq_avail(&q->q) - ndesc;
2974 BUG_ON(credits < 0);
2975 if (unlikely(credits < TXQ_STOP_THRES))
2976 ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);
2978 pos = (u64 *)&q->q.desc[q->q.pidx];
2979 if (is_ofld_imm(skb))
2980 cxgb4_inline_tx_skb(skb, &q->q, pos);
2981 else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
2982 (dma_addr_t *)skb->head)) {
2984 spin_lock(&q->sendq.lock);
2987 int last_desc, hdr_len = skb_transport_offset(skb);
2989 /* The WR headers may not fit within one descriptor.
2990 * So we need to deal with wrap-around here.
2992 before = (u64 *)pos;
2993 end = (u64 *)pos + flits;
2995 pos = (void *)inline_tx_skb_header(skb, &q->q,
2998 if (before > (u64 *)pos) {
2999 left = (u8 *)end - (u8 *)txq->stat;
3000 end = (void *)txq->desc + left;
3003 /* If current position is already at the end of the
3004 * ofld queue, reset the current to point to
3005 * start of the queue and update the end ptr as well.
3007 if (pos == (u64 *)txq->stat) {
3008 left = (u8 *)end - (u8 *)txq->stat;
3009 end = (void *)txq->desc + left;
3010 pos = (void *)txq->desc;
3013 cxgb4_write_sgl(skb, &q->q, (void *)pos,
3015 (dma_addr_t *)skb->head);
3016 #ifdef CONFIG_NEED_DMA_MAP_STATE
3017 skb->dev = q->adap->port[0];
3018 skb->destructor = deferred_unmap_destructor;
3020 last_desc = q->q.pidx + ndesc - 1;
3021 if (last_desc >= q->q.size)
3022 last_desc -= q->q.size;
3023 q->q.sdesc[last_desc].skb = skb;
3026 txq_advance(&q->q, ndesc);
3028 if (unlikely(written > 32)) {
3029 cxgb4_ring_tx_db(q->adap, &q->q, written);
3033 /* Reacquire the Pending Send Queue Lock so we can unlink the
3034 * skb we've just successfully transferred to the TX Ring and
3035 * loop for the next skb which may be at the head of the
3036 * Pending Send Queue.
3038 spin_lock(&q->sendq.lock);
3039 __skb_unlink(skb, &q->sendq);
3040 if (is_ofld_imm(skb))
3043 if (likely(written))
3044 cxgb4_ring_tx_db(q->adap, &q->q, written);
3046 /* Indicate that no thread is processing the Pending Send Queue
3049 q->service_ofldq_running = false;
3053 * ofld_xmit - send a packet through an offload queue
3054 * @q: the Tx offload queue
3057 * Send an offload packet through an SGE offload queue.
3059 static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
3061 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */
3062 spin_lock(&q->sendq.lock);
3064 /* Queue the new skb onto the Offload Queue's Pending Send Queue. If
3065 * that results in this new skb being the only one on the queue, start
3066 * servicing it. If there are other skbs already on the list, then
3067 * either the queue is currently being processed or it's been stopped
3068 * for some reason and it'll be restarted at a later time. Restart
3069 * paths are triggered by events like experiencing a DMA Mapping Error
3070 * or filling the Hardware TX Ring.
3072 __skb_queue_tail(&q->sendq, skb);
3073 if (q->sendq.qlen == 1)
3076 spin_unlock(&q->sendq.lock);
3077 return NET_XMIT_SUCCESS;
3081 * restart_ofldq - restart a suspended offload queue
3082 * @t: pointer to the tasklet associated with this handler
3084 * Resumes transmission on a suspended Tx offload queue.
3086 static void restart_ofldq(struct tasklet_struct *t)
3088 struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
3090 spin_lock(&q->sendq.lock);
3091 q->full = 0; /* the queue actually is completely empty now */
3093 spin_unlock(&q->sendq.lock);
3097 * skb_txq - return the Tx queue an offload packet should use
3100 * Returns the Tx queue an offload packet should use as indicated by bits
3101 * 1-15 in the packet's queue_mapping.
3103 static inline unsigned int skb_txq(const struct sk_buff *skb)
3105 return skb->queue_mapping >> 1;
3109 * is_ctrl_pkt - return whether an offload packet is a control packet
3112 * Returns whether an offload packet should use an OFLD or a CTRL
3113 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
3115 static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
3117 return skb->queue_mapping & 1;
3120 static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
3121 unsigned int tx_uld_type)
3123 struct sge_uld_txq_info *txq_info;
3124 struct sge_uld_txq *txq;
3125 unsigned int idx = skb_txq(skb);
3127 if (unlikely(is_ctrl_pkt(skb))) {
3128 /* Single ctrl queue is a requirement for LE workaround path */
3129 if (adap->tids.nsftids)
3131 return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
3134 txq_info = adap->sge.uld_txq_info[tx_uld_type];
3135 if (unlikely(!txq_info)) {
3138 return NET_XMIT_DROP;
3141 txq = &txq_info->uldtxq[idx];
3142 return ofld_xmit(txq, skb);
3146 * t4_ofld_send - send an offload packet
3147 * @adap: the adapter
3150 * Sends an offload packet. We use the packet queue_mapping to select the
3151 * appropriate Tx queue as follows: bit 0 indicates whether the packet
3152 * should be sent as regular or control, bits 1-15 select the queue.
3154 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
3159 ret = uld_send(adap, skb, CXGB4_TX_OFLD);
3165 * cxgb4_ofld_send - send an offload packet
3166 * @dev: the net device
3169 * Sends an offload packet. This is an exported version of @t4_ofld_send,
3170 * intended for ULDs.
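 *
 * Illustrative usage only (a sketch, not taken from an actual ULD); the
 * caller encodes the target queue in the skb's queue_mapping before sending:
 *
 *	skb_set_queue_mapping(skb, (txq_idx << 1) | is_ctrl);
 *	cxgb4_ofld_send(dev, skb);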
3172 int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
3174 return t4_ofld_send(netdev2adap(dev), skb);
3176 EXPORT_SYMBOL(cxgb4_ofld_send);
3178 static void *inline_tx_header(const void *src,
3179 const struct sge_txq *q,
3180 void *pos, int length)
3182 int left = (void *)q->stat - pos;
3185 if (likely(length <= left)) {
3186 memcpy(pos, src, length);
3189 memcpy(pos, src, left);
3190 memcpy(q->desc, src + left, length - left);
3191 pos = (void *)q->desc + (length - left);
3193 /* 0-pad to multiple of 16 */
3194 p = PTR_ALIGN(pos, 8);
3195 if ((uintptr_t)p & 8) {
3203 * ofld_xmit_direct - copy a WR into offload queue
3204 * @q: the Tx offload queue
3205 * @src: location of WR
3208 * Copy an immediate WR into an uncontended SGE offload queue.
3210 static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
3217 /* Use the lower limit as the cut-off */
3218 if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) {
3220 return NET_XMIT_DROP;
3223 /* Don't return NET_XMIT_CN here as the current
3224 * implementation doesn't queue the request
3225 * using an skb when the following conditions are not met
3227 if (!spin_trylock(&q->sendq.lock))
3228 return NET_XMIT_DROP;
3230 if (q->full || !skb_queue_empty(&q->sendq) ||
3231 q->service_ofldq_running) {
3232 spin_unlock(&q->sendq.lock);
3233 return NET_XMIT_DROP;
3235 ndesc = flits_to_desc(DIV_ROUND_UP(len, 8));
3236 credits = txq_avail(&q->q) - ndesc;
3237 pos = (u64 *)&q->q.desc[q->q.pidx];
3239 /* ofldtxq_stop modifies WR header in-situ */
3240 inline_tx_header(src, &q->q, pos, len);
3241 if (unlikely(credits < TXQ_STOP_THRES))
3242 ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
3243 txq_advance(&q->q, ndesc);
3244 cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
3246 spin_unlock(&q->sendq.lock);
3247 return NET_XMIT_SUCCESS;
3250 int cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
3251 const void *src, unsigned int len)
3253 struct sge_uld_txq_info *txq_info;
3254 struct sge_uld_txq *txq;
3255 struct adapter *adap;
3258 adap = netdev2adap(dev);
3261 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
3262 if (unlikely(!txq_info)) {
3265 return NET_XMIT_DROP;
3267 txq = &txq_info->uldtxq[idx];
3269 ret = ofld_xmit_direct(txq, src, len);
3271 return net_xmit_eval(ret);
3273 EXPORT_SYMBOL(cxgb4_immdata_send);
3276 * t4_crypto_send - send crypto packet
3277 * @adap: the adapter
3280 * Sends a crypto packet. We use the packet queue_mapping to select the
3281 * appropriate Tx queue as follows: bit 0 indicates whether the packet
3282 * should be sent as regular or control, bits 1-15 select the queue.
3284 static int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
3289 ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
3295 * cxgb4_crypto_send - send crypto packet
3296 * @dev: the net device
3299 * Sends a crypto packet. This is an exported version of @t4_crypto_send,
3300 * intended for ULDs.
3302 int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
3304 return t4_crypto_send(netdev2adap(dev), skb);
3306 EXPORT_SYMBOL(cxgb4_crypto_send);
3308 static inline void copy_frags(struct sk_buff *skb,
3309 const struct pkt_gl *gl, unsigned int offset)
3313 /* usually there's just one frag */
3314 __skb_fill_page_desc(skb, 0, gl->frags[0].page,
3315 gl->frags[0].offset + offset,
3316 gl->frags[0].size - offset);
3317 skb_shinfo(skb)->nr_frags = gl->nfrags;
3318 for (i = 1; i < gl->nfrags; i++)
3319 __skb_fill_page_desc(skb, i, gl->frags[i].page,
3320 gl->frags[i].offset,
3323 /* get a reference to the last page, we don't own it */
3324 get_page(gl->frags[gl->nfrags - 1].page);
3328 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
3329 * @gl: the gather list
3330 * @skb_len: size of sk_buff main body if it carries fragments
3331 * @pull_len: amount of data to move to the sk_buff's main body
3333 * Builds an sk_buff from the given packet gather list. Returns the
3334 * sk_buff or %NULL if sk_buff allocation failed.
3336 struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
3337 unsigned int skb_len, unsigned int pull_len)
3339 struct sk_buff *skb;
3342 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
3343 * size, which is expected since buffers are at least PAGE_SIZEd.
3344 * In this case packets up to RX_COPY_THRES have only one fragment.
3346 if (gl->tot_len <= RX_COPY_THRES) {
3347 skb = dev_alloc_skb(gl->tot_len);
3350 __skb_put(skb, gl->tot_len);
3351 skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
3353 skb = dev_alloc_skb(skb_len);
3356 __skb_put(skb, pull_len);
3357 skb_copy_to_linear_data(skb, gl->va, pull_len);
3359 copy_frags(skb, gl, pull_len);
3360 skb->len = gl->tot_len;
3361 skb->data_len = skb->len - pull_len;
3362 skb->truesize += skb->data_len;
3366 EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
3369 * t4_pktgl_free - free a packet gather list
3370 * @gl: the gather list
3372 * Releases the pages of a packet gather list. We do not own the last
3373 * page on the list and do not free it.
3375 static void t4_pktgl_free(const struct pkt_gl *gl)
3378 const struct page_frag *p;
3380 for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
3385 * Process an MPS trace packet. Give it an unused protocol number so it won't
3386 * be delivered to anyone and send it to the stack for capture.
3388 static noinline int handle_trace_pkt(struct adapter *adap,
3389 const struct pkt_gl *gl)
3391 struct sk_buff *skb;
3393 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
3394 if (unlikely(!skb)) {
3399 if (is_t4(adap->params.chip))
3400 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
3402 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
3404 skb_reset_mac_header(skb);
3405 skb->protocol = htons(0xffff);
3406 skb->dev = adap->port[0];
3407 netif_receive_skb(skb);
3412 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
3413 * @adap: the adapter
3414 * @hwtstamps: time stamp structure to update
3415 * @sgetstamp: 60-bit IQE timestamp
3417 * Every ingress queue entry carries a 60-bit timestamp in Core Clock
3418 * ticks; convert it to ktime_t and assign it to @hwtstamps.
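 *
 * adap->params.vpd.cclk holds the core clock in kHz, so the conversion
 * below is ns = sgetstamp * 10^6 / cclk; e.g. a 250 MHz core clock
 * (cclk == 250000) gives 4 ns per tick.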
3420 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
3421 struct skb_shared_hwtstamps *hwtstamps,
3425 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
3427 ns = div_u64(tmp, adap->params.vpd.cclk);
3429 memset(hwtstamps, 0, sizeof(*hwtstamps));
3430 hwtstamps->hwtstamp = ns_to_ktime(ns);
3433 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
3434 const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
3436 struct adapter *adapter = rxq->rspq.adap;
3437 struct sge *s = &adapter->sge;
3438 struct port_info *pi;
3440 struct sk_buff *skb;
3442 skb = napi_get_frags(&rxq->rspq.napi);
3443 if (unlikely(!skb)) {
3445 rxq->stats.rx_drops++;
3449 copy_frags(skb, gl, s->pktshift);
3451 skb->csum_level = 1;
3452 skb->len = gl->tot_len - s->pktshift;
3453 skb->data_len = skb->len;
3454 skb->truesize += skb->data_len;
3455 skb->ip_summed = CHECKSUM_UNNECESSARY;
3456 skb_record_rx_queue(skb, rxq->rspq.idx);
3457 pi = netdev_priv(skb->dev);
3459 cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
3461 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
3462 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
3465 if (unlikely(pkt->vlan_ex)) {
3466 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
3467 rxq->stats.vlan_ex++;
3469 ret = napi_gro_frags(&rxq->rspq.napi);
3470 if (ret == GRO_HELD)
3471 rxq->stats.lro_pkts++;
3472 else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
3473 rxq->stats.lro_merged++;
3475 rxq->stats.rx_cso++;
3485 * t4_systim_to_hwstamp - read hardware time stamp
3486 * @adapter: the adapter
3489 * Read the Time Stamp from an MPS packet and insert it into the skb,
3490 * which is then forwarded to the PTP application.
3492 static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
3493 struct sk_buff *skb)
3495 struct skb_shared_hwtstamps *hwtstamps;
3496 struct cpl_rx_mps_pkt *cpl = NULL;
3497 unsigned char *data;
3500 cpl = (struct cpl_rx_mps_pkt *)skb->data;
3501 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
3502 X_CPL_RX_MPS_PKT_TYPE_PTP))
3503 return RX_PTP_PKT_ERR;
3505 data = skb->data + sizeof(*cpl);
3506 skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
3507 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
3508 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
3509 return RX_PTP_PKT_ERR;
3511 hwtstamps = skb_hwtstamps(skb);
3512 memset(hwtstamps, 0, sizeof(*hwtstamps));
3513 hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
3515 return RX_PTP_PKT_SUC;
3519 * t4_rx_hststamp - Recv PTP Event Message
3520 * @adapter: the adapter
3521 * @rsp: the response queue descriptor holding the RX_PKT message
3522 * @rxq: the response queue holding the RX_PKT message
3525 * If PTP is enabled and this is an MPS packet, read the HW timestamp.
3527 static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
3528 struct sge_eth_rxq *rxq, struct sk_buff *skb)
3532 if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
3533 !is_t4(adapter->params.chip))) {
3534 ret = t4_systim_to_hwstamp(adapter, skb);
3535 if (ret == RX_PTP_PKT_ERR) {
3537 rxq->stats.rx_drops++;
3541 return RX_NON_PTP_PKT;
3545 * t4_tx_hststamp - Loopback PTP Transmit Event Message
3546 * @adapter: the adapter
3548 * @dev: the ingress net device
3550 * Read hardware timestamp for the loopback PTP Tx event message
3552 static int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
3553 struct net_device *dev)
3555 struct port_info *pi = netdev_priv(dev);
3557 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
3558 cxgb4_ptp_read_hwstamp(adapter, pi);
3566 * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
3567 * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
3568 * @rsp: Response Entry pointer into Response Queue
3569 * @gl: Gather List pointer
3571 * For adapters which support the SGE Doorbell Queue Timer facility,
3572 * we configure the Ethernet TX Queues to send CIDX Updates to the
3573 * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
3574 * messages. This adds a small load to PCIe Link RX bandwidth and,
3575 * potentially, higher CPU Interrupt load, but allows us to respond
3576 * much more quickly to the CIDX Updates. This is important for
3577 * Upper Layer Software which isn't willing to have a large amount
3578 * of TX Data outstanding before receiving DMA Completions.
3580 static void t4_tx_completion_handler(struct sge_rspq *rspq,
3582 const struct pkt_gl *gl)
3584 u8 opcode = ((const struct rss_header *)rsp)->opcode;
3585 struct port_info *pi = netdev_priv(rspq->netdev);
3586 struct adapter *adapter = rspq->adap;
3587 struct sge *s = &adapter->sge;
3588 struct sge_eth_txq *txq;
3590 /* skip RSS header */
3593 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
3595 if (unlikely(opcode == CPL_FW4_MSG &&
3596 ((const struct cpl_fw4_msg *)rsp)->type ==
3599 opcode = ((const struct rss_header *)rsp)->opcode;
3603 if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) {
3604 pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n",
3609 txq = &s->ethtxq[pi->first_qset + rspq->idx];
3611 /* We've got the Hardware Consumer Index Update in the Egress Update
3612 * message. These Egress Update messages will be our sole CIDX Updates
3613 * we get since we don't want to chew up PCIe bandwidth for both Ingress
3614 * Messages and Status Page writes. However, the code which manages
3615 * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
3616 * stored in the Status Page at the end of the TX Queue. It's easiest
3617 * to simply copy the CIDX Update value from the Egress Update message
3618 * to the Status Page. Also note that no Endian issues need to be
3619 * considered here since both are Big Endian and we're just copying
3620 * bytes consistently ...
3622 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
3623 struct cpl_sge_egr_update *egr;
3625 egr = (struct cpl_sge_egr_update *)rsp;
3626 WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
3629 t4_sge_eth_txq_egress_update(adapter, txq, -1);
3632 static int cxgb4_validate_lb_pkt(struct port_info *pi, const struct pkt_gl *si)
3634 struct adapter *adap = pi->adapter;
3635 struct cxgb4_ethtool_lb_test *lb;
3636 struct sge *s = &adap->sge;
3637 struct net_device *netdev;
3641 netdev = adap->port[pi->port_id];
3642 lb = &pi->ethtool_lb;
3643 data = si->va + s->pktshift;
3646 if (!ether_addr_equal(data + i, netdev->dev_addr))
3650 if (strcmp(&data[i], CXGB4_SELFTEST_LB_STR))
3653 complete(&lb->completion);
3658 * t4_ethrx_handler - process an ingress ethernet packet
3659 * @q: the response queue that received the packet
3660 * @rsp: the response queue descriptor holding the RX_PKT message
3661 * @si: the gather list of packet fragments
3663 * Process an ingress ethernet packet and deliver it to the stack.
3665 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
3666 const struct pkt_gl *si)
3669 struct sk_buff *skb;
3670 const struct cpl_rx_pkt *pkt;
3671 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
3672 struct adapter *adapter = q->adap;
3673 struct sge *s = &q->adap->sge;
3674 int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
3675 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
3676 u16 err_vec, tnl_hdr_len = 0;
3677 struct port_info *pi;
3680 pi = netdev_priv(q->netdev);
3681 /* If we're looking at TX Queue CIDX Update, handle that separately
3684 if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) ||
3685 (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) {
3686 t4_tx_completion_handler(q, rsp, si);
3690 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
3691 return handle_trace_pkt(q->adap, si);
3693 pkt = (const struct cpl_rx_pkt *)rsp;
3694 /* Compressed error vector is enabled for T6 only */
3695 if (q->adap->params.tp.rx_pkt_encap) {
3696 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
3697 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec));
3699 err_vec = be16_to_cpu(pkt->err_vec);
3702 csum_ok = pkt->csum_calc && !err_vec &&
3703 (q->netdev->features & NETIF_F_RXCSUM);
3706 rxq->stats.bad_rx_pkts++;
3708 if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) {
3709 ret = cxgb4_validate_lb_pkt(pi, si);
3714 if (((pkt->l2info & htonl(RXF_TCP_F)) ||
3716 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
3717 do_gro(rxq, si, pkt, tnl_hdr_len);
3721 skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
3722 if (unlikely(!skb)) {
3724 rxq->stats.rx_drops++;
3728 /* Handle PTP Event Rx packet */
3729 if (unlikely(pi->ptp_enable)) {
3730 ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
3731 if (ret == RX_PTP_PKT_ERR)
3735 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */
3737 /* Handle the PTP Event Tx Loopback packet */
3738 if (unlikely(pi->ptp_enable && !ret &&
3739 (pkt->l2info & htonl(RXF_UDP_F)) &&
3740 cxgb4_ptp_is_ptp_rx(skb))) {
3741 if (!t4_tx_hststamp(adapter, skb, q->netdev))
3745 skb->protocol = eth_type_trans(skb, q->netdev);
3746 skb_record_rx_queue(skb, q->idx);
3747 if (skb->dev->features & NETIF_F_RXHASH)
3748 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
3754 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
3756 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
3757 if (!pkt->ip_frag) {
3758 skb->ip_summed = CHECKSUM_UNNECESSARY;
3759 rxq->stats.rx_cso++;
3760 } else if (pkt->l2info & htonl(RXF_IP_F)) {
3761 __sum16 c = (__force __sum16)pkt->csum;
3762 skb->csum = csum_unfold(c);
3765 skb->ip_summed = CHECKSUM_UNNECESSARY;
3766 skb->csum_level = 1;
3768 skb->ip_summed = CHECKSUM_COMPLETE;
3770 rxq->stats.rx_cso++;
3773 skb_checksum_none_assert(skb);
3774 #ifdef CONFIG_CHELSIO_T4_FCOE
3775 #define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
3776 RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
3778 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
3779 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
3780 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
3781 if (q->adap->params.tp.rx_pkt_encap)
3783 T6_COMPR_RXERR_SUM_F;
3785 csum_ok = err_vec & RXERR_CSUM_F;
3787 skb->ip_summed = CHECKSUM_UNNECESSARY;
3791 #undef CPL_RX_PKT_FLAGS
3792 #endif /* CONFIG_CHELSIO_T4_FCOE */
3795 if (unlikely(pkt->vlan_ex)) {
3796 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
3797 rxq->stats.vlan_ex++;
3799 skb_mark_napi_id(skb, &q->napi);
3800 netif_receive_skb(skb);
3805 * restore_rx_bufs - put back a packet's Rx buffers
3806 * @si: the packet gather list
3807 * @q: the SGE free list
3808 * @frags: number of FL buffers to restore
3810 * Puts back on an FL the Rx buffers associated with @si. The buffers
3811 * have already been unmapped and are left unmapped, we mark them so to
3812 * prevent further unmapping attempts.
3814 * This function undoes a series of @unmap_rx_buf calls when we find out
3815 * that the current packet can't be processed right away after all and we
3816 * need to come back to it later. This is a very rare event and there's
3817 * no effort to make this particularly efficient.
3819 static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
3822 struct rx_sw_desc *d;
3826 q->cidx = q->size - 1;
3829 d = &q->sdesc[q->cidx];
3830 d->page = si->frags[frags].page;
3831 d->dma_addr |= RX_UNMAPPED_BUF;
3837 * is_new_response - check if a response is newly written
3838 * @r: the response descriptor
3839 * @q: the response queue
3841 * Returns true if a response descriptor contains a yet unprocessed
3844 static inline bool is_new_response(const struct rsp_ctrl *r,
3845 const struct sge_rspq *q)
3847 return (r->type_gen >> RSPD_GEN_S) == q->gen;
3851 * rspq_next - advance to the next entry in a response queue
3854 * Updates the state of a response queue to advance it to the next entry.
3856 static inline void rspq_next(struct sge_rspq *q)
3858 q->cur_desc = (void *)q->cur_desc + q->iqe_len;
3859 if (unlikely(++q->cidx == q->size)) {
3862 q->cur_desc = q->desc;
3867 * process_responses - process responses from an SGE response queue
3868 * @q: the ingress queue to process
3869 * @budget: how many responses can be processed in this round
3871 * Process responses from an SGE response queue up to the supplied budget.
3872 * Responses include received packets as well as control messages from FW
3875 * Additionally choose the interrupt holdoff time for the next interrupt
3876 * on this queue. If the system is under memory shortage use a fairly
3877 * long delay to help recovery.
3879 static int process_responses(struct sge_rspq *q, int budget)
3882 int budget_left = budget;
3883 const struct rsp_ctrl *rc;
3884 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
3885 struct adapter *adapter = q->adap;
3886 struct sge *s = &adapter->sge;
3888 while (likely(budget_left)) {
3889 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
3890 if (!is_new_response(rc, q)) {
3891 if (q->flush_handler)
3892 q->flush_handler(q);
3897 rsp_type = RSPD_TYPE_G(rc->type_gen);
3898 if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
3899 struct page_frag *fp;
3901 const struct rx_sw_desc *rsd;
3902 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
3904 if (len & RSPD_NEWBUF_F) {
3905 if (likely(q->offset > 0)) {
3906 free_rx_bufs(q->adap, &rxq->fl, 1);
3909 len = RSPD_LEN_G(len);
3913 /* gather packet fragments */
3914 for (frags = 0, fp = si.frags; ; frags++, fp++) {
3915 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
3916 bufsz = get_buf_size(adapter, rsd);
3917 fp->page = rsd->page;
3918 fp->offset = q->offset;
3919 fp->size = min(bufsz, len);
3923 unmap_rx_buf(q->adap, &rxq->fl);
3926 si.sgetstamp = SGE_TIMESTAMP_G(
3927 be64_to_cpu(rc->last_flit));
3929 * Last buffer remains mapped so explicitly make it
3930 * coherent for CPU access.
3932 dma_sync_single_for_cpu(q->adap->pdev_dev,
3934 fp->size, DMA_FROM_DEVICE);
3936 si.va = page_address(si.frags[0].page) +
3940 si.nfrags = frags + 1;
3941 ret = q->handler(q, q->cur_desc, &si);
3942 if (likely(ret == 0))
3943 q->offset += ALIGN(fp->size, s->fl_align);
3945 restore_rx_bufs(&si, &rxq->fl, frags);
3946 } else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
3947 ret = q->handler(q, q->cur_desc, NULL);
3949 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
3952 if (unlikely(ret)) {
3953 /* couldn't process descriptor, back off for recovery */
3954 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
3962 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
3963 __refill_fl(q->adap, &rxq->fl);
3964 return budget - budget_left;
3968 * napi_rx_handler - the NAPI handler for Rx processing
3969 * @napi: the napi instance
3970 * @budget: how many packets we can process in this round
3972 * Handler for new data events when using NAPI. This does not need any
3973 * locking or protection from interrupts as data interrupts are off at
3974 * this point and other adapter interrupts do not interfere (the latter
3975 * is not a concern at all with MSI-X as non-data interrupts then have
3976 * a separate handler).
3978 static int napi_rx_handler(struct napi_struct *napi, int budget)
3980 unsigned int params;
3981 struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
3985 work_done = process_responses(q, budget);
3986 if (likely(work_done < budget)) {
3989 napi_complete_done(napi, work_done);
3990 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
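/* Adaptive coalescing: if this poll handled more packets than the
 * current timer's quota, move to the next holdoff timer index
 * (typically a longer delay); otherwise move back down.
 */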
3992 if (q->adaptive_rx) {
3993 if (work_done > max(timer_pkt_quota[timer_index],
3995 timer_index = (timer_index + 1);
3997 timer_index = timer_index - 1;
3999 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
4000 q->next_intr_params =
4001 QINTR_TIMER_IDX_V(timer_index) |
4003 params = q->next_intr_params;
4005 params = q->next_intr_params;
4006 q->next_intr_params = q->intr_params;
4009 params = QINTR_TIMER_IDX_V(7);
4011 val = CIDXINC_V(work_done) | SEINTARM_V(params);
4013 /* If we don't have access to the new User GTS (T5+), use the old
4014 * doorbell mechanism; otherwise use the new BAR2 mechanism.
4016 if (unlikely(q->bar2_addr == NULL)) {
4017 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
4018 val | INGRESSQID_V((u32)q->cntxt_id));
4020 writel(val | INGRESSQID_V(q->bar2_qid),
4021 q->bar2_addr + SGE_UDB_GTS);
4027 void cxgb4_ethofld_restart(struct tasklet_struct *t)
4029 struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t,
4033 spin_lock(&eosw_txq->lock);
4034 pktcount = eosw_txq->cidx - eosw_txq->last_cidx;
4036 pktcount += eosw_txq->ndesc;
4039 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev),
4040 eosw_txq, pktcount);
4041 eosw_txq->inuse -= pktcount;
4044 /* There may be some packets waiting for completions. So,
4045 * attempt to send these packets now.
4047 ethofld_xmit(eosw_txq->netdev, eosw_txq);
4048 spin_unlock(&eosw_txq->lock);
4051 /* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
4052 * @q: the response queue that received the packet
4053 * @rsp: the response queue descriptor holding the CPL message
4054 * @si: the gather list of packet fragments
4056 * Process an ETHOFLD Tx completion. Increment the cidx here, but
4057 * free up the descriptors in a tasklet later.
4059 int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
4060 const struct pkt_gl *si)
4062 u8 opcode = ((const struct rss_header *)rsp)->opcode;
4064 /* skip RSS header */
4067 if (opcode == CPL_FW4_ACK) {
4068 const struct cpl_fw4_ack *cpl;
4069 struct sge_eosw_txq *eosw_txq;
4070 struct eotid_entry *entry;
4071 struct sk_buff *skb;
4076 cpl = (const struct cpl_fw4_ack *)rsp;
4077 eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) -
4078 q->adap->tids.eotid_base;
4079 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
4083 eosw_txq = (struct sge_eosw_txq *)entry->data;
4087 spin_lock(&eosw_txq->lock);
4088 credits = cpl->credits;
4089 while (credits > 0) {
4090 skb = eosw_txq->desc[eosw_txq->cidx].skb;
4094 if (unlikely((eosw_txq->state ==
4095 CXGB4_EO_STATE_FLOWC_OPEN_REPLY ||
4097 CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) &&
4098 eosw_txq->cidx == eosw_txq->flowc_idx)) {
4099 flits = DIV_ROUND_UP(skb->len, 8);
4100 if (eosw_txq->state ==
4101 CXGB4_EO_STATE_FLOWC_OPEN_REPLY)
4102 eosw_txq->state = CXGB4_EO_STATE_ACTIVE;
4104 eosw_txq->state = CXGB4_EO_STATE_CLOSED;
4105 complete(&eosw_txq->completion);
4107 hdr_len = eth_get_headlen(eosw_txq->netdev,
4110 flits = ethofld_calc_tx_flits(q->adap, skb,
4113 eosw_txq_advance_index(&eosw_txq->cidx, 1,
4115 wrlen16 = DIV_ROUND_UP(flits * 8, 16);
4119 eosw_txq->cred += cpl->credits;
4122 spin_unlock(&eosw_txq->lock);
4124 /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
4125 * if there were packets waiting for completion.
4127 tasklet_schedule(&eosw_txq->qresume_tsk);
4135 * The MSI-X interrupt handler for an SGE response queue.
4137 irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
4139 struct sge_rspq *q = cookie;
4141 napi_schedule(&q->napi);
4146 * Process the indirect interrupt entries in the interrupt queue and kick off
4147 * NAPI for each queue that has generated an entry.
4149 static unsigned int process_intrq(struct adapter *adap)
4151 unsigned int credits;
4152 const struct rsp_ctrl *rc;
4153 struct sge_rspq *q = &adap->sge.intrq;
4156 spin_lock(&adap->sge.intrq_lock);
4157 for (credits = 0; ; credits++) {
4158 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
4159 if (!is_new_response(rc, q))
4163 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
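/* For indirect-interrupt entries, pldbuflen_qid carries the
 * absolute id of the ingress queue whose NAPI handler must run.
 */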
4164 unsigned int qid = ntohl(rc->pldbuflen_qid);
4166 qid -= adap->sge.ingr_start;
4167 napi_schedule(&adap->sge.ingr_map[qid]->napi);
4173 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
4175 /* If we don't have access to the new User GTS (T5+), use the old
4176 * doorbell mechanism; otherwise use the new BAR2 mechanism.
4178 if (unlikely(q->bar2_addr == NULL)) {
4179 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
4180 val | INGRESSQID_V(q->cntxt_id));
4182 writel(val | INGRESSQID_V(q->bar2_qid),
4183 q->bar2_addr + SGE_UDB_GTS);
4186 spin_unlock(&adap->sge.intrq_lock);
4191 * The MSI interrupt handler, which handles data events from SGE response queues
4192 * as well as error and other async events as they all use the same MSI vector.
4194 static irqreturn_t t4_intr_msi(int irq, void *cookie)
4196 struct adapter *adap = cookie;
4198 if (adap->flags & CXGB4_MASTER_PF)
4199 t4_slow_intr_handler(adap);
4200 process_intrq(adap);
4205 * Interrupt handler for legacy INTx interrupts.
4206 * Handles data events from SGE response queues as well as error and other
4207 * async events as they all use the same interrupt line.
4209 static irqreturn_t t4_intr_intx(int irq, void *cookie)
4211 struct adapter *adap = cookie;
4213 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
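/* Bitwise '|' (not '||') below so that process_intrq() always runs,
 * even when the slow interrupt handler also reports work.
 */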
4214 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
4215 process_intrq(adap))
4217 return IRQ_NONE; /* probably shared interrupt */
4221 * t4_intr_handler - select the top-level interrupt handler
4222 * @adap: the adapter
4224 * Selects the top-level interrupt handler based on the type of interrupts
4225 * (MSI-X, MSI, or INTx).
4227 irq_handler_t t4_intr_handler(struct adapter *adap)
4229 if (adap->flags & CXGB4_USING_MSIX)
4230 return t4_sge_intr_msix;
4231 if (adap->flags & CXGB4_USING_MSI)
4233 return t4_intr_intx;
static void sge_rx_timer_cb(struct timer_list *t)
{
	unsigned long m;
	unsigned int i;
	struct adapter *adap = from_timer(adap, t, sge.rx_timer);
	struct sge *s = &adap->sge;

	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
		for (m = s->starving_fl[i]; m; m &= m - 1) {
			struct sge_eth_rxq *rxq;
			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_fl *fl = s->egr_map[id];

			clear_bit(id, s->starving_fl);
			smp_mb__after_atomic();

			if (fl_starving(adap, fl)) {
				rxq = container_of(fl, struct sge_eth_rxq, fl);
				if (napi_reschedule(&rxq->rspq.napi))
					fl->starving++;
				else
					set_bit(id, s->starving_fl);
			}
		}

	/* The remainder of the SGE RX Timer Callback routine is dedicated to
	 * global Master PF activities like checking for chip ingress stalls,
	 * etc.
	 */
	if (!(adap->flags & CXGB4_MASTER_PF))
		goto done;

	t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);

done:
	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
static void sge_tx_timer_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, sge.tx_timer);
	struct sge *s = &adap->sge;
	unsigned long m, period;
	unsigned int i, budget;

	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
		for (m = s->txq_maperr[i]; m; m &= m - 1) {
			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_uld_txq *txq = s->egr_map[id];

			clear_bit(id, s->txq_maperr);
			tasklet_schedule(&txq->qresume_tsk);
		}

	if (!is_t4(adap->params.chip)) {
		struct sge_eth_txq *q = &s->ptptxq;
		int avail;

		spin_lock(&adap->ptp_lock);
		avail = reclaimable(&q->q);

		if (avail) {
			free_tx_desc(adap, &q->q, avail, false);
			q->q.in_use -= avail;
		}
		spin_unlock(&adap->ptp_lock);
	}

	budget = MAX_TIMER_TX_RECLAIM;
	i = s->ethtxq_rover;
	do {
		budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
						       budget);
		if (!budget)
			break;

		if (++i >= s->ethqsets)
			i = 0;
	} while (i != s->ethtxq_rover);
	s->ethtxq_rover = i;

	if (budget == 0) {
		/* If we found too many reclaimable packets schedule a timer
		 * in the near future to continue where we left off.
		 */
		period = 2;
	} else {
		/* We reclaimed all reclaimable TX Descriptors, so reschedule
		 * at the normal period.
		 */
		period = TX_QCHECK_PERIOD;
	}

	mod_timer(&s->tx_timer, jiffies + period);
}
4332 * bar2_address - return the BAR2 address for an SGE Queue's Registers
4333 * @adapter: the adapter
4334 * @qid: the SGE Queue ID
4335 * @qtype: the SGE Queue Type (Egress or Ingress)
4336 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
4338 * Returns the BAR2 address for the SGE Queue Registers associated with
4339 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
4340 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
4341 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
4342 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
4344 static void __iomem *bar2_address(struct adapter *adapter,
4346 enum t4_bar2_qtype qtype,
4347 unsigned int *pbar2_qid)
4352 ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
4353 &bar2_qoffset, pbar2_qid);
4357 return adapter->bar2 + bar2_qoffset;
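
/* Usage note (added for clarity, not part of the original source): callers
 * request T4_BAR2_QTYPE_EGRESS or T4_BAR2_QTYPE_INGRESS and then add the
 * fixed user-doorbell offsets to the returned pointer, e.g.
 *
 *	writel(val, q->bar2_addr + SGE_UDB_KDOORBELL);	(egress doorbell)
 *	writel(val, q->bar2_addr + SGE_UDB_GTS);	(ingress GTS update)
 *
 * A NULL return means BAR2 doorbells aren't available and the legacy
 * SGE_PF_KDOORBELL/GTS registers must be used instead.
 */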
4360 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
 */
4363 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
4364 struct net_device *dev, int intr_idx,
4365 struct sge_fl *fl, rspq_handler_t hnd,
4366 rspq_flush_handler_t flush_hnd, int cong)
4370 struct sge *s = &adap->sge;
4371 struct port_info *pi = netdev_priv(dev);
4372 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);
4374 /* Size needs to be multiple of 16, including status entry. */
4375 iq->size = roundup(iq->size, 16);
4377 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
4378 &iq->phys_addr, NULL, 0,
4379 dev_to_node(adap->pdev_dev));
4383 memset(&c, 0, sizeof(c));
4384 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
4385 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4386 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
4387 c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
4389 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
4390 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
4391 FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
4392 FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
4393 FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
4395 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
4396 FW_IQ_CMD_IQGTSMODE_F |
4397 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
4398 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
4399 c.iqsize = htons(iq->size);
4400 c.iqaddr = cpu_to_be64(iq->phys_addr);
4402 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
4403 FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
4404 : FW_IQ_IQTYPE_OFLD));
4407 unsigned int chip_ver =
4408 CHELSIO_CHIP_VERSION(adap->params.chip);
4410 /* Allocate the ring for the hardware free list (with space
4411 * for its status page) along with the associated software
4412 * descriptor ring. The free list size needs to be a multiple
4413 * of the Egress Queue Unit and at least 2 Egress Units larger
		 * than the SGE's Egress Congestion Threshold
		 * (fl_starve_thres - 1).
		 */
4417 if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
4418 fl->size = s->fl_starve_thres - 1 + 2 * 8;
4419 fl->size = roundup(fl->size, 8);
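		/* Worked example (illustrative numbers only): with a
		 * hypothetical fl_starve_thres of 64, the minimum size is
		 * 64 - 1 + 2 * 8 = 79 entries, which the roundup() above
		 * raises to 80 Free List entries.
		 */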
4420 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
4421 sizeof(struct rx_sw_desc), &fl->addr,
4422 &fl->sdesc, s->stat_len,
4423 dev_to_node(adap->pdev_dev));
4427 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
4428 c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
4429 FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
4430 FW_IQ_CMD_FL0DATARO_V(relaxed) |
4431 FW_IQ_CMD_FL0PADEN_F);
4433 c.iqns_to_fl0congen |=
4434 htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
4435 FW_IQ_CMD_FL0CONGCIF_F |
4436 FW_IQ_CMD_FL0CONGEN_F);
4437 /* In T6, for egress queue type FL there is internal overhead
4438 * of 16B for header going into FLM module. Hence the maximum
4439 * allowed burst size is 448 bytes. For T4/T5, the hardware
4440 * doesn't coalesce fetch requests if more than 64 bytes of
4441 * Free List pointers are provided, so we use a 128-byte Fetch
4442 * Burst Minimum there (T6 implements coalescing so we can use
		 * the smaller 64-byte value there).
		 */
4445 c.fl0dcaen_to_fl0cidxfthresh =
4446 htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ?
4447 FETCHBURSTMIN_128B_X :
4448 FETCHBURSTMIN_64B_T6_X) |
4449 FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
4450 FETCHBURSTMAX_512B_X :
4451 FETCHBURSTMAX_256B_X));
4452 c.fl0size = htons(flsz);
4453 c.fl0addr = cpu_to_be64(fl->addr);
4456 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4460 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
4461 iq->cur_desc = iq->desc;
4464 iq->next_intr_params = iq->intr_params;
4465 iq->cntxt_id = ntohs(c.iqid);
4466 iq->abs_id = ntohs(c.physiqid);
4467 iq->bar2_addr = bar2_address(adap,
4469 T4_BAR2_QTYPE_INGRESS,
4471 iq->size--; /* subtract status entry */
4474 iq->flush_handler = flush_hnd;
4476 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
4477 skb_queue_head_init(&iq->lro_mgr.lroq);
4479 /* set offset to -1 to distinguish ingress queues without FL */
4480 iq->offset = fl ? 0 : -1;
4482 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
4485 fl->cntxt_id = ntohs(c.fl0id);
4486 fl->avail = fl->pend_cred = 0;
4487 fl->pidx = fl->cidx = 0;
4488 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
4489 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
		/* Note, we must initialize the BAR2 Free List User Doorbell
		 * information before refilling the Free List!
		 */
4494 fl->bar2_addr = bar2_address(adap,
4496 T4_BAR2_QTYPE_EGRESS,
4498 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
4501 /* For T5 and later we attempt to set up the Congestion Manager values
4502 * of the new RX Ethernet Queue. This should really be handled by
4503 * firmware because it's more complex than any host driver wants to
4504 * get involved with and it's different per chip and this is almost
4505 * certainly wrong. Firmware would be wrong as well, but it would be
4506 * a lot easier to fix in one place ... For now we do something very
	 * simple (and hopefully less wrong).
	 */
4509 if (!is_t4(adap->params.chip) && cong >= 0) {
4510 u32 param, val, ch_map = 0;
4512 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
4514 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
4515 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
4516 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
4518 val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
4521 CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
4522 for (i = 0; i < 4; i++) {
4523 if (cong & (1 << i))
4524 ch_map |= 1 << (i << cng_ch_bits_log);
4526 val |= CONMCTXT_CNGCHMAP_V(ch_map);
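		/* Worked example (illustrative values): with cong = 0x5
		 * (channels 0 and 2) and cng_ch_bits_log = 2, the loop above
		 * yields ch_map = (1 << 0) | (1 << 8) = 0x101.
		 */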
4528 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
4531 dev_warn(adap->pdev_dev, "Failed to set Congestion"
4532 " Manager Context for Ingress Queue %d: %d\n",
4533 iq->cntxt_id, -ret);
4542 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
4543 iq->desc, iq->phys_addr);
4546 if (fl && fl->desc) {
4549 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
4550 fl->desc, fl->addr);
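
/* init_txq (descriptive comment added for clarity): common software-state
 * initialization for a newly allocated TX queue. It records the hardware
 * queue id, looks up the queue's BAR2 doorbell address, resets the
 * producer/consumer indices and stop/restart counters, points q->stat at
 * the status page that follows the descriptor ring, and enters the queue
 * into the reverse egress-queue map.
 */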
4556 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
4559 q->bar2_addr = bar2_address(adap,
4561 T4_BAR2_QTYPE_EGRESS,
4564 q->cidx = q->pidx = 0;
4565 q->stops = q->restarts = 0;
4566 q->stat = (void *)&q->desc[q->size];
4567 spin_lock_init(&q->db_lock);
4568 adap->sge.egr_map[id - adap->sge.egr_start] = q;
4572 * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
4573 * @adap: the adapter
4574 * @txq: the SGE Ethernet TX Queue to initialize
4575 * @dev: the Linux Network Device
4576 * @netdevq: the corresponding Linux TX Queue
4577 * @iqid: the Ingress Queue to which to deliver CIDX Update messages
4578 * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
4580 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
4581 struct net_device *dev, struct netdev_queue *netdevq,
4582 unsigned int iqid, u8 dbqt)
4584 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4585 struct port_info *pi = netdev_priv(dev);
4586 struct sge *s = &adap->sge;
4587 struct fw_eq_eth_cmd c;
4590 /* Add status entries */
4591 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
4593 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
4594 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
4595 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
4596 netdev_queue_numa_node_read(netdevq));
4600 memset(&c, 0, sizeof(c));
4601 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
4602 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4603 FW_EQ_ETH_CMD_PFN_V(adap->pf) |
4604 FW_EQ_ETH_CMD_VFN_V(0));
4605 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
4606 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
4608 /* For TX Ethernet Queues using the SGE Doorbell Queue Timer
4609 * mechanism, we use Ingress Queue messages for Hardware Consumer
4610 * Index Updates on the TX Queue. Otherwise we have the Hardware
	 * write the CIDX Updates into the Status Page at the end of the
	 * TX Queue.
	 */
4614 c.autoequiqe_to_viid = htonl(((chip_ver <= CHELSIO_T5) ?
4615 FW_EQ_ETH_CMD_AUTOEQUIQE_F :
4616 FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
4617 FW_EQ_ETH_CMD_VIID_V(pi->viid));
4619 c.fetchszm_to_iqid =
4620 htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V((chip_ver <= CHELSIO_T5) ?
4621 HOSTFCMODE_INGRESS_QUEUE_X :
4622 HOSTFCMODE_STATUS_PAGE_X) |
4623 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
4624 FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
4626 /* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */
4628 htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
4629 ? FETCHBURSTMIN_64B_X
4630 : FETCHBURSTMIN_64B_T6_X) |
4631 FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
4632 FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
4633 FW_EQ_ETH_CMD_CIDXFTHRESHO_V(chip_ver == CHELSIO_T5) |
4634 FW_EQ_ETH_CMD_EQSIZE_V(nentries));
4636 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
4638 /* If we're using the SGE Doorbell Queue Timer mechanism, pass in the
	 * currently configured Timer Index. This can be changed later via an
	 * ethtool -C tx-usecs {Timer Val} command. Note that the SGE
	 * Doorbell Queue mode is currently automatically enabled in the
	 * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ...
	 */
4646 cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F |
4647 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));
4649 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4651 kfree(txq->q.sdesc);
4652 txq->q.sdesc = NULL;
4653 dma_free_coherent(adap->pdev_dev,
4654 nentries * sizeof(struct tx_desc),
4655 txq->q.desc, txq->q.phys_addr);
4660 txq->q.q_type = CXGB4_TXQ_ETH;
4661 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
4667 txq->mapping_err = 0;
4673 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
4674 struct net_device *dev, unsigned int iqid,
4675 unsigned int cmplqid)
4677 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4678 struct port_info *pi = netdev_priv(dev);
4679 struct sge *s = &adap->sge;
4680 struct fw_eq_ctrl_cmd c;
4683 /* Add status entries */
4684 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
4686 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
4687 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
4688 NULL, 0, dev_to_node(adap->pdev_dev));
4692 c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
4693 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4694 FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
4695 FW_EQ_CTRL_CMD_VFN_V(0));
4696 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
4697 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
4698 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
4699 c.physeqid_pkd = htonl(0);
4700 c.fetchszm_to_iqid =
4701 htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
4702 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
4703 FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
4705 htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
4706 ? FETCHBURSTMIN_64B_X
4707 : FETCHBURSTMIN_64B_T6_X) |
4708 FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
4709 FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
4710 FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
4711 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
4713 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4715 dma_free_coherent(adap->pdev_dev,
4716 nentries * sizeof(struct tx_desc),
4717 txq->q.desc, txq->q.phys_addr);
4722 txq->q.q_type = CXGB4_TXQ_CTRL;
4723 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
4725 skb_queue_head_init(&txq->sendq);
4726 tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
4731 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
4732 unsigned int cmplqid)
4736 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
4737 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
4738 FW_PARAMS_PARAM_YZ_V(eqid));
	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
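
/* Usage note (added for clarity, not part of the original source): this
 * helper appears intended for re-binding an already-allocated control egress
 * queue to a different completion ingress queue by programming the
 * DMAQ_EQ_CMPLIQID_CTRL firmware parameter, e.g.
 * t4_sge_mod_ctrl_txq(adap, eqid, rspq->cntxt_id).
 */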
4743 static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
4744 struct net_device *dev, u32 cmd, u32 iqid)
4746 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
4747 struct port_info *pi = netdev_priv(dev);
4748 struct sge *s = &adap->sge;
4749 struct fw_eq_ofld_cmd c;
4750 u32 fb_min, nentries;
4753 /* Add status entries */
4754 nentries = q->size + s->stat_len / sizeof(struct tx_desc);
4755 q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
4756 sizeof(struct tx_sw_desc), &q->phys_addr,
4757 &q->sdesc, s->stat_len, NUMA_NO_NODE);
4761 if (chip_ver <= CHELSIO_T5)
4762 fb_min = FETCHBURSTMIN_64B_X;
4764 fb_min = FETCHBURSTMIN_64B_T6_X;
4766 memset(&c, 0, sizeof(c));
4767 c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
4768 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
4769 FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
4770 FW_EQ_OFLD_CMD_VFN_V(0));
4771 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
4772 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
4773 c.fetchszm_to_iqid =
4774 htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
4775 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
4776 FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
4778 htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) |
4779 FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
4780 FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
4781 FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
4782 c.eqaddr = cpu_to_be64(q->phys_addr);
4784 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
4788 dma_free_coherent(adap->pdev_dev,
4789 nentries * sizeof(struct tx_desc),
4790 q->desc, q->phys_addr);
4795 init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
4799 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
4800 struct net_device *dev, unsigned int iqid,
4801 unsigned int uld_type)
4803 u32 cmd = FW_EQ_OFLD_CMD;
4806 if (unlikely(uld_type == CXGB4_TX_CRYPTO))
4807 cmd = FW_EQ_CTRL_CMD;
4809 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
4813 txq->q.q_type = CXGB4_TXQ_ULD;
4815 skb_queue_head_init(&txq->sendq);
4816 tasklet_setup(&txq->qresume_tsk, restart_ofldq);
4818 txq->mapping_err = 0;
4822 int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
4823 struct net_device *dev, u32 iqid)
4827 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
4831 txq->q.q_type = CXGB4_TXQ_ULD;
4832 spin_lock_init(&txq->lock);
4838 txq->mapping_err = 0;
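
/* free_txq (descriptive comment added for clarity): releases the
 * DMA-coherent descriptor ring, including the trailing status page, of an
 * SGE TX queue. The software descriptor array, if any, is expected to have
 * been freed by the caller beforehand.
 */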
4842 void free_txq(struct adapter *adap, struct sge_txq *q)
4844 struct sge *s = &adap->sge;
4846 dma_free_coherent(adap->pdev_dev,
4847 q->size * sizeof(struct tx_desc) + s->stat_len,
4848 q->desc, q->phys_addr);
4854 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
4857 struct sge *s = &adap->sge;
4858 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
4860 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
4861 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
4862 rq->cntxt_id, fl_id, 0xffff);
4863 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
4864 rq->desc, rq->phys_addr);
4865 netif_napi_del(&rq->napi);
4867 rq->cntxt_id = rq->abs_id = 0;
4871 free_rx_bufs(adap, fl, fl->avail);
4872 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
4873 fl->desc, fl->addr);
4882 * t4_free_ofld_rxqs - free a block of consecutive Rx queues
4883 * @adap: the adapter
4884 * @n: number of queues
4885 * @q: pointer to first queue
4887 * Release the resources of a consecutive block of offload Rx queues.
4889 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
4891 for ( ; n; n--, q++)
4893 free_rspq_fl(adap, &q->rspq,
4894 q->fl.size ? &q->fl : NULL);
4897 void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
4900 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
4902 free_tx_desc(adap, &txq->q, txq->q.in_use, false);
4903 kfree(txq->q.sdesc);
4904 free_txq(adap, &txq->q);
4909 * t4_free_sge_resources - free SGE resources
4910 * @adap: the adapter
4912 * Frees resources used by the SGE queue sets.
4914 void t4_free_sge_resources(struct adapter *adap)
4917 struct sge_eth_rxq *eq;
4918 struct sge_eth_txq *etq;
4920 /* stop all Rx queues in order to start them draining */
4921 for (i = 0; i < adap->sge.ethqsets; i++) {
4922 eq = &adap->sge.ethrxq[i];
4924 t4_iq_stop(adap, adap->mbox, adap->pf, 0,
4925 FW_IQ_TYPE_FL_INT_CAP,
4927 eq->fl.size ? eq->fl.cntxt_id : 0xffff,
4931 /* clean up Ethernet Tx/Rx queues */
4932 for (i = 0; i < adap->sge.ethqsets; i++) {
4933 eq = &adap->sge.ethrxq[i];
4935 free_rspq_fl(adap, &eq->rspq,
4936 eq->fl.size ? &eq->fl : NULL);
4938 cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
4942 etq = &adap->sge.ethtxq[i];
4944 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4946 __netif_tx_lock_bh(etq->txq);
4947 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4948 __netif_tx_unlock_bh(etq->txq);
4949 kfree(etq->q.sdesc);
4950 free_txq(adap, &etq->q);
4954 /* clean up control Tx queues */
4955 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
4956 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
4959 tasklet_kill(&cq->qresume_tsk);
4960 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
4962 __skb_queue_purge(&cq->sendq);
4963 free_txq(adap, &cq->q);
4967 if (adap->sge.fw_evtq.desc) {
4968 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
4969 if (adap->sge.fwevtq_msix_idx >= 0)
4970 cxgb4_free_msix_idx_in_bmap(adap,
4971 adap->sge.fwevtq_msix_idx);
4974 if (adap->sge.nd_msix_idx >= 0)
4975 cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);
4977 if (adap->sge.intrq.desc)
4978 free_rspq_fl(adap, &adap->sge.intrq, NULL);
4980 if (!is_t4(adap->params.chip)) {
4981 etq = &adap->sge.ptptxq;
4983 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
4985 spin_lock_bh(&adap->ptp_lock);
4986 free_tx_desc(adap, &etq->q, etq->q.in_use, true);
4987 spin_unlock_bh(&adap->ptp_lock);
4988 kfree(etq->q.sdesc);
4989 free_txq(adap, &etq->q);
4993 /* clear the reverse egress queue map */
4994 memset(adap->sge.egr_map, 0,
4995 adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
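
/* t4_sge_start (descriptive comment added for clarity): resets the TX-queue
 * reclaim rover and arms the periodic RX and TX housekeeping timers.
 */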
4998 void t4_sge_start(struct adapter *adap)
5000 adap->sge.ethtxq_rover = 0;
5001 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
5002 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
5006 * t4_sge_stop - disable SGE operation
5007 * @adap: the adapter
5009 * Stop tasklets and timers associated with the DMA engine. Note that
5010 * this is effective only if measures have been taken to disable any HW
5011 * events that may restart them.
5013 void t4_sge_stop(struct adapter *adap)
5016 struct sge *s = &adap->sge;
5018 if (s->rx_timer.function)
5019 del_timer_sync(&s->rx_timer);
5020 if (s->tx_timer.function)
5021 del_timer_sync(&s->tx_timer);
5023 if (is_offload(adap)) {
5024 struct sge_uld_txq_info *txq_info;
5026 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
5028 struct sge_uld_txq *txq = txq_info->uldtxq;
5030 for_each_ofldtxq(&adap->sge, i) {
5032 tasklet_kill(&txq->qresume_tsk);
5037 if (is_pci_uld(adap)) {
5038 struct sge_uld_txq_info *txq_info;
5040 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
5042 struct sge_uld_txq *txq = txq_info->uldtxq;
5044 for_each_ofldtxq(&adap->sge, i) {
5046 tasklet_kill(&txq->qresume_tsk);
5051 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
5052 struct sge_ctrl_txq *cq = &s->ctrlq[i];
5055 tasklet_kill(&cq->qresume_tsk);
5060 * t4_sge_init_soft - grab core SGE values needed by SGE code
5061 * @adap: the adapter
5063 * We need to grab the SGE operating parameters that we need to have
5064 * in order to do our job and make sure we can live with them.
5067 static int t4_sge_init_soft(struct adapter *adap)
5069 struct sge *s = &adap->sge;
5070 u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
5071 u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
5072 u32 ingress_rx_threshold;
5075 * Verify that CPL messages are going to the Ingress Queue for
	 * process_responses() and that only packet data is going to the
	 * Free Lists.
	 */
5079 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
5080 RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
5081 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
5086 * Validate the Host Buffer Register Array indices that we want to
5089 * XXX Note that we should really read through the Host Buffer Size
5090 * XXX register array and find the indices of the Buffer Sizes which
	 * XXX meet our needs!
	 */
5093 #define READ_FL_BUF(x) \
5094 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
5096 fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
5097 fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
5098 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
5099 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
5101 /* We only bother using the Large Page logic if the Large Page Buffer
5102 * is larger than our Page Size Buffer.
5104 if (fl_large_pg <= fl_small_pg)
5109 /* The Page Size Buffer must be exactly equal to our Page Size and the
	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
	 */
5112 if (fl_small_pg != PAGE_SIZE ||
5113 (fl_large_pg & (fl_large_pg-1)) != 0) {
5114 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
5115 fl_small_pg, fl_large_pg);
5119 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
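	/* Example (illustrative numbers): with a 4KB PAGE_SIZE and a 64KB
	 * Large Page buffer, fl_pg_order = ilog2(65536) - 12 = 4, i.e. the
	 * driver would allocate order-4 pages for large Free List buffers.
	 */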
5121 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
5122 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
5123 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
5124 fl_small_mtu, fl_large_mtu);
5129 * Retrieve our RX interrupt holdoff timer values and counter
5130 * threshold values from the SGE parameters.
5132 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
5133 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
5134 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
5135 s->timer_val[0] = core_ticks_to_us(adap,
5136 TIMERVALUE0_G(timer_value_0_and_1));
5137 s->timer_val[1] = core_ticks_to_us(adap,
5138 TIMERVALUE1_G(timer_value_0_and_1));
5139 s->timer_val[2] = core_ticks_to_us(adap,
5140 TIMERVALUE2_G(timer_value_2_and_3));
5141 s->timer_val[3] = core_ticks_to_us(adap,
5142 TIMERVALUE3_G(timer_value_2_and_3));
5143 s->timer_val[4] = core_ticks_to_us(adap,
5144 TIMERVALUE4_G(timer_value_4_and_5));
5145 s->timer_val[5] = core_ticks_to_us(adap,
5146 TIMERVALUE5_G(timer_value_4_and_5));
5148 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
5149 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
5150 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
5151 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
5152 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
5158 * t4_sge_init - initialize SGE
5159 * @adap: the adapter
 * Perform low-level SGE code initialization needed every time after a
 * chip reset.
 */
5164 int t4_sge_init(struct adapter *adap)
5166 struct sge *s = &adap->sge;
5167 u32 sge_control, sge_conm_ctrl;
5168 int ret, egress_threshold;
5171 * Ingress Padding Boundary and Egress Status Page Size are set up by
5172 * t4_fixup_host_params().
5174 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
5175 s->pktshift = PKTSHIFT_G(sge_control);
5176 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
5178 s->fl_align = t4_fl_pkt_align(adap);
5179 ret = t4_sge_init_soft(adap);
5184 * A FL with <= fl_starve_thres buffers is starving and a periodic
5185 * timer will attempt to refill it. This needs to be larger than the
5186 * SGE's Egress Congestion Threshold. If it isn't, then we can get
5187 * stuck waiting for new packets while the SGE is waiting for us to
5188 * give it more Free List entries. (Note that the SGE's Egress
5189 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
5190 * there was only a single field to control this. For T5 there's the
5191 * original field which now only applies to Unpacked Mode Free List
5192 * buffers and a new field which only applies to Packed Mode Free List
5195 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
5196 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
5198 egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
5201 egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
5204 egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
5207 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
5208 CHELSIO_CHIP_VERSION(adap->params.chip));
5211 s->fl_starve_thres = 2*egress_threshold + 1;
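	/* Example (illustrative numbers): if the chip reports an egress
	 * congestion threshold of 100 (in units of 2 Free List pointers),
	 * then a Free List with 2 * 100 + 1 = 201 or fewer available buffers
	 * is treated as starving.
	 */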
5213 t4_idma_monitor_init(adap, &s->idma_monitor);
	/* Set up timers used for recurring callbacks to process RX and TX
	 * administrative tasks.
	 */
5218 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
5219 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);
5221 spin_lock_init(&s->intrq_lock);