1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
4 #include <linux/etherdevice.h>
5 #include <net/ip6_checksum.h>
6 #include <net/page_pool/helpers.h>
7 #include <net/inet_ecn.h>
8 #include <linux/iopoll.h>
9 #include <linux/sctp.h>
10 #include <linux/pci.h>
18 /* Lookup table mapping the HW PTYPE to the bit field for decoding */
19 static struct wx_dec_ptype wx_ptype_lookup[256] = {
21 [0x11] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
22 [0x12] = WX_PTT(L2, NONE, NONE, NONE, TS, PAY2),
23 [0x13] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
24 [0x14] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
25 [0x15] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE),
26 [0x16] = WX_PTT(L2, NONE, NONE, NONE, NONE, PAY2),
27 [0x17] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE),
29 /* L2: ethertype filter */
30 [0x18 ... 0x1F] = WX_PTT(L2, NONE, NONE, NONE, NONE, NONE),
32 /* L3: ip non-tunnel */
33 [0x21] = WX_PTT(IP, FGV4, NONE, NONE, NONE, PAY3),
34 [0x22] = WX_PTT(IP, IPV4, NONE, NONE, NONE, PAY3),
35 [0x23] = WX_PTT(IP, IPV4, NONE, NONE, UDP, PAY4),
36 [0x24] = WX_PTT(IP, IPV4, NONE, NONE, TCP, PAY4),
37 [0x25] = WX_PTT(IP, IPV4, NONE, NONE, SCTP, PAY4),
38 [0x29] = WX_PTT(IP, FGV6, NONE, NONE, NONE, PAY3),
39 [0x2A] = WX_PTT(IP, IPV6, NONE, NONE, NONE, PAY3),
40 [0x2B] = WX_PTT(IP, IPV6, NONE, NONE, UDP, PAY3),
41 [0x2C] = WX_PTT(IP, IPV6, NONE, NONE, TCP, PAY4),
42 [0x2D] = WX_PTT(IP, IPV6, NONE, NONE, SCTP, PAY4),
45 [0x30 ... 0x34] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3),
46 [0x38 ... 0x3C] = WX_PTT(FCOE, NONE, NONE, NONE, NONE, PAY3),
48 /* IPv4 --> IPv4/IPv6 */
49 [0x81] = WX_PTT(IP, IPV4, IPIP, FGV4, NONE, PAY3),
50 [0x82] = WX_PTT(IP, IPV4, IPIP, IPV4, NONE, PAY3),
51 [0x83] = WX_PTT(IP, IPV4, IPIP, IPV4, UDP, PAY4),
52 [0x84] = WX_PTT(IP, IPV4, IPIP, IPV4, TCP, PAY4),
53 [0x85] = WX_PTT(IP, IPV4, IPIP, IPV4, SCTP, PAY4),
54 [0x89] = WX_PTT(IP, IPV4, IPIP, FGV6, NONE, PAY3),
55 [0x8A] = WX_PTT(IP, IPV4, IPIP, IPV6, NONE, PAY3),
56 [0x8B] = WX_PTT(IP, IPV4, IPIP, IPV6, UDP, PAY4),
57 [0x8C] = WX_PTT(IP, IPV4, IPIP, IPV6, TCP, PAY4),
58 [0x8D] = WX_PTT(IP, IPV4, IPIP, IPV6, SCTP, PAY4),
60 /* IPv4 --> GRE/NAT --> NONE/IPv4/IPv6 */
61 [0x90] = WX_PTT(IP, IPV4, IG, NONE, NONE, PAY3),
62 [0x91] = WX_PTT(IP, IPV4, IG, FGV4, NONE, PAY3),
63 [0x92] = WX_PTT(IP, IPV4, IG, IPV4, NONE, PAY3),
64 [0x93] = WX_PTT(IP, IPV4, IG, IPV4, UDP, PAY4),
65 [0x94] = WX_PTT(IP, IPV4, IG, IPV4, TCP, PAY4),
66 [0x95] = WX_PTT(IP, IPV4, IG, IPV4, SCTP, PAY4),
67 [0x99] = WX_PTT(IP, IPV4, IG, FGV6, NONE, PAY3),
68 [0x9A] = WX_PTT(IP, IPV4, IG, IPV6, NONE, PAY3),
69 [0x9B] = WX_PTT(IP, IPV4, IG, IPV6, UDP, PAY4),
70 [0x9C] = WX_PTT(IP, IPV4, IG, IPV6, TCP, PAY4),
71 [0x9D] = WX_PTT(IP, IPV4, IG, IPV6, SCTP, PAY4),
73 /* IPv4 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */
74 [0xA0] = WX_PTT(IP, IPV4, IGM, NONE, NONE, PAY3),
75 [0xA1] = WX_PTT(IP, IPV4, IGM, FGV4, NONE, PAY3),
76 [0xA2] = WX_PTT(IP, IPV4, IGM, IPV4, NONE, PAY3),
77 [0xA3] = WX_PTT(IP, IPV4, IGM, IPV4, UDP, PAY4),
78 [0xA4] = WX_PTT(IP, IPV4, IGM, IPV4, TCP, PAY4),
79 [0xA5] = WX_PTT(IP, IPV4, IGM, IPV4, SCTP, PAY4),
80 [0xA9] = WX_PTT(IP, IPV4, IGM, FGV6, NONE, PAY3),
81 [0xAA] = WX_PTT(IP, IPV4, IGM, IPV6, NONE, PAY3),
82 [0xAB] = WX_PTT(IP, IPV4, IGM, IPV6, UDP, PAY4),
83 [0xAC] = WX_PTT(IP, IPV4, IGM, IPV6, TCP, PAY4),
84 [0xAD] = WX_PTT(IP, IPV4, IGM, IPV6, SCTP, PAY4),
86 /* IPv4 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */
87 [0xB0] = WX_PTT(IP, IPV4, IGMV, NONE, NONE, PAY3),
88 [0xB1] = WX_PTT(IP, IPV4, IGMV, FGV4, NONE, PAY3),
89 [0xB2] = WX_PTT(IP, IPV4, IGMV, IPV4, NONE, PAY3),
90 [0xB3] = WX_PTT(IP, IPV4, IGMV, IPV4, UDP, PAY4),
91 [0xB4] = WX_PTT(IP, IPV4, IGMV, IPV4, TCP, PAY4),
92 [0xB5] = WX_PTT(IP, IPV4, IGMV, IPV4, SCTP, PAY4),
93 [0xB9] = WX_PTT(IP, IPV4, IGMV, FGV6, NONE, PAY3),
94 [0xBA] = WX_PTT(IP, IPV4, IGMV, IPV6, NONE, PAY3),
95 [0xBB] = WX_PTT(IP, IPV4, IGMV, IPV6, UDP, PAY4),
96 [0xBC] = WX_PTT(IP, IPV4, IGMV, IPV6, TCP, PAY4),
97 [0xBD] = WX_PTT(IP, IPV4, IGMV, IPV6, SCTP, PAY4),
99 /* IPv6 --> IPv4/IPv6 */
100 [0xC1] = WX_PTT(IP, IPV6, IPIP, FGV4, NONE, PAY3),
101 [0xC2] = WX_PTT(IP, IPV6, IPIP, IPV4, NONE, PAY3),
102 [0xC3] = WX_PTT(IP, IPV6, IPIP, IPV4, UDP, PAY4),
103 [0xC4] = WX_PTT(IP, IPV6, IPIP, IPV4, TCP, PAY4),
104 [0xC5] = WX_PTT(IP, IPV6, IPIP, IPV4, SCTP, PAY4),
105 [0xC9] = WX_PTT(IP, IPV6, IPIP, FGV6, NONE, PAY3),
106 [0xCA] = WX_PTT(IP, IPV6, IPIP, IPV6, NONE, PAY3),
107 [0xCB] = WX_PTT(IP, IPV6, IPIP, IPV6, UDP, PAY4),
108 [0xCC] = WX_PTT(IP, IPV6, IPIP, IPV6, TCP, PAY4),
109 [0xCD] = WX_PTT(IP, IPV6, IPIP, IPV6, SCTP, PAY4),
111 /* IPv6 --> GRE/NAT --> NONE/IPv4/IPv6 */
112 [0xD0] = WX_PTT(IP, IPV6, IG, NONE, NONE, PAY3),
113 [0xD1] = WX_PTT(IP, IPV6, IG, FGV4, NONE, PAY3),
114 [0xD2] = WX_PTT(IP, IPV6, IG, IPV4, NONE, PAY3),
115 [0xD3] = WX_PTT(IP, IPV6, IG, IPV4, UDP, PAY4),
116 [0xD4] = WX_PTT(IP, IPV6, IG, IPV4, TCP, PAY4),
117 [0xD5] = WX_PTT(IP, IPV6, IG, IPV4, SCTP, PAY4),
118 [0xD9] = WX_PTT(IP, IPV6, IG, FGV6, NONE, PAY3),
119 [0xDA] = WX_PTT(IP, IPV6, IG, IPV6, NONE, PAY3),
120 [0xDB] = WX_PTT(IP, IPV6, IG, IPV6, UDP, PAY4),
121 [0xDC] = WX_PTT(IP, IPV6, IG, IPV6, TCP, PAY4),
122 [0xDD] = WX_PTT(IP, IPV6, IG, IPV6, SCTP, PAY4),
124 /* IPv6 --> GRE/NAT --> MAC --> NONE/IPv4/IPv6 */
125 [0xE0] = WX_PTT(IP, IPV6, IGM, NONE, NONE, PAY3),
126 [0xE1] = WX_PTT(IP, IPV6, IGM, FGV4, NONE, PAY3),
127 [0xE2] = WX_PTT(IP, IPV6, IGM, IPV4, NONE, PAY3),
128 [0xE3] = WX_PTT(IP, IPV6, IGM, IPV4, UDP, PAY4),
129 [0xE4] = WX_PTT(IP, IPV6, IGM, IPV4, TCP, PAY4),
130 [0xE5] = WX_PTT(IP, IPV6, IGM, IPV4, SCTP, PAY4),
131 [0xE9] = WX_PTT(IP, IPV6, IGM, FGV6, NONE, PAY3),
132 [0xEA] = WX_PTT(IP, IPV6, IGM, IPV6, NONE, PAY3),
133 [0xEB] = WX_PTT(IP, IPV6, IGM, IPV6, UDP, PAY4),
134 [0xEC] = WX_PTT(IP, IPV6, IGM, IPV6, TCP, PAY4),
135 [0xED] = WX_PTT(IP, IPV6, IGM, IPV6, SCTP, PAY4),
137 /* IPv6 --> GRE/NAT --> MAC+VLAN --> NONE/IPv4/IPv6 */
138 [0xF0] = WX_PTT(IP, IPV6, IGMV, NONE, NONE, PAY3),
139 [0xF1] = WX_PTT(IP, IPV6, IGMV, FGV4, NONE, PAY3),
140 [0xF2] = WX_PTT(IP, IPV6, IGMV, IPV4, NONE, PAY3),
141 [0xF3] = WX_PTT(IP, IPV6, IGMV, IPV4, UDP, PAY4),
142 [0xF4] = WX_PTT(IP, IPV6, IGMV, IPV4, TCP, PAY4),
143 [0xF5] = WX_PTT(IP, IPV6, IGMV, IPV4, SCTP, PAY4),
144 [0xF9] = WX_PTT(IP, IPV6, IGMV, FGV6, NONE, PAY3),
145 [0xFA] = WX_PTT(IP, IPV6, IGMV, IPV6, NONE, PAY3),
146 [0xFB] = WX_PTT(IP, IPV6, IGMV, IPV6, UDP, PAY4),
147 [0xFC] = WX_PTT(IP, IPV6, IGMV, IPV6, TCP, PAY4),
148 [0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4),
151 static struct wx_dec_ptype wx_decode_ptype(const u8 ptype)
153 return wx_ptype_lookup[ptype];
156 /* wx_test_staterr - tests bits in Rx descriptor status and error fields */
157 static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
158 const u32 stat_err_bits)
160 return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
163 static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer,
166 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
167 struct page *page = rx_buffer->page;
169 /* avoid re-using remote and pfmemalloc pages */
170 if (!dev_page_is_reusable(page))
173 #if (PAGE_SIZE < 8192)
174 /* if we are only owner of page we can reuse it */
175 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
179 /* If we have drained the page fragment pool we need to update
180 * the pagecnt_bias and page count so that we fully restock the
181 * number of references the driver holds.
183 if (unlikely(pagecnt_bias == 1)) {
184 page_ref_add(page, USHRT_MAX - 1);
185 rx_buffer->pagecnt_bias = USHRT_MAX;
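/* Illustrative note (not in the original source): the driver front-loads
 * USHRT_MAX references on the page and spends them through pagecnt_bias, so
 * deciding whether the stack still holds part of this page only takes the
 * page_count() vs. pagecnt_bias comparison above, rather than an atomic
 * refcount update per received buffer.
 */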
192 * wx_reuse_rx_page - page flip buffer and store it back on the ring
193 * @rx_ring: rx descriptor ring to store buffers on
194 * @old_buff: donor buffer to have page reused
196 * Synchronizes page for reuse by the adapter
198 static void wx_reuse_rx_page(struct wx_ring *rx_ring,
199 struct wx_rx_buffer *old_buff)
201 u16 nta = rx_ring->next_to_alloc;
202 struct wx_rx_buffer *new_buff;
204 new_buff = &rx_ring->rx_buffer_info[nta];
206 /* update, and store next to alloc */
208 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
210 /* transfer page from old buffer to new buffer */
211 new_buff->page = old_buff->page;
212 new_buff->page_dma = old_buff->page_dma;
213 new_buff->page_offset = old_buff->page_offset;
214 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
217 static void wx_dma_sync_frag(struct wx_ring *rx_ring,
218 struct wx_rx_buffer *rx_buffer)
220 struct sk_buff *skb = rx_buffer->skb;
221 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
223 dma_sync_single_range_for_cpu(rx_ring->dev,
229 /* If the page was released, just unmap it. */
230 if (unlikely(WX_CB(skb)->page_released))
231 page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
234 static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
235 union wx_rx_desc *rx_desc,
236 struct sk_buff **skb,
237 int *rx_buffer_pgcnt)
239 struct wx_rx_buffer *rx_buffer;
242 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
243 size = le16_to_cpu(rx_desc->wb.upper.length);
245 #if (PAGE_SIZE < 8192)
246 *rx_buffer_pgcnt = page_count(rx_buffer->page);
248 *rx_buffer_pgcnt = 0;
251 prefetchw(rx_buffer->page);
252 *skb = rx_buffer->skb;
254 /* Delay unmapping of the first packet. It carries the header
255 * information; the HW may still access the header after the writeback.
256 * Only unmap it when EOP is reached
258 if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) {
263 wx_dma_sync_frag(rx_ring, rx_buffer);
266 /* we are reusing so sync this buffer for CPU use */
267 dma_sync_single_range_for_cpu(rx_ring->dev,
269 rx_buffer->page_offset,
273 rx_buffer->pagecnt_bias--;
278 static void wx_put_rx_buffer(struct wx_ring *rx_ring,
279 struct wx_rx_buffer *rx_buffer,
283 if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
284 /* hand second half of page back to the ring */
285 wx_reuse_rx_page(rx_ring, rx_buffer);
287 if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
288 /* the page has been released from the ring */
289 WX_CB(skb)->page_released = true;
291 page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
293 __page_frag_cache_drain(rx_buffer->page,
294 rx_buffer->pagecnt_bias);
297 /* clear contents of rx_buffer */
298 rx_buffer->page = NULL;
299 rx_buffer->skb = NULL;
302 static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
303 struct wx_rx_buffer *rx_buffer,
304 union wx_rx_desc *rx_desc)
306 unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
307 #if (PAGE_SIZE < 8192)
308 unsigned int truesize = WX_RX_BUFSZ;
310 unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
312 struct sk_buff *skb = rx_buffer->skb;
315 void *page_addr = page_address(rx_buffer->page) +
316 rx_buffer->page_offset;
318 /* prefetch first cache line of first page */
320 #if L1_CACHE_BYTES < 128
321 prefetch(page_addr + L1_CACHE_BYTES);
324 /* allocate a skb to store the frags */
325 skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);
329 /* we will be copying header into skb->data in
330 * pskb_may_pull so it is in our interest to prefetch
331 * it now to avoid a possible cache miss
333 prefetchw(skb->data);
335 if (size <= WX_RXBUFFER_256) {
336 memcpy(__skb_put(skb, size), page_addr,
337 ALIGN(size, sizeof(long)));
338 rx_buffer->pagecnt_bias++;
343 if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))
344 WX_CB(skb)->dma = rx_buffer->dma;
346 skb_add_rx_frag(skb, 0, rx_buffer->page,
347 rx_buffer->page_offset,
352 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
353 rx_buffer->page_offset, size, truesize);
357 #if (PAGE_SIZE < 8192)
358 /* flip page offset to other buffer */
359 rx_buffer->page_offset ^= truesize;
361 /* move offset up to the next cache line */
362 rx_buffer->page_offset += truesize;
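/* Illustrative note (a WX_RX_BUFSZ of 2048 on 4 KiB pages is assumed for the
 * example): with PAGE_SIZE < 8192 the XOR above simply toggles page_offset
 * between 0 and 2048, so the two halves of the page are handed out
 * alternately; on larger pages the offset instead advances by the
 * cache-line-aligned truesize.
 */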
368 static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
369 struct wx_rx_buffer *bi)
371 struct page *page = bi->page;
374 /* since we are recycling buffers we should seldom need to alloc */
378 page = page_pool_dev_alloc_pages(rx_ring->page_pool);
380 dma = page_pool_get_dma_addr(page);
385 page_ref_add(page, USHRT_MAX - 1);
386 bi->pagecnt_bias = USHRT_MAX;
392 * wx_alloc_rx_buffers - Replace used receive buffers
393 * @rx_ring: ring to place buffers on
394 * @cleaned_count: number of buffers to replace
396 void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
398 u16 i = rx_ring->next_to_use;
399 union wx_rx_desc *rx_desc;
400 struct wx_rx_buffer *bi;
406 rx_desc = WX_RX_DESC(rx_ring, i);
407 bi = &rx_ring->rx_buffer_info[i];
411 if (!wx_alloc_mapped_page(rx_ring, bi))
414 /* sync the buffer for use by the device */
415 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
420 rx_desc->read.pkt_addr =
421 cpu_to_le64(bi->page_dma + bi->page_offset);
427 rx_desc = WX_RX_DESC(rx_ring, 0);
428 bi = rx_ring->rx_buffer_info;
432 /* clear the status bits for the next_to_use descriptor */
433 rx_desc->wb.upper.status_error = 0;
436 } while (cleaned_count);
440 if (rx_ring->next_to_use != i) {
441 rx_ring->next_to_use = i;
442 /* update next to alloc since we have filled the ring */
443 rx_ring->next_to_alloc = i;
445 /* Force memory writes to complete before letting h/w
446 * know there are new descriptors to fetch. (Only
447 * applicable for weak-ordered memory model archs,
451 writel(i, rx_ring->tail);
455 u16 wx_desc_unused(struct wx_ring *ring)
457 u16 ntc = ring->next_to_clean;
458 u16 ntu = ring->next_to_use;
460 return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
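/* Worked example (illustrative only): with count = 512, next_to_clean = 10 and
 * next_to_use = 200, ntc <= ntu gives 512 + 10 - 200 - 1 = 321 unused
 * descriptors; the trailing "- 1" keeps one slot permanently empty so a full
 * ring can be distinguished from an empty one.
 */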
464 * wx_is_non_eop - process handling of non-EOP buffers
465 * @rx_ring: Rx ring being processed
466 * @rx_desc: Rx descriptor for current buffer
467 * @skb: Current socket buffer containing buffer in progress
469 * This function updates next to clean. If the buffer is an EOP buffer
470 * this function exits returning false, otherwise it will place the
471 * sk_buff in the next buffer to be chained and return true indicating
472 * that this is in fact a non-EOP buffer.
474 static bool wx_is_non_eop(struct wx_ring *rx_ring,
475 union wx_rx_desc *rx_desc,
478 u32 ntc = rx_ring->next_to_clean + 1;
480 /* fetch, update, and store next to clean */
481 ntc = (ntc < rx_ring->count) ? ntc : 0;
482 rx_ring->next_to_clean = ntc;
484 prefetch(WX_RX_DESC(rx_ring, ntc));
486 /* if we are the last buffer then there is nothing else to do */
487 if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)))
490 rx_ring->rx_buffer_info[ntc].skb = skb;
491 rx_ring->rx_stats.non_eop_descs++;
496 static void wx_pull_tail(struct sk_buff *skb)
498 skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
499 unsigned int pull_len;
502 /* it is valid to use page_address instead of kmap since we are
503 * working with pages allocated out of the lowmem pool per
504 * alloc_page(GFP_ATOMIC)
506 va = skb_frag_address(frag);
508 /* we need the header to contain the greater of either ETH_HLEN or
509 * 60 bytes if the skb->len is less than 60 for skb_pad.
511 pull_len = eth_get_headlen(skb->dev, va, WX_RXBUFFER_256);
513 /* align pull length to size of long to optimize memcpy performance */
514 skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
516 /* update all of the pointers */
517 skb_frag_size_sub(frag, pull_len);
518 skb_frag_off_add(frag, pull_len);
519 skb->data_len -= pull_len;
520 skb->tail += pull_len;
524 * wx_cleanup_headers - Correct corrupted or empty headers
525 * @rx_ring: rx descriptor ring packet is being transacted on
526 * @rx_desc: pointer to the EOP Rx descriptor
527 * @skb: pointer to current skb being fixed
529 * Check for corrupted packet headers caused by senders on the local L2
530 * embedded NIC switch not setting up their Tx Descriptors right. These
531 * should be very rare.
533 * Also address the case where we are pulling data in on pages only
534 * and as such no data is present in the skb header.
536 * In addition if skb is not at least 60 bytes we need to pad it so that
537 * it is large enough to qualify as a valid Ethernet frame.
539 * Returns true if an error was encountered and skb was freed.
541 static bool wx_cleanup_headers(struct wx_ring *rx_ring,
542 union wx_rx_desc *rx_desc,
545 struct net_device *netdev = rx_ring->netdev;
547 /* verify that the packet does not have any known errors */
549 unlikely(wx_test_staterr(rx_desc, WX_RXD_ERR_RXE) &&
550 !(netdev->features & NETIF_F_RXALL))) {
551 dev_kfree_skb_any(skb);
555 /* place header in linear portion of buffer */
556 if (!skb_headlen(skb))
559 /* if eth_skb_pad returns an error the skb was freed */
560 if (eth_skb_pad(skb))
566 static void wx_rx_hash(struct wx_ring *ring,
567 union wx_rx_desc *rx_desc,
572 if (!(ring->netdev->features & NETIF_F_RXHASH))
575 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
581 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
582 (WX_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
583 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
587 * wx_rx_checksum - indicate in skb if hw indicated a good cksum
588 * @ring: structure containing ring specific data
589 * @rx_desc: current Rx descriptor being processed
590 * @skb: skb currently being received and modified
592 static void wx_rx_checksum(struct wx_ring *ring,
593 union wx_rx_desc *rx_desc,
596 struct wx_dec_ptype dptype = wx_decode_ptype(WX_RXD_PKTTYPE(rx_desc));
598 skb_checksum_none_assert(skb);
599 /* Rx csum disabled */
600 if (!(ring->netdev->features & NETIF_F_RXCSUM))
603 /* if IPv4 header checksum error */
604 if ((wx_test_staterr(rx_desc, WX_RXD_STAT_IPCS) &&
605 wx_test_staterr(rx_desc, WX_RXD_ERR_IPE)) ||
606 (wx_test_staterr(rx_desc, WX_RXD_STAT_OUTERIPCS) &&
607 wx_test_staterr(rx_desc, WX_RXD_ERR_OUTERIPER))) {
608 ring->rx_stats.csum_err++;
613 /* L4 checksum offload flag must be set for the below code to work */
613 if (!wx_test_staterr(rx_desc, WX_RXD_STAT_L4CS))
616 /* Hardware can't guarantee csum if IPv6 Dest Header found */
617 if (dptype.prot != WX_DEC_PTYPE_PROT_SCTP && WX_RXD_IPV6EX(rx_desc))
620 /* if L4 checksum error */
621 if (wx_test_staterr(rx_desc, WX_RXD_ERR_TCPE)) {
622 ring->rx_stats.csum_err++;
626 /* It must be a TCP or UDP or SCTP packet with a valid checksum */
627 skb->ip_summed = CHECKSUM_UNNECESSARY;
629 /* If there is an outer header present that might contain a checksum
630 * we need to bump the checksum level by 1 to reflect the fact that
631 * we are indicating we validated the inner checksum.
633 if (dptype.etype >= WX_DEC_PTYPE_ETYPE_IG)
634 __skb_incr_checksum_unnecessary(skb);
635 ring->rx_stats.csum_good_cnt++;
638 static void wx_rx_vlan(struct wx_ring *ring, union wx_rx_desc *rx_desc,
644 if ((ring->netdev->features &
645 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) &&
646 wx_test_staterr(rx_desc, WX_RXD_STAT_VP)) {
647 idx = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
649 ethertype = ring->q_vector->wx->tpid[idx];
650 __vlan_hwaccel_put_tag(skb, htons(ethertype),
651 le16_to_cpu(rx_desc->wb.upper.vlan));
656 * wx_process_skb_fields - Populate skb header fields from Rx descriptor
657 * @rx_ring: rx descriptor ring packet is being transacted on
658 * @rx_desc: pointer to the EOP Rx descriptor
659 * @skb: pointer to current skb being populated
661 * This function checks the ring, descriptor, and packet information in
662 * order to populate the hash, checksum, protocol, and
663 * other fields within the skb.
665 static void wx_process_skb_fields(struct wx_ring *rx_ring,
666 union wx_rx_desc *rx_desc,
669 wx_rx_hash(rx_ring, rx_desc, skb);
670 wx_rx_checksum(rx_ring, rx_desc, skb);
671 wx_rx_vlan(rx_ring, rx_desc, skb);
672 skb_record_rx_queue(skb, rx_ring->queue_index);
673 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
677 * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
678 * @q_vector: structure containing interrupt and ring information
679 * @rx_ring: rx descriptor ring to transact packets on
680 * @budget: Total limit on number of packets to process
682 * This function provides a "bounce buffer" approach to Rx interrupt
683 * processing. The advantage to this is that on systems that have
684 * expensive overhead for IOMMU access this provides a means of avoiding
685 * it by maintaining the mapping of the page to the system.
687 * Returns amount of work completed.
689 static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
690 struct wx_ring *rx_ring,
693 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
694 u16 cleaned_count = wx_desc_unused(rx_ring);
697 struct wx_rx_buffer *rx_buffer;
698 union wx_rx_desc *rx_desc;
702 /* return some buffers to hardware; one at a time is too slow */
703 if (cleaned_count >= WX_RX_BUFFER_WRITE) {
704 wx_alloc_rx_buffers(rx_ring, cleaned_count);
708 rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean);
709 if (!wx_test_staterr(rx_desc, WX_RXD_STAT_DD))
712 /* This memory barrier is needed to keep us from reading
713 * any other fields out of the rx_desc until we know the
714 * descriptor has been written back
718 rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt);
720 /* retrieve a buffer from the ring */
721 skb = wx_build_skb(rx_ring, rx_buffer, rx_desc);
723 /* exit if we failed to retrieve a buffer */
725 rx_ring->rx_stats.alloc_rx_buff_failed++;
726 rx_buffer->pagecnt_bias++;
730 wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
733 /* place incomplete frames back on ring for completion */
734 if (wx_is_non_eop(rx_ring, rx_desc, skb))
737 /* verify the packet layout is correct */
738 if (wx_cleanup_headers(rx_ring, rx_desc, skb))
741 /* probably a little skewed due to removing CRC */
742 total_rx_bytes += skb->len;
744 /* populate checksum, timestamp, VLAN, and protocol */
745 wx_process_skb_fields(rx_ring, rx_desc, skb);
746 napi_gro_receive(&q_vector->napi, skb);
748 /* update budget accounting */
750 } while (likely(total_rx_packets < budget));
752 u64_stats_update_begin(&rx_ring->syncp);
753 rx_ring->stats.packets += total_rx_packets;
754 rx_ring->stats.bytes += total_rx_bytes;
755 u64_stats_update_end(&rx_ring->syncp);
756 q_vector->rx.total_packets += total_rx_packets;
757 q_vector->rx.total_bytes += total_rx_bytes;
759 return total_rx_packets;
762 static struct netdev_queue *wx_txring_txq(const struct wx_ring *ring)
764 return netdev_get_tx_queue(ring->netdev, ring->queue_index);
768 * wx_clean_tx_irq - Reclaim resources after transmit completes
769 * @q_vector: structure containing interrupt and ring information
770 * @tx_ring: tx ring to clean
771 * @napi_budget: Used to determine if we are in netpoll
773 static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
774 struct wx_ring *tx_ring, int napi_budget)
776 unsigned int budget = q_vector->wx->tx_work_limit;
777 unsigned int total_bytes = 0, total_packets = 0;
778 unsigned int i = tx_ring->next_to_clean;
779 struct wx_tx_buffer *tx_buffer;
780 union wx_tx_desc *tx_desc;
782 if (!netif_carrier_ok(tx_ring->netdev))
785 tx_buffer = &tx_ring->tx_buffer_info[i];
786 tx_desc = WX_TX_DESC(tx_ring, i);
790 union wx_tx_desc *eop_desc = tx_buffer->next_to_watch;
792 /* if next_to_watch is not set then there is no work pending */
796 /* prevent any other reads prior to eop_desc */
799 /* if DD is not set pending work has not been completed */
800 if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD)))
803 /* clear next_to_watch to prevent false hangs */
804 tx_buffer->next_to_watch = NULL;
806 /* update the statistics for this packet */
807 total_bytes += tx_buffer->bytecount;
808 total_packets += tx_buffer->gso_segs;
811 napi_consume_skb(tx_buffer->skb, napi_budget);
813 /* unmap skb header data */
814 dma_unmap_single(tx_ring->dev,
815 dma_unmap_addr(tx_buffer, dma),
816 dma_unmap_len(tx_buffer, len),
819 /* clear tx_buffer data */
820 dma_unmap_len_set(tx_buffer, len, 0);
822 /* unmap remaining buffers */
823 while (tx_desc != eop_desc) {
829 tx_buffer = tx_ring->tx_buffer_info;
830 tx_desc = WX_TX_DESC(tx_ring, 0);
833 /* unmap any remaining paged data */
834 if (dma_unmap_len(tx_buffer, len)) {
835 dma_unmap_page(tx_ring->dev,
836 dma_unmap_addr(tx_buffer, dma),
837 dma_unmap_len(tx_buffer, len),
839 dma_unmap_len_set(tx_buffer, len, 0);
843 /* move us one more past the eop_desc for start of next pkt */
849 tx_buffer = tx_ring->tx_buffer_info;
850 tx_desc = WX_TX_DESC(tx_ring, 0);
853 /* issue prefetch for next Tx descriptor */
856 /* update budget accounting */
858 } while (likely(budget));
861 tx_ring->next_to_clean = i;
862 u64_stats_update_begin(&tx_ring->syncp);
863 tx_ring->stats.bytes += total_bytes;
864 tx_ring->stats.packets += total_packets;
865 u64_stats_update_end(&tx_ring->syncp);
866 q_vector->tx.total_bytes += total_bytes;
867 q_vector->tx.total_packets += total_packets;
869 netdev_tx_completed_queue(wx_txring_txq(tx_ring),
870 total_packets, total_bytes);
872 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
873 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
874 (wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
875 /* Make sure that anybody stopping the queue after this
876 * sees the new next_to_clean.
880 if (__netif_subqueue_stopped(tx_ring->netdev,
881 tx_ring->queue_index) &&
882 netif_running(tx_ring->netdev)) {
883 netif_wake_subqueue(tx_ring->netdev,
884 tx_ring->queue_index);
885 ++tx_ring->tx_stats.restart_queue;
893 * wx_poll - NAPI polling RX/TX cleanup routine
894 * @napi: napi struct with our devices info in it
895 * @budget: amount of work driver is allowed to do this pass, in packets
897 * This function will clean all queues associated with a q_vector.
899 static int wx_poll(struct napi_struct *napi, int budget)
901 struct wx_q_vector *q_vector = container_of(napi, struct wx_q_vector, napi);
902 int per_ring_budget, work_done = 0;
903 struct wx *wx = q_vector->wx;
904 bool clean_complete = true;
905 struct wx_ring *ring;
907 wx_for_each_ring(ring, q_vector->tx) {
908 if (!wx_clean_tx_irq(q_vector, ring, budget))
909 clean_complete = false;
912 /* Exit if we are called by netpoll */
916 /* attempt to distribute budget to each queue fairly, but don't allow
917 * the budget to go below 1 because we'll exit polling
919 if (q_vector->rx.count > 1)
920 per_ring_budget = max(budget / q_vector->rx.count, 1);
922 per_ring_budget = budget;
924 wx_for_each_ring(ring, q_vector->rx) {
925 int cleaned = wx_clean_rx_irq(q_vector, ring, per_ring_budget);
927 work_done += cleaned;
928 if (cleaned >= per_ring_budget)
929 clean_complete = false;
932 /* If all work not completed, return budget and keep polling */
936 /* all work done, exit the polling mode */
937 if (likely(napi_complete_done(napi, work_done))) {
938 if (netif_running(wx->netdev))
939 wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx));
942 return min(work_done, budget - 1);
945 static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size)
947 if (likely(wx_desc_unused(tx_ring) >= size))
950 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
952 /* For the next check */
955 /* We need to check again in a case another CPU has just
956 * made room available.
958 if (likely(wx_desc_unused(tx_ring) < size))
961 /* A reprieve! - use start_queue because it doesn't call schedule */
962 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
963 ++tx_ring->tx_stats.restart_queue;
968 static u32 wx_tx_cmd_type(u32 tx_flags)
970 /* set type for advanced descriptor with frame checksum insertion */
971 u32 cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS;
973 /* set HW vlan bit if vlan is present */
974 cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_HW_VLAN, WX_TXD_VLE);
975 /* set segmentation enable bits for TSO/FSO */
976 cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSO, WX_TXD_TSE);
977 /* set timestamp bit if present */
978 cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_TSTAMP, WX_TXD_MAC_TSTAMP);
979 cmd_type |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_LINKSEC, WX_TXD_LINKSEC);
984 static void wx_tx_olinfo_status(union wx_tx_desc *tx_desc,
985 u32 tx_flags, unsigned int paylen)
987 u32 olinfo_status = paylen << WX_TXD_PAYLEN_SHIFT;
989 /* enable L4 checksum for TSO and TX checksum offload */
990 olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CSUM, WX_TXD_L4CS);
991 /* enable IPv4 checksum for TSO */
992 olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPV4, WX_TXD_IIPCS);
993 /* enable outer IPv4 checksum for TSO */
994 olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_OUTER_IPV4,
996 /* Check Context must be set if Tx switch is enabled, which it
997 * always is for the case where virtual functions are running
999 olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_CC, WX_TXD_CC);
1000 olinfo_status |= WX_SET_FLAG(tx_flags, WX_TX_FLAGS_IPSEC,
1002 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1005 static void wx_tx_map(struct wx_ring *tx_ring,
1006 struct wx_tx_buffer *first,
1009 struct sk_buff *skb = first->skb;
1010 struct wx_tx_buffer *tx_buffer;
1011 u32 tx_flags = first->tx_flags;
1012 u16 i = tx_ring->next_to_use;
1013 unsigned int data_len, size;
1014 union wx_tx_desc *tx_desc;
1019 cmd_type = wx_tx_cmd_type(tx_flags);
1020 tx_desc = WX_TX_DESC(tx_ring, i);
1021 wx_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
1023 size = skb_headlen(skb);
1024 data_len = skb->data_len;
1025 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1029 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1030 if (dma_mapping_error(tx_ring->dev, dma))
1033 /* record length, and DMA address */
1034 dma_unmap_len_set(tx_buffer, len, size);
1035 dma_unmap_addr_set(tx_buffer, dma, dma);
1037 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1039 while (unlikely(size > WX_MAX_DATA_PER_TXD)) {
1040 tx_desc->read.cmd_type_len =
1041 cpu_to_le32(cmd_type ^ WX_MAX_DATA_PER_TXD);
1045 if (i == tx_ring->count) {
1046 tx_desc = WX_TX_DESC(tx_ring, 0);
1049 tx_desc->read.olinfo_status = 0;
1051 dma += WX_MAX_DATA_PER_TXD;
1052 size -= WX_MAX_DATA_PER_TXD;
1054 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1057 if (likely(!data_len))
1060 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
1064 if (i == tx_ring->count) {
1065 tx_desc = WX_TX_DESC(tx_ring, 0);
1068 tx_desc->read.olinfo_status = 0;
1070 size = skb_frag_size(frag);
1074 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1077 tx_buffer = &tx_ring->tx_buffer_info[i];
1080 /* write last descriptor with RS and EOP bits */
1081 cmd_type |= size | WX_TXD_EOP | WX_TXD_RS;
1082 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1084 netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount);
1086 skb_tx_timestamp(skb);
1088 /* Force memory writes to complete before letting h/w know there
1089 * are new descriptors to fetch. (Only applicable for weak-ordered
1090 * memory model archs, such as IA-64).
1092 * We also need this memory barrier to make certain all of the
1093 * status bits have been updated before next_to_watch is written.
1097 /* set next_to_watch value indicating a packet is present */
1098 first->next_to_watch = tx_desc;
1101 if (i == tx_ring->count)
1104 tx_ring->next_to_use = i;
1106 wx_maybe_stop_tx(tx_ring, DESC_NEEDED);
1108 if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
1109 writel(i, tx_ring->tail);
1113 dev_err(tx_ring->dev, "TX DMA map failed\n");
1115 /* clear dma mappings for failed tx_buffer_info map */
1117 tx_buffer = &tx_ring->tx_buffer_info[i];
1118 if (dma_unmap_len(tx_buffer, len))
1119 dma_unmap_page(tx_ring->dev,
1120 dma_unmap_addr(tx_buffer, dma),
1121 dma_unmap_len(tx_buffer, len),
1123 dma_unmap_len_set(tx_buffer, len, 0);
1124 if (tx_buffer == first)
1127 i += tx_ring->count;
1131 dev_kfree_skb_any(first->skb);
1134 tx_ring->next_to_use = i;
1137 static void wx_tx_ctxtdesc(struct wx_ring *tx_ring, u32 vlan_macip_lens,
1138 u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
1140 struct wx_tx_context_desc *context_desc;
1141 u16 i = tx_ring->next_to_use;
1143 context_desc = WX_TX_CTXTDESC(tx_ring, i);
1145 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1147 /* set bits to identify this as an advanced context descriptor */
1148 type_tucmd |= WX_TXD_DTYP_CTXT;
1149 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
1150 context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
1151 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
1152 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
1155 static void wx_get_ipv6_proto(struct sk_buff *skb, int offset, u8 *nexthdr)
1157 struct ipv6hdr *hdr = (struct ipv6hdr *)(skb->data + offset);
1159 *nexthdr = hdr->nexthdr;
1160 offset += sizeof(struct ipv6hdr);
1161 while (ipv6_ext_hdr(*nexthdr)) {
1162 struct ipv6_opt_hdr _hdr, *hp;
1164 if (*nexthdr == NEXTHDR_NONE)
1166 hp = skb_header_pointer(skb, offset, sizeof(_hdr), &_hdr);
1169 if (*nexthdr == NEXTHDR_FRAGMENT)
1171 *nexthdr = hp->nexthdr;
1175 union network_header {
1177 struct ipv6hdr *ipv6;
1181 static u8 wx_encode_tx_desc_ptype(const struct wx_tx_buffer *first)
1183 u8 tun_prot = 0, l4_prot = 0, ptype = 0;
1184 struct sk_buff *skb = first->skb;
1186 if (skb->encapsulation) {
1187 union network_header hdr;
1189 switch (first->protocol) {
1190 case htons(ETH_P_IP):
1191 tun_prot = ip_hdr(skb)->protocol;
1192 ptype = WX_PTYPE_TUN_IPV4;
1194 case htons(ETH_P_IPV6):
1195 wx_get_ipv6_proto(skb, skb_network_offset(skb), &tun_prot);
1196 ptype = WX_PTYPE_TUN_IPV6;
1202 if (tun_prot == IPPROTO_IPIP) {
1203 hdr.raw = (void *)inner_ip_hdr(skb);
1204 ptype |= WX_PTYPE_PKT_IPIP;
1205 } else if (tun_prot == IPPROTO_UDP) {
1206 hdr.raw = (void *)inner_ip_hdr(skb);
1207 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1208 skb->inner_protocol != htons(ETH_P_TEB)) {
1209 ptype |= WX_PTYPE_PKT_IG;
1211 if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto
1212 == htons(ETH_P_8021Q))
1213 ptype |= WX_PTYPE_PKT_IGMV;
1215 ptype |= WX_PTYPE_PKT_IGM;
1218 } else if (tun_prot == IPPROTO_GRE) {
1219 hdr.raw = (void *)inner_ip_hdr(skb);
1220 if (skb->inner_protocol == htons(ETH_P_IP) ||
1221 skb->inner_protocol == htons(ETH_P_IPV6)) {
1222 ptype |= WX_PTYPE_PKT_IG;
1224 if (((struct ethhdr *)skb_inner_mac_header(skb))->h_proto
1225 == htons(ETH_P_8021Q))
1226 ptype |= WX_PTYPE_PKT_IGMV;
1228 ptype |= WX_PTYPE_PKT_IGM;
1234 switch (hdr.ipv4->version) {
1236 l4_prot = hdr.ipv4->protocol;
1239 wx_get_ipv6_proto(skb, skb_inner_network_offset(skb), &l4_prot);
1240 ptype |= WX_PTYPE_PKT_IPV6;
1246 switch (first->protocol) {
1247 case htons(ETH_P_IP):
1248 l4_prot = ip_hdr(skb)->protocol;
1249 ptype = WX_PTYPE_PKT_IP;
1251 case htons(ETH_P_IPV6):
1252 wx_get_ipv6_proto(skb, skb_network_offset(skb), &l4_prot);
1253 ptype = WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6;
1256 return WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC;
1261 ptype |= WX_PTYPE_TYP_TCP;
1264 ptype |= WX_PTYPE_TYP_UDP;
1267 ptype |= WX_PTYPE_TYP_SCTP;
1270 ptype |= WX_PTYPE_TYP_IP;
1277 static int wx_tso(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
1278 u8 *hdr_len, u8 ptype)
1280 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
1281 struct net_device *netdev = tx_ring->netdev;
1282 u32 l4len, tunhdr_eiplen_tunlen = 0;
1283 struct sk_buff *skb = first->skb;
1284 bool enc = skb->encapsulation;
1285 struct ipv6hdr *ipv6h;
1286 struct tcphdr *tcph;
1291 if (skb->ip_summed != CHECKSUM_PARTIAL)
1294 if (!skb_is_gso(skb))
1297 err = skb_cow_head(skb, 0);
1301 /* indicates the inner headers in the skbuff are valid. */
1302 iph = enc ? inner_ip_hdr(skb) : ip_hdr(skb);
1303 if (iph->version == 4) {
1304 tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1307 tcph->check = ~csum_tcpudp_magic(iph->saddr,
1310 first->tx_flags |= WX_TX_FLAGS_TSO |
1314 } else if (iph->version == 6 && skb_is_gso_v6(skb)) {
1315 ipv6h = enc ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
1316 tcph = enc ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1317 ipv6h->payload_len = 0;
1318 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
1321 first->tx_flags |= WX_TX_FLAGS_TSO |
1326 /* compute header lengths */
1327 l4len = enc ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb);
1328 *hdr_len = enc ? (skb_inner_transport_header(skb) - skb->data) :
1329 skb_transport_offset(skb);
1332 /* update gso size and bytecount with header size */
1333 first->gso_segs = skb_shinfo(skb)->gso_segs;
1334 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1336 /* mss_l4len_idx: use 0 as index for TSO */
1337 mss_l4len_idx = l4len << WX_TXD_L4LEN_SHIFT;
1338 mss_l4len_idx |= skb_shinfo(skb)->gso_size << WX_TXD_MSS_SHIFT;
1340 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
1342 switch (first->protocol) {
1343 case htons(ETH_P_IP):
1344 tun_prot = ip_hdr(skb)->protocol;
1345 first->tx_flags |= WX_TX_FLAGS_OUTER_IPV4;
1347 case htons(ETH_P_IPV6):
1348 tun_prot = ipv6_hdr(skb)->nexthdr;
1355 tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP;
1356 tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) <<
1357 WX_TXD_OUTER_IPLEN_SHIFT) |
1358 (((skb_inner_mac_header(skb) -
1359 skb_transport_header(skb)) >> 1) <<
1360 WX_TXD_TUNNEL_LEN_SHIFT);
1363 tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE;
1364 tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) <<
1365 WX_TXD_OUTER_IPLEN_SHIFT) |
1366 (((skb_inner_mac_header(skb) -
1367 skb_transport_header(skb)) >> 1) <<
1368 WX_TXD_TUNNEL_LEN_SHIFT);
1371 tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
1372 (char *)ip_hdr(skb)) >> 2) <<
1373 WX_TXD_OUTER_IPLEN_SHIFT;
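/* Illustrative example (a VXLAN-style encapsulation is assumed): with a
 * 20-byte outer IPv4 header and 16 bytes between the transport header and the
 * inner MAC header, the shifts above encode 20 >> 2 = 5 in the outer IP
 * length field (4-byte units) and 16 >> 1 = 8 in the tunnel length field
 * (2-byte units).
 */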
1378 vlan_macip_lens = skb_inner_network_header_len(skb) >> 1;
1380 vlan_macip_lens = skb_network_header_len(skb) >> 1;
1383 vlan_macip_lens |= skb_network_offset(skb) << WX_TXD_MACLEN_SHIFT;
1384 vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK;
1386 type_tucmd = ptype << 24;
1387 if (skb->vlan_proto == htons(ETH_P_8021AD) &&
1388 netdev->features & NETIF_F_HW_VLAN_STAG_TX)
1389 type_tucmd |= WX_SET_FLAG(first->tx_flags,
1390 WX_TX_FLAGS_HW_VLAN,
1391 0x1 << WX_TXD_TAG_TPID_SEL_SHIFT);
1392 wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen,
1393 type_tucmd, mss_l4len_idx);
1398 static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
1401 u32 tunhdr_eiplen_tunlen = 0, vlan_macip_lens = 0;
1402 struct net_device *netdev = tx_ring->netdev;
1403 u32 mss_l4len_idx = 0, type_tucmd;
1404 struct sk_buff *skb = first->skb;
1407 if (skb->ip_summed != CHECKSUM_PARTIAL) {
1408 if (!(first->tx_flags & WX_TX_FLAGS_HW_VLAN) &&
1409 !(first->tx_flags & WX_TX_FLAGS_CC))
1411 vlan_macip_lens = skb_network_offset(skb) <<
1412 WX_TXD_MACLEN_SHIFT;
1417 struct ipv6hdr *ipv6;
1421 struct tcphdr *tcphdr;
1425 if (skb->encapsulation) {
1426 network_hdr.raw = skb_inner_network_header(skb);
1427 transport_hdr.raw = skb_inner_transport_header(skb);
1428 vlan_macip_lens = skb_network_offset(skb) <<
1429 WX_TXD_MACLEN_SHIFT;
1430 switch (first->protocol) {
1431 case htons(ETH_P_IP):
1432 tun_prot = ip_hdr(skb)->protocol;
1434 case htons(ETH_P_IPV6):
1435 tun_prot = ipv6_hdr(skb)->nexthdr;
1442 tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_UDP;
1443 tunhdr_eiplen_tunlen |=
1444 ((skb_network_header_len(skb) >> 2) <<
1445 WX_TXD_OUTER_IPLEN_SHIFT) |
1446 (((skb_inner_mac_header(skb) -
1447 skb_transport_header(skb)) >> 1) <<
1448 WX_TXD_TUNNEL_LEN_SHIFT);
1451 tunhdr_eiplen_tunlen = WX_TXD_TUNNEL_GRE;
1452 tunhdr_eiplen_tunlen |= ((skb_network_header_len(skb) >> 2) <<
1453 WX_TXD_OUTER_IPLEN_SHIFT) |
1454 (((skb_inner_mac_header(skb) -
1455 skb_transport_header(skb)) >> 1) <<
1456 WX_TXD_TUNNEL_LEN_SHIFT);
1459 tunhdr_eiplen_tunlen = (((char *)inner_ip_hdr(skb) -
1460 (char *)ip_hdr(skb)) >> 2) <<
1461 WX_TXD_OUTER_IPLEN_SHIFT;
1468 network_hdr.raw = skb_network_header(skb);
1469 transport_hdr.raw = skb_transport_header(skb);
1470 vlan_macip_lens = skb_network_offset(skb) <<
1471 WX_TXD_MACLEN_SHIFT;
1474 switch (network_hdr.ipv4->version) {
1476 vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
1477 l4_prot = network_hdr.ipv4->protocol;
1480 vlan_macip_lens |= (transport_hdr.raw - network_hdr.raw) >> 1;
1481 l4_prot = network_hdr.ipv6->nexthdr;
1489 mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
1493 mss_l4len_idx = sizeof(struct sctphdr) <<
1497 mss_l4len_idx = sizeof(struct udphdr) <<
1504 /* update TX checksum flag */
1505 first->tx_flags |= WX_TX_FLAGS_CSUM;
1507 first->tx_flags |= WX_TX_FLAGS_CC;
1508 /* vlan_macip_lens: MACLEN, VLAN tag */
1509 vlan_macip_lens |= first->tx_flags & WX_TX_FLAGS_VLAN_MASK;
1511 type_tucmd = ptype << 24;
1512 if (skb->vlan_proto == htons(ETH_P_8021AD) &&
1513 netdev->features & NETIF_F_HW_VLAN_STAG_TX)
1514 type_tucmd |= WX_SET_FLAG(first->tx_flags,
1515 WX_TX_FLAGS_HW_VLAN,
1516 0x1 << WX_TXD_TAG_TPID_SEL_SHIFT);
1517 wx_tx_ctxtdesc(tx_ring, vlan_macip_lens, tunhdr_eiplen_tunlen,
1518 type_tucmd, mss_l4len_idx);
1521 static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
1522 struct wx_ring *tx_ring)
1524 u16 count = TXD_USE_COUNT(skb_headlen(skb));
1525 struct wx_tx_buffer *first;
1526 u8 hdr_len = 0, ptype;
1531 /* need: 1 descriptor per page * PAGE_SIZE/WX_MAX_DATA_PER_TXD,
1532 * + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD,
1533 * + 2 desc gap to keep tail from touching head,
1534 * + 1 desc for context descriptor,
1535 * otherwise try next time
1537 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1538 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->
1541 if (wx_maybe_stop_tx(tx_ring, count + 3)) {
1542 tx_ring->tx_stats.tx_busy++;
1543 return NETDEV_TX_BUSY;
1546 /* record the location of the first descriptor for this packet */
1547 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1549 first->bytecount = skb->len;
1550 first->gso_segs = 1;
1552 /* if we have a HW VLAN tag being added default to the HW one */
1553 if (skb_vlan_tag_present(skb)) {
1554 tx_flags |= skb_vlan_tag_get(skb) << WX_TX_FLAGS_VLAN_SHIFT;
1555 tx_flags |= WX_TX_FLAGS_HW_VLAN;
1558 /* record initial flags and protocol */
1559 first->tx_flags = tx_flags;
1560 first->protocol = vlan_get_protocol(skb);
1562 ptype = wx_encode_tx_desc_ptype(first);
1564 tso = wx_tso(tx_ring, first, &hdr_len, ptype);
1568 wx_tx_csum(tx_ring, first, ptype);
1569 wx_tx_map(tx_ring, first, hdr_len);
1571 return NETDEV_TX_OK;
1573 dev_kfree_skb_any(first->skb);
1576 return NETDEV_TX_OK;
1579 netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
1580 struct net_device *netdev)
1582 unsigned int r_idx = skb->queue_mapping;
1583 struct wx *wx = netdev_priv(netdev);
1584 struct wx_ring *tx_ring;
1586 if (!netif_carrier_ok(netdev)) {
1587 dev_kfree_skb_any(skb);
1588 return NETDEV_TX_OK;
1591 /* The minimum packet size for olinfo paylen is 17, so pad the skb
1592 * in order to meet this minimum size requirement.
1594 if (skb_put_padto(skb, 17))
1595 return NETDEV_TX_OK;
1597 if (r_idx >= wx->num_tx_queues)
1598 r_idx = r_idx % wx->num_tx_queues;
1599 tx_ring = wx->tx_ring[r_idx];
1601 return wx_xmit_frame_ring(skb, tx_ring);
1603 EXPORT_SYMBOL(wx_xmit_frame);
1605 void wx_napi_enable_all(struct wx *wx)
1607 struct wx_q_vector *q_vector;
1610 for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
1611 q_vector = wx->q_vector[q_idx];
1612 napi_enable(&q_vector->napi);
1615 EXPORT_SYMBOL(wx_napi_enable_all);
1617 void wx_napi_disable_all(struct wx *wx)
1619 struct wx_q_vector *q_vector;
1622 for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
1623 q_vector = wx->q_vector[q_idx];
1624 napi_disable(&q_vector->napi);
1627 EXPORT_SYMBOL(wx_napi_disable_all);
1630 * wx_set_rss_queues: Allocate queues for RSS
1631 * @wx: board private structure to initialize
1633 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
1634 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
1637 static void wx_set_rss_queues(struct wx *wx)
1639 wx->num_rx_queues = wx->mac.max_rx_queues;
1640 wx->num_tx_queues = wx->mac.max_tx_queues;
1643 static void wx_set_num_queues(struct wx *wx)
1645 /* Start with base case */
1646 wx->num_rx_queues = 1;
1647 wx->num_tx_queues = 1;
1648 wx->queues_per_pool = 1;
1650 wx_set_rss_queues(wx);
1654 * wx_acquire_msix_vectors - acquire MSI-X vectors
1655 * @wx: board private structure
1657 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
1658 * return a negative error code if unable to acquire MSI-X vectors for any reason.
1661 static int wx_acquire_msix_vectors(struct wx *wx)
1663 struct irq_affinity affd = {0, };
1666 nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors);
1668 wx->msix_entries = kcalloc(nvecs,
1669 sizeof(struct msix_entry),
1671 if (!wx->msix_entries)
1674 nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
1676 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
1679 wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs);
1680 kfree(wx->msix_entries);
1681 wx->msix_entries = NULL;
1685 for (i = 0; i < nvecs; i++) {
1686 wx->msix_entries[i].entry = i;
1687 wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i);
1690 /* one for msix_other */
1692 wx->num_q_vectors = nvecs;
1693 wx->num_rx_queues = nvecs;
1694 wx->num_tx_queues = nvecs;
1700 * wx_set_interrupt_capability - set MSI-X or MSI if supported
1701 * @wx: board private structure to initialize
1703 * Attempt to configure the interrupts using the best available
1704 * capabilities of the hardware and the kernel.
1706 static int wx_set_interrupt_capability(struct wx *wx)
1708 struct pci_dev *pdev = wx->pdev;
1711 /* We will try to get MSI-X interrupts first */
1712 ret = wx_acquire_msix_vectors(wx);
1713 if (ret == 0 || (ret == -ENOMEM))
1716 wx->num_rx_queues = 1;
1717 wx->num_tx_queues = 1;
1718 wx->num_q_vectors = 1;
1720 /* minimum one for queue, one for misc */
1722 nvecs = pci_alloc_irq_vectors(pdev, nvecs,
1723 nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
1725 if (pdev->msi_enabled)
1726 wx_err(wx, "Fallback to MSI.\n");
1728 wx_err(wx, "Fallback to LEGACY.\n");
1730 wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. Error: %d\n", nvecs);
1734 pdev->irq = pci_irq_vector(pdev, 0);
1740 * wx_cache_ring_rss - Descriptor ring to register mapping for RSS
1741 * @wx: board private structure to initialize
1743 * Cache the descriptor ring offsets for RSS.
1746 static void wx_cache_ring_rss(struct wx *wx)
1750 for (i = 0; i < wx->num_rx_queues; i++)
1751 wx->rx_ring[i]->reg_idx = i;
1753 for (i = 0; i < wx->num_tx_queues; i++)
1754 wx->tx_ring[i]->reg_idx = i;
1757 static void wx_add_ring(struct wx_ring *ring, struct wx_ring_container *head)
1759 ring->next = head->ring;
1765 * wx_alloc_q_vector - Allocate memory for a single interrupt vector
1766 * @wx: board private structure to initialize
1767 * @v_count: q_vectors allocated on wx, used for ring interleaving
1768 * @v_idx: index of vector in wx struct
1769 * @txr_count: total number of Tx rings to allocate
1770 * @txr_idx: index of first Tx ring to allocate
1771 * @rxr_count: total number of Rx rings to allocate
1772 * @rxr_idx: index of first Rx ring to allocate
1774 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1776 static int wx_alloc_q_vector(struct wx *wx,
1777 unsigned int v_count, unsigned int v_idx,
1778 unsigned int txr_count, unsigned int txr_idx,
1779 unsigned int rxr_count, unsigned int rxr_idx)
1781 struct wx_q_vector *q_vector;
1782 int ring_count, default_itr;
1783 struct wx_ring *ring;
1785 /* note this will allocate space for the ring structure as well! */
1786 ring_count = txr_count + rxr_count;
1788 q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
1793 /* initialize NAPI */
1794 netif_napi_add(wx->netdev, &q_vector->napi,
1797 /* tie q_vector and wx together */
1798 wx->q_vector[v_idx] = q_vector;
1800 q_vector->v_idx = v_idx;
1801 if (cpu_online(v_idx))
1802 q_vector->numa_node = cpu_to_node(v_idx);
1804 /* initialize pointer to rings */
1805 ring = q_vector->ring;
1807 if (wx->mac.type == wx_mac_sp)
1808 default_itr = WX_12K_ITR;
1810 default_itr = WX_7K_ITR;
1811 /* initialize ITR */
1812 if (txr_count && !rxr_count)
1813 /* tx only vector */
1814 q_vector->itr = wx->tx_itr_setting ?
1815 default_itr : wx->tx_itr_setting;
1817 /* rx or rx/tx vector */
1818 q_vector->itr = wx->rx_itr_setting ?
1819 default_itr : wx->rx_itr_setting;
1822 /* assign generic ring traits */
1823 ring->dev = &wx->pdev->dev;
1824 ring->netdev = wx->netdev;
1826 /* configure backlink on ring */
1827 ring->q_vector = q_vector;
1829 /* update q_vector Tx values */
1830 wx_add_ring(ring, &q_vector->tx);
1832 /* apply Tx specific ring traits */
1833 ring->count = wx->tx_ring_count;
1835 ring->queue_index = txr_idx;
1837 /* assign ring to wx */
1838 wx->tx_ring[txr_idx] = ring;
1840 /* update count and index */
1844 /* push pointer to next ring */
1849 /* assign generic ring traits */
1850 ring->dev = &wx->pdev->dev;
1851 ring->netdev = wx->netdev;
1853 /* configure backlink on ring */
1854 ring->q_vector = q_vector;
1856 /* update q_vector Rx values */
1857 wx_add_ring(ring, &q_vector->rx);
1859 /* apply Rx specific ring traits */
1860 ring->count = wx->rx_ring_count;
1861 ring->queue_index = rxr_idx;
1863 /* assign ring to wx */
1864 wx->rx_ring[rxr_idx] = ring;
1866 /* update count and index */
1870 /* push pointer to next ring */
1878 * wx_free_q_vector - Free memory allocated for specific interrupt vector
1879 * @wx: board private structure to initialize
1880 * @v_idx: Index of vector to be freed
1882 * This function frees the memory allocated to the q_vector. In addition if
1883 * NAPI is enabled it will delete any references to the NAPI struct prior
1884 * to freeing the q_vector.
1886 static void wx_free_q_vector(struct wx *wx, int v_idx)
1888 struct wx_q_vector *q_vector = wx->q_vector[v_idx];
1889 struct wx_ring *ring;
1891 wx_for_each_ring(ring, q_vector->tx)
1892 wx->tx_ring[ring->queue_index] = NULL;
1894 wx_for_each_ring(ring, q_vector->rx)
1895 wx->rx_ring[ring->queue_index] = NULL;
1897 wx->q_vector[v_idx] = NULL;
1898 netif_napi_del(&q_vector->napi);
1899 kfree_rcu(q_vector, rcu);
1903 * wx_alloc_q_vectors - Allocate memory for interrupt vectors
1904 * @wx: board private structure to initialize
1906 * We allocate one q_vector per queue interrupt. If allocation fails we return -ENOMEM.
1909 static int wx_alloc_q_vectors(struct wx *wx)
1911 unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
1912 unsigned int rxr_remaining = wx->num_rx_queues;
1913 unsigned int txr_remaining = wx->num_tx_queues;
1914 unsigned int q_vectors = wx->num_q_vectors;
1918 for (; v_idx < q_vectors; v_idx++) {
1919 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
1920 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
1921 err = wx_alloc_q_vector(wx, q_vectors, v_idx,
1928 /* update counts and index */
1929 rxr_remaining -= rqpv;
1930 txr_remaining -= tqpv;
1938 wx->num_tx_queues = 0;
1939 wx->num_rx_queues = 0;
1940 wx->num_q_vectors = 0;
1943 wx_free_q_vector(wx, v_idx);
1949 * wx_free_q_vectors - Free memory allocated for interrupt vectors
1950 * @wx: board private structure to initialize
1952 * This function frees the memory allocated to the q_vectors. In addition if
1953 * NAPI is enabled it will delete any references to the NAPI struct prior
1954 * to freeing the q_vector.
1956 static void wx_free_q_vectors(struct wx *wx)
1958 int v_idx = wx->num_q_vectors;
1960 wx->num_tx_queues = 0;
1961 wx->num_rx_queues = 0;
1962 wx->num_q_vectors = 0;
1965 wx_free_q_vector(wx, v_idx);
1968 void wx_reset_interrupt_capability(struct wx *wx)
1970 struct pci_dev *pdev = wx->pdev;
1972 if (!pdev->msi_enabled && !pdev->msix_enabled)
1975 if (pdev->msix_enabled) {
1976 kfree(wx->msix_entries);
1977 wx->msix_entries = NULL;
1979 pci_free_irq_vectors(wx->pdev);
1981 EXPORT_SYMBOL(wx_reset_interrupt_capability);
1984 * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings
1985 * @wx: board private structure to clear interrupt scheme on
1987 * We go through and clear interrupt specific resources and reset the structure
1988 * to pre-load conditions
1990 void wx_clear_interrupt_scheme(struct wx *wx)
1992 wx_free_q_vectors(wx);
1993 wx_reset_interrupt_capability(wx);
1995 EXPORT_SYMBOL(wx_clear_interrupt_scheme);
1997 int wx_init_interrupt_scheme(struct wx *wx)
2001 /* Number of supported queues */
2002 wx_set_num_queues(wx);
2004 /* Set interrupt mode */
2005 ret = wx_set_interrupt_capability(wx);
2007 wx_err(wx, "Failed to allocate IRQ vectors.\n");
2011 /* Allocate memory for queues */
2012 ret = wx_alloc_q_vectors(wx);
2014 wx_err(wx, "Unable to allocate memory for queue vectors.\n");
2015 wx_reset_interrupt_capability(wx);
2019 wx_cache_ring_rss(wx);
2023 EXPORT_SYMBOL(wx_init_interrupt_scheme);
2025 irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data)
2027 struct wx_q_vector *q_vector = data;
2029 /* EIAM disabled interrupts (on this vector) for us */
2030 if (q_vector->rx.ring || q_vector->tx.ring)
2031 napi_schedule_irqoff(&q_vector->napi);
2035 EXPORT_SYMBOL(wx_msix_clean_rings);
2037 void wx_free_irq(struct wx *wx)
2039 struct pci_dev *pdev = wx->pdev;
2042 if (!(pdev->msix_enabled)) {
2043 free_irq(pdev->irq, wx);
2047 for (vector = 0; vector < wx->num_q_vectors; vector++) {
2048 struct wx_q_vector *q_vector = wx->q_vector[vector];
2049 struct msix_entry *entry = &wx->msix_entries[vector];
2051 /* free only the irqs that were actually requested */
2052 if (!q_vector->rx.ring && !q_vector->tx.ring)
2055 free_irq(entry->vector, q_vector);
2058 if (wx->mac.type == wx_mac_em)
2059 free_irq(wx->msix_entries[vector].vector, wx);
2061 EXPORT_SYMBOL(wx_free_irq);
2064 * wx_setup_isb_resources - allocate interrupt status resources
2065 * @wx: board private structure
2067 * Return 0 on success, negative on failure
2069 int wx_setup_isb_resources(struct wx *wx)
2071 struct pci_dev *pdev = wx->pdev;
2073 wx->isb_mem = dma_alloc_coherent(&pdev->dev,
2078 wx_err(wx, "Alloc isb_mem failed\n");
2084 EXPORT_SYMBOL(wx_setup_isb_resources);
2087 * wx_free_isb_resources - free interrupt status block resources
2088 * @wx: board private structure
2092 void wx_free_isb_resources(struct wx *wx)
2094 struct pci_dev *pdev = wx->pdev;
2096 dma_free_coherent(&pdev->dev, sizeof(u32) * 4,
2097 wx->isb_mem, wx->isb_dma);
2100 EXPORT_SYMBOL(wx_free_isb_resources);
2102 u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx)
2106 cur_tag = wx->isb_mem[WX_ISB_HEADER];
2107 wx->isb_tag[idx] = cur_tag;
2109 return (__force u32)cpu_to_le32(wx->isb_mem[idx]);
2111 EXPORT_SYMBOL(wx_misc_isb);
2114 * wx_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
2115 * @wx: pointer to wx struct
2116 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
2117 * @queue: queue to map the corresponding interrupt to
2118 * @msix_vector: the vector to map to the corresponding queue
2121 static void wx_set_ivar(struct wx *wx, s8 direction,
2122 u16 queue, u16 msix_vector)
2126 if (direction == -1) {
2128 msix_vector |= WX_PX_IVAR_ALLOC_VAL;
2130 ivar = rd32(wx, WX_PX_MISC_IVAR);
2131 ivar &= ~(0xFF << index);
2132 ivar |= (msix_vector << index);
2133 wr32(wx, WX_PX_MISC_IVAR, ivar);
2135 /* tx or rx causes */
2136 msix_vector |= WX_PX_IVAR_ALLOC_VAL;
2137 index = ((16 * (queue & 1)) + (8 * direction));
2138 ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
2139 ivar &= ~(0xFF << index);
2140 ivar |= (msix_vector << index);
2141 wr32(wx, WX_PX_IVAR(queue >> 1), ivar);
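/* Illustrative example (derived from the index math above): each WX_PX_IVAR
 * register carries one byte per cause for a pair of queues, so an Rx
 * interrupt (direction 0) on queue 3 lands at index 16 * (3 & 1) + 8 * 0 = 16,
 * i.e. bits 23:16 of WX_PX_IVAR(1).
 */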
2146 * wx_write_eitr - write EITR register in hardware specific way
2147 * @q_vector: structure containing interrupt and ring information
2149 * This function is made to be called by ethtool and by the driver
2150 * when it needs to update EITR registers at runtime. Hardware
2151 * specific quirks/differences are taken care of here.
2153 static void wx_write_eitr(struct wx_q_vector *q_vector)
2155 struct wx *wx = q_vector->wx;
2156 int v_idx = q_vector->v_idx;
2159 if (wx->mac.type == wx_mac_sp)
2160 itr_reg = q_vector->itr & WX_SP_MAX_EITR;
2162 itr_reg = q_vector->itr & WX_EM_MAX_EITR;
2164 itr_reg |= WX_PX_ITR_CNT_WDIS;
2166 wr32(wx, WX_PX_ITR(v_idx), itr_reg);
2170 * wx_configure_vectors - Configure vectors for hardware
2171 * @wx: board private structure
2173 * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY interrupts.
2176 void wx_configure_vectors(struct wx *wx)
2178 struct pci_dev *pdev = wx->pdev;
2182 if (pdev->msix_enabled) {
2183 /* Populate MSIX to EITR Select */
2184 wr32(wx, WX_PX_ITRSEL, eitrsel);
2185 /* use EIAM to auto-mask when MSI-X interrupt is asserted
2186 * this saves a register write for every interrupt
2188 wr32(wx, WX_PX_GPIE, WX_PX_GPIE_MODEL);
2190 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
2191 * specifically only auto mask tx and rx interrupts.
2193 wr32(wx, WX_PX_GPIE, 0);
2196 /* Populate the IVAR table and set the ITR values to the
2197 * corresponding register.
2199 for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) {
2200 struct wx_q_vector *q_vector = wx->q_vector[v_idx];
2201 struct wx_ring *ring;
2203 wx_for_each_ring(ring, q_vector->rx)
2204 wx_set_ivar(wx, 0, ring->reg_idx, v_idx);
2206 wx_for_each_ring(ring, q_vector->tx)
2207 wx_set_ivar(wx, 1, ring->reg_idx, v_idx);
2209 wx_write_eitr(q_vector);
2212 wx_set_ivar(wx, -1, 0, v_idx);
2213 if (pdev->msix_enabled)
2214 wr32(wx, WX_PX_ITR(v_idx), 1950);
2216 EXPORT_SYMBOL(wx_configure_vectors);
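
/*
 * After the per-queue loop in wx_configure_vectors(), v_idx equals
 * wx->num_q_vectors, so the trailing wx_set_ivar(wx, -1, 0, v_idx) routes
 * the "other"/misc causes to the vector following the queue vectors.  The
 * fixed value 1950 written to that vector's WX_PX_ITR register is taken as
 * this driver's default moderation setting for the misc interrupt; the
 * unit of the field is hardware specific.
 */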

/**
 * wx_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void wx_clean_rx_ring(struct wx_ring *rx_ring)
{
	struct wx_rx_buffer *rx_buffer;
	u16 i = rx_ring->next_to_clean;

	rx_buffer = &rx_ring->rx_buffer_info[i];

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		if (rx_buffer->skb) {
			struct sk_buff *skb = rx_buffer->skb;

			if (WX_CB(skb)->page_released)
				page_pool_put_full_page(rx_ring->page_pool,
							rx_buffer->page,
							false);

			dev_kfree_skb(skb);
		}

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_buffer->dma,
					      rx_buffer->page_offset,
					      WX_RX_BUFSZ,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		page_pool_put_full_page(rx_ring->page_pool,
					rx_buffer->page,
					false);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);

		i++;
		rx_buffer++;
		if (i == rx_ring->count) {
			i = 0;
			rx_buffer = rx_ring->rx_buffer_info;
		}
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * wx_clean_all_rx_rings - Free Rx Buffers for all queues
 * @wx: board private structure
 **/
void wx_clean_all_rx_rings(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->num_rx_queues; i++)
		wx_clean_rx_ring(wx->rx_ring[i]);
}
EXPORT_SYMBOL(wx_clean_all_rx_rings);

/**
 * wx_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void wx_free_rx_resources(struct wx_ring *rx_ring)
{
	wx_clean_rx_ring(rx_ring);
	kvfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);
	rx_ring->desc = NULL;

	if (rx_ring->page_pool) {
		page_pool_destroy(rx_ring->page_pool);
		rx_ring->page_pool = NULL;
	}
}

/**
 * wx_free_all_rx_resources - Free Rx Resources for All Queues
 * @wx: pointer to hardware structure
 *
 * Free all receive software resources
 **/
static void wx_free_all_rx_resources(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->num_rx_queues; i++)
		wx_free_rx_resources(wx->rx_ring[i]);
}

/**
 * wx_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void wx_clean_tx_ring(struct wx_ring *tx_ring)
{
	struct wx_tx_buffer *tx_buffer;
	u16 i = tx_ring->next_to_clean;

	tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union wx_tx_desc *eop_desc, *tx_desc;

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = WX_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = WX_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	netdev_tx_reset_queue(wx_txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * wx_clean_all_tx_rings - Free Tx Buffers for all queues
 * @wx: board private structure
 **/
void wx_clean_all_tx_rings(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->num_tx_queues; i++)
		wx_clean_tx_ring(wx->tx_ring[i]);
}
EXPORT_SYMBOL(wx_clean_all_tx_rings);

/**
 * wx_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void wx_free_tx_resources(struct wx_ring *tx_ring)
{
	wx_clean_tx_ring(tx_ring);
	kvfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * wx_free_all_tx_resources - Free Tx Resources for All Queues
 * @wx: pointer to hardware structure
 *
 * Free all transmit software resources
 **/
static void wx_free_all_tx_resources(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->num_tx_queues; i++)
		wx_free_tx_resources(wx->tx_ring[i]);
}

void wx_free_resources(struct wx *wx)
{
	wx_free_isb_resources(wx);
	wx_free_all_rx_resources(wx);
	wx_free_all_tx_resources(wx);
}
EXPORT_SYMBOL(wx_free_resources);
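
/*
 * wx_free_resources() tears down the ISB, Rx and Tx resources in the
 * reverse of the order wx_setup_resources() builds them (Tx, then Rx, then
 * ISB), so a fully opened device can always be unwound with this single
 * call.
 */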

static int wx_alloc_page_pool(struct wx_ring *rx_ring)
{
	int ret = 0;

	struct page_pool_params pp_params = {
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order = 0,
		.pool_size = rx_ring->size,
		.nid = dev_to_node(rx_ring->dev),
		.dev = rx_ring->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = 0,
		.max_len = PAGE_SIZE,
	};

	rx_ring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx_ring->page_pool)) {
		ret = PTR_ERR(rx_ring->page_pool);
		rx_ring->page_pool = NULL;
	}

	return ret;
}
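
/*
 * The page pool created above is per Rx ring and uses PP_FLAG_DMA_MAP |
 * PP_FLAG_DMA_SYNC_DEV, so page_pool maps each page and keeps the device
 * view coherent up to .max_len; the Rx cleanup path above therefore only
 * syncs the CPU side for the region it actually touched before recycling
 * pages back with page_pool_put_full_page().
 */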

/**
 * wx_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int wx_setup_rx_resources(struct wx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int orig_node = dev_to_node(dev);
	int numa_node = NUMA_NO_NODE;
	int size, ret;

	size = sizeof(struct wx_rx_buffer) * rx_ring->count;

	if (rx_ring->q_vector)
		numa_node = rx_ring->q_vector->numa_node;

	rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	set_dev_node(dev, numa_node);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		set_dev_node(dev, orig_node);
		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
						   &rx_ring->dma, GFP_KERNEL);
	}

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	ret = wx_alloc_page_pool(rx_ring);
	if (ret < 0) {
		dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret);
		goto err_desc;
	}

	return 0;

err_desc:
	dma_free_coherent(dev, rx_ring->size, rx_ring->desc, rx_ring->dma);
err:
	kvfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * wx_setup_all_rx_resources - allocate all queues Rx resources
 * @wx: pointer to hardware structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int wx_setup_all_rx_resources(struct wx *wx)
{
	int i, err = 0;

	for (i = 0; i < wx->num_rx_queues; i++) {
		err = wx_setup_rx_resources(wx->rx_ring[i]);
		if (!err)
			continue;

		wx_err(wx, "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

	return 0;
err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		wx_free_rx_resources(wx->rx_ring[i]);
	return err;
}

/**
 * wx_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int wx_setup_tx_resources(struct wx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int orig_node = dev_to_node(dev);
	int numa_node = NUMA_NO_NODE;
	int size;

	size = sizeof(struct wx_tx_buffer) * tx_ring->count;

	if (tx_ring->q_vector)
		numa_node = tx_ring->q_vector->numa_node;

	tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	set_dev_node(dev, numa_node);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		set_dev_node(dev, orig_node);
		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
						   &tx_ring->dma, GFP_KERNEL);
	}

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	kvfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * wx_setup_all_tx_resources - allocate all queues Tx resources
 * @wx: pointer to private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int wx_setup_all_tx_resources(struct wx *wx)
{
	int i, err = 0;

	for (i = 0; i < wx->num_tx_queues; i++) {
		err = wx_setup_tx_resources(wx->tx_ring[i]);
		if (!err)
			continue;

		wx_err(wx, "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		wx_free_tx_resources(wx->tx_ring[i]);
	return err;
}

int wx_setup_resources(struct wx *wx)
{
	int err;

	/* allocate transmit descriptors */
	err = wx_setup_all_tx_resources(wx);
	if (err)
		return err;

	/* allocate receive descriptors */
	err = wx_setup_all_rx_resources(wx);
	if (err)
		goto err_free_tx;

	err = wx_setup_isb_resources(wx);
	if (err)
		goto err_free_rx;

	return 0;

err_free_rx:
	wx_free_all_rx_resources(wx);
err_free_tx:
	wx_free_all_tx_resources(wx);

	return err;
}
EXPORT_SYMBOL(wx_setup_resources);
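
/*
 * Typical pairing, as a sketch (the callback names are illustrative; the
 * drivers built on libwx wire these into their own open/stop paths):
 *
 *	ndo_open:  wx_setup_resources(wx);   // Tx, Rx and ISB allocations
 *	ndo_stop:  wx_free_resources(wx);    // full teardown
 *
 * On failure wx_setup_resources() already frees every Rx/Tx ring it set
 * up, so callers do not need to unwind partially allocated rings
 * themselves.
 */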

/**
 * wx_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: storage space for 64bit statistics
 **/
void wx_get_stats64(struct net_device *netdev,
		    struct rtnl_link_stats64 *stats)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_hw_stats *hwstats;
	int i;

	wx_update_stats(wx);

	rcu_read_lock();
	for (i = 0; i < wx->num_rx_queues; i++) {
		struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin(&ring->syncp);
				packets = ring->stats.packets;
				bytes = ring->stats.bytes;
			} while (u64_stats_fetch_retry(&ring->syncp, start));
			stats->rx_packets += packets;
			stats->rx_bytes += bytes;
		}
	}

	for (i = 0; i < wx->num_tx_queues; i++) {
		struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]);
		u64 bytes, packets;
		unsigned int start;

		if (ring) {
			do {
				start = u64_stats_fetch_begin(&ring->syncp);
				packets = ring->stats.packets;
				bytes = ring->stats.bytes;
			} while (u64_stats_fetch_retry(&ring->syncp,
						       start));
			stats->tx_packets += packets;
			stats->tx_bytes += bytes;
		}
	}

	rcu_read_unlock();

	hwstats = &wx->stats;
	stats->rx_errors = hwstats->crcerrs + hwstats->rlec;
	stats->multicast = hwstats->qmprc;
	stats->rx_length_errors = hwstats->rlec;
	stats->rx_crc_errors = hwstats->crcerrs;
}
EXPORT_SYMBOL(wx_get_stats64);
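
/*
 * wx_get_stats64() combines two sources: per-ring software byte/packet
 * counters, read with the u64_stats seqcount so 32-bit readers see a
 * consistent snapshot while the rings stay live under RCU, and the
 * hardware MIB counters cached in wx->stats (CRC errors, length errors,
 * multicast) refreshed by wx_update_stats() at the top of the function.
 */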

int wx_set_features(struct net_device *netdev, netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct wx *wx = netdev_priv(netdev);

	if (changed & NETIF_F_RXHASH)
		wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
		      WX_RDB_RA_CTL_RSS_EN);
	else
		wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, 0);

	if (changed &
	    (NETIF_F_HW_VLAN_CTAG_RX |
	     NETIF_F_HW_VLAN_STAG_RX))
		wx_set_rx_mode(netdev);

	return 0;
}
EXPORT_SYMBOL(wx_set_features);

MODULE_LICENSE("GPL");