1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
4 #include <linux/prefetch.h>
5 #include <linux/bpf_trace.h>
8 #include "i40e_trace.h"
9 #include "i40e_prototype.h"
10 #include "i40e_txrx_common.h"
13 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
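/* EOP marks the last descriptor of a frame and RS requests a completion
 * writeback for it; i40e_program_fdir_filter() below sets both (plus the
 * DUMMY bit) on the descriptor it queues.
 */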
15 * i40e_fdir - Generate a Flow Director descriptor based on fdata
16 * @tx_ring: Tx ring to send buffer on
17 * @fdata: Flow director filter data
18 * @add: Indicate if we are adding a rule or deleting one
21 static void i40e_fdir(struct i40e_ring *tx_ring,
22 struct i40e_fdir_filter *fdata, bool add)
24 struct i40e_filter_program_desc *fdir_desc;
25 struct i40e_pf *pf = tx_ring->vsi->back;
26 u32 flex_ptype, dtype_cmd;
29 /* grab the next descriptor */
30 i = tx_ring->next_to_use;
31 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
34 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
36 flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
37 (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
39 flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
40 (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
42 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
43 (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
45 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
46 (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
48 /* Use LAN VSI Id if not programmed by user */
49 flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
50 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
51 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
53 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
56 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
57 I40E_TXD_FLTR_QW1_PCMD_SHIFT :
58 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
59 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
61 dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
62 (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
64 dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
65 (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
67 if (fdata->cnt_index) {
68 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
69 dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
70 ((u32)fdata->cnt_index <<
71 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
74 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
75 fdir_desc->rsvd = cpu_to_le32(0);
76 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
77 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
80 #define I40E_FD_CLEAN_DELAY 10
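/* I40E_FD_CLEAN_DELAY bounds the number of 1 ms sleeps
 * i40e_program_fdir_filter() performs while waiting for two free
 * descriptors on the FDIR ring.
 */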
82 * i40e_program_fdir_filter - Program a Flow Director filter
83 * @fdir_data: Packet data that will be filter parameters
84 * @raw_packet: the pre-allocated packet buffer for FDir
86 * @add: True for add/update, False for remove
88 static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
89 u8 *raw_packet, struct i40e_pf *pf,
92 struct i40e_tx_buffer *tx_buf, *first;
93 struct i40e_tx_desc *tx_desc;
94 struct i40e_ring *tx_ring;
101 /* find existing FDIR VSI */
102 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
106 tx_ring = vsi->tx_rings[0];
109 /* we need two descriptors to add/del a filter and we can wait */
110 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
113 msleep_interruptible(1);
116 dma = dma_map_single(dev, raw_packet,
117 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
118 if (dma_mapping_error(dev, dma))
121 /* grab the next descriptor */
122 i = tx_ring->next_to_use;
123 first = &tx_ring->tx_bi[i];
124 i40e_fdir(tx_ring, fdir_data, add);
126 /* Now program a dummy descriptor */
127 i = tx_ring->next_to_use;
128 tx_desc = I40E_TX_DESC(tx_ring, i);
129 tx_buf = &tx_ring->tx_bi[i];
131 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
133 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
135 /* record length, and DMA address */
136 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
137 dma_unmap_addr_set(tx_buf, dma, dma);
139 tx_desc->buffer_addr = cpu_to_le64(dma);
140 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
142 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
143 tx_buf->raw_buf = (void *)raw_packet;
145 tx_desc->cmd_type_offset_bsz =
146 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
148 /* Force memory writes to complete before letting h/w
149 * know there are new descriptors to fetch.
153 /* Mark the data descriptor to be watched */
154 first->next_to_watch = tx_desc;
156 writel(tx_ring->next_to_use, tx_ring->tail);
163 #define IP_HEADER_OFFSET 14
164 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
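/* IP_HEADER_OFFSET is the 14-byte Ethernet header; the dummy UDP frame is
 * 14 (Ethernet) + 20 (IPv4) + 8 (UDP) = 42 bytes long.
 */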
166 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
167 * @vsi: pointer to the targeted VSI
168 * @fd_data: the flow director data required for the FDir descriptor
169 * @add: true adds a filter, false removes it
171 * Returns 0 if the filters were successfully added or removed
173 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
174 struct i40e_fdir_filter *fd_data,
177 struct i40e_pf *pf = vsi->back;
182 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
183 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
184 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
186 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
189 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
191 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
192 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
193 + sizeof(struct iphdr));
195 ip->daddr = fd_data->dst_ip;
196 udp->dest = fd_data->dst_port;
197 ip->saddr = fd_data->src_ip;
198 udp->source = fd_data->src_port;
200 if (fd_data->flex_filter) {
201 u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
202 __be16 pattern = fd_data->flex_word;
203 u16 off = fd_data->flex_offset;
205 *((__force __be16 *)(payload + off)) = pattern;
208 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
209 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
211 dev_info(&pf->pdev->dev,
212 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
213 fd_data->pctype, fd_data->fd_id, ret);
214 /* Free the packet buffer since it wasn't added to the ring */
217 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
219 dev_info(&pf->pdev->dev,
220 "Filter OK for PCTYPE %d loc = %d\n",
221 fd_data->pctype, fd_data->fd_id);
223 dev_info(&pf->pdev->dev,
224 "Filter deleted for PCTYPE %d loc = %d\n",
225 fd_data->pctype, fd_data->fd_id);
229 pf->fd_udp4_filter_cnt++;
231 pf->fd_udp4_filter_cnt--;
236 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
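/* 54 bytes: 14 (Ethernet) + 20 (IPv4) + 20 (TCP) */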
238 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
239 * @vsi: pointer to the targeted VSI
240 * @fd_data: the flow director data required for the FDir descriptor
241 * @add: true adds a filter, false removes it
243 * Returns 0 if the filters were successfully added or removed
245 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
246 struct i40e_fdir_filter *fd_data,
249 struct i40e_pf *pf = vsi->back;
255 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
256 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
257 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
258 0x0, 0x72, 0, 0, 0, 0};
260 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
263 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
265 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
266 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
267 + sizeof(struct iphdr));
269 ip->daddr = fd_data->dst_ip;
270 tcp->dest = fd_data->dst_port;
271 ip->saddr = fd_data->src_ip;
272 tcp->source = fd_data->src_port;
274 if (fd_data->flex_filter) {
275 u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
276 __be16 pattern = fd_data->flex_word;
277 u16 off = fd_data->flex_offset;
279 *((__force __be16 *)(payload + off)) = pattern;
282 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
283 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
285 dev_info(&pf->pdev->dev,
286 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
287 fd_data->pctype, fd_data->fd_id, ret);
288 /* Free the packet buffer since it wasn't added to the ring */
291 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
293 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
294 fd_data->pctype, fd_data->fd_id);
296 dev_info(&pf->pdev->dev,
297 "Filter deleted for PCTYPE %d loc = %d\n",
298 fd_data->pctype, fd_data->fd_id);
302 pf->fd_tcp4_filter_cnt++;
303 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
304 I40E_DEBUG_FD & pf->hw.debug_mask)
305 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
306 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
308 pf->fd_tcp4_filter_cnt--;
314 #define I40E_SCTPIP_DUMMY_PACKET_LEN 46
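/* 46 bytes: 14 (Ethernet) + 20 (IPv4) + 12 (SCTP common header) */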
316 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
317 * a specific flow spec
318 * @vsi: pointer to the targeted VSI
319 * @fd_data: the flow director data required for the FDir descriptor
320 * @add: true adds a filter, false removes it
322 * Returns 0 if the filters were successfully added or removed
324 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
325 struct i40e_fdir_filter *fd_data,
328 struct i40e_pf *pf = vsi->back;
329 struct sctphdr *sctp;
334 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
335 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
336 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
338 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
341 memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
343 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
344 sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
345 + sizeof(struct iphdr));
347 ip->daddr = fd_data->dst_ip;
348 sctp->dest = fd_data->dst_port;
349 ip->saddr = fd_data->src_ip;
350 sctp->source = fd_data->src_port;
352 if (fd_data->flex_filter) {
353 u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
354 __be16 pattern = fd_data->flex_word;
355 u16 off = fd_data->flex_offset;
357 *((__force __be16 *)(payload + off)) = pattern;
360 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
361 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
363 dev_info(&pf->pdev->dev,
364 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
365 fd_data->pctype, fd_data->fd_id, ret);
366 /* Free the packet buffer since it wasn't added to the ring */
369 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
371 dev_info(&pf->pdev->dev,
372 "Filter OK for PCTYPE %d loc = %d\n",
373 fd_data->pctype, fd_data->fd_id);
375 dev_info(&pf->pdev->dev,
376 "Filter deleted for PCTYPE %d loc = %d\n",
377 fd_data->pctype, fd_data->fd_id);
381 pf->fd_sctp4_filter_cnt++;
383 pf->fd_sctp4_filter_cnt--;
388 #define I40E_IP_DUMMY_PACKET_LEN 34
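/* 34 bytes: 14 (Ethernet) + 20 (IPv4), no L4 header in the dummy frame */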
390 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
391 * a specific flow spec
392 * @vsi: pointer to the targeted VSI
393 * @fd_data: the flow director data required for the FDir descriptor
394 * @add: true adds a filter, false removes it
396 * Returns 0 if the filters were successfully added or removed
398 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
399 struct i40e_fdir_filter *fd_data,
402 struct i40e_pf *pf = vsi->back;
407 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
408 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
411 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
412 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
413 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
416 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
417 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
419 ip->saddr = fd_data->src_ip;
420 ip->daddr = fd_data->dst_ip;
423 if (fd_data->flex_filter) {
424 u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
425 __be16 pattern = fd_data->flex_word;
426 u16 off = fd_data->flex_offset;
428 *((__force __be16 *)(payload + off)) = pattern;
432 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
434 dev_info(&pf->pdev->dev,
435 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
436 fd_data->pctype, fd_data->fd_id, ret);
437 /* The packet buffer wasn't added to the ring so we
438 * need to free it now.
442 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
444 dev_info(&pf->pdev->dev,
445 "Filter OK for PCTYPE %d loc = %d\n",
446 fd_data->pctype, fd_data->fd_id);
448 dev_info(&pf->pdev->dev,
449 "Filter deleted for PCTYPE %d loc = %d\n",
450 fd_data->pctype, fd_data->fd_id);
455 pf->fd_ip4_filter_cnt++;
457 pf->fd_ip4_filter_cnt--;
463 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
464 * @vsi: pointer to the targeted VSI
465 * @input: filter to add or delete
466 * @add: true adds a filter, false removes it
469 int i40e_add_del_fdir(struct i40e_vsi *vsi,
470 struct i40e_fdir_filter *input, bool add)
472 struct i40e_pf *pf = vsi->back;
475 switch (input->flow_type & ~FLOW_EXT) {
477 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
480 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
483 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
486 switch (input->ip4_proto) {
488 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
491 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
494 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
497 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
500 /* We cannot support masking based on protocol */
501 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
507 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
512 /* The buffer allocated here will normally be freed by
513 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
514 * completion. In the event of an error adding the buffer to the FDIR
515 * ring, it will immediately be freed. It may also be freed by
516 * i40e_clean_tx_ring() when closing the VSI.
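/* As a rough usage sketch (not a call site in this file): a caller that has
 * already parsed a rule, such as the ethtool ntuple path, might fill in an
 * i40e_fdir_filter and hand it to i40e_add_del_fdir() along these lines.
 * The rule_* values and local variables are placeholders, not names that
 * exist in the driver:
 *
 *	struct i40e_fdir_filter input = {};
 *
 *	input.flow_type = TCP_V4_FLOW;
 *	input.dst_ip = rule_dst_ip;		(network byte order)
 *	input.dst_port = rule_dst_port;		(network byte order)
 *	err = i40e_add_del_fdir(vsi, &input, true);	(true == add)
 */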
522 * i40e_fd_handle_status - check the Programming Status for FD
523 * @rx_ring: the Rx ring for this descriptor
524 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
525 * @prog_id: the id originally used for programming
527 * This is used to verify whether the FD programming or invalidation
528 * that SW requested of the HW was successful, and to take action accordingly.
530 void i40e_fd_handle_status(struct i40e_ring *rx_ring,
531 union i40e_rx_desc *rx_desc, u8 prog_id)
533 struct i40e_pf *pf = rx_ring->vsi->back;
534 struct pci_dev *pdev = pf->pdev;
535 u32 fcnt_prog, fcnt_avail;
539 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
540 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
541 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
543 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
544 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
545 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
546 (I40E_DEBUG_FD & pf->hw.debug_mask))
547 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
550 /* Check if the programming error is for ATR.
551 * If so, auto-disable ATR and set a state for
552 * flush in progress. Next time we come here, if a flush is in
553 * progress do nothing; once the flush is complete the state will be cleared.
556 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
560 /* store the current atr filter count */
561 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
563 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
564 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
565 /* These set_bit() calls aren't atomic with the
566 * test_bit() here, but worst case we potentially
567 * disable ATR and queue a flush right after SB
568 * support is re-enabled. That shouldn't cause an issue.
571 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
572 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
575 /* filter programming failed most likely due to table full */
576 fcnt_prog = i40e_get_global_fd_count(pf);
577 fcnt_avail = pf->fdir_pf_filter_count;
578 /* If ATR is running, fcnt_prog can change quickly;
579 * if we are very close to full, it makes sense to disable
580 * FD ATR/SB and then re-enable it when there is room.
582 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
583 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
584 !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
586 if (I40E_DEBUG_FD & pf->hw.debug_mask)
587 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
589 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
590 if (I40E_DEBUG_FD & pf->hw.debug_mask)
591 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
592 rx_desc->wb.qword0.hi_dword.fd_id);
597 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
598 * @ring: the ring that owns the buffer
599 * @tx_buffer: the buffer to free
601 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
602 struct i40e_tx_buffer *tx_buffer)
604 if (tx_buffer->skb) {
605 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
606 kfree(tx_buffer->raw_buf);
607 else if (ring_is_xdp(ring))
608 xdp_return_frame(tx_buffer->xdpf);
610 dev_kfree_skb_any(tx_buffer->skb);
611 if (dma_unmap_len(tx_buffer, len))
612 dma_unmap_single(ring->dev,
613 dma_unmap_addr(tx_buffer, dma),
614 dma_unmap_len(tx_buffer, len),
616 } else if (dma_unmap_len(tx_buffer, len)) {
617 dma_unmap_page(ring->dev,
618 dma_unmap_addr(tx_buffer, dma),
619 dma_unmap_len(tx_buffer, len),
623 tx_buffer->next_to_watch = NULL;
624 tx_buffer->skb = NULL;
625 dma_unmap_len_set(tx_buffer, len, 0);
626 /* tx_buffer must be completely set up in the transmit path */
630 * i40e_clean_tx_ring - Free any empty Tx buffers
631 * @tx_ring: ring to be cleaned
633 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
635 unsigned long bi_size;
638 if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
639 i40e_xsk_clean_tx_ring(tx_ring);
641 /* ring already cleared, nothing to do */
645 /* Free all the Tx ring sk_buffs */
646 for (i = 0; i < tx_ring->count; i++)
647 i40e_unmap_and_free_tx_resource(tx_ring,
651 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
652 memset(tx_ring->tx_bi, 0, bi_size);
654 /* Zero out the descriptor ring */
655 memset(tx_ring->desc, 0, tx_ring->size);
657 tx_ring->next_to_use = 0;
658 tx_ring->next_to_clean = 0;
660 if (!tx_ring->netdev)
663 /* cleanup Tx queue statistics */
664 netdev_tx_reset_queue(txring_txq(tx_ring));
668 * i40e_free_tx_resources - Free Tx resources per queue
669 * @tx_ring: Tx descriptor ring for a specific queue
671 * Free all transmit software resources
673 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
675 i40e_clean_tx_ring(tx_ring);
676 kfree(tx_ring->tx_bi);
677 tx_ring->tx_bi = NULL;
680 dma_free_coherent(tx_ring->dev, tx_ring->size,
681 tx_ring->desc, tx_ring->dma);
682 tx_ring->desc = NULL;
687 * i40e_get_tx_pending - how many tx descriptors not processed
688 * @ring: the ring of descriptors
689 * @in_sw: use SW variables
691 * Since there is no access to the ring head register
692 * in XL710, we need to use our local copies
694 u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
699 head = i40e_get_head(ring);
700 tail = readl(ring->tail);
702 head = ring->next_to_clean;
703 tail = ring->next_to_use;
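/* Number of descriptors not yet processed, accounting for ring wrap:
 * e.g. with count = 512, head = 500 and tail = 10 there are
 * 10 + 512 - 500 = 22 descriptors pending.
 */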
707 return (head < tail) ?
708 tail - head : (tail + ring->count - head);
714 * i40e_detect_recover_hung - detect and recover hung Tx queues
715 * @vsi: pointer to vsi struct with tx queues
717 * The VSI has a netdev and the netdev has TX queues. This function checks each of
718 * those TX queues; if one appears hung, it triggers recovery by issuing a SW interrupt.
720 void i40e_detect_recover_hung(struct i40e_vsi *vsi)
722 struct i40e_ring *tx_ring = NULL;
723 struct net_device *netdev;
730 if (test_bit(__I40E_VSI_DOWN, vsi->state))
733 netdev = vsi->netdev;
737 if (!netif_carrier_ok(netdev))
740 for (i = 0; i < vsi->num_queue_pairs; i++) {
741 tx_ring = vsi->tx_rings[i];
742 if (tx_ring && tx_ring->desc) {
743 /* If packet counter has not changed the queue is
744 * likely stalled, so force an interrupt for this queue.
747 * prev_pkt_ctr would be negative if there was no pending work.
750 packets = tx_ring->stats.packets & INT_MAX;
751 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
752 i40e_force_wb(vsi, tx_ring->q_vector);
756 /* Memory barrier between read of packet count and call
757 * to i40e_get_tx_pending()
760 tx_ring->tx_stats.prev_pkt_ctr =
761 i40e_get_tx_pending(tx_ring, true) ? packets : -1;
767 * i40e_clean_tx_irq - Reclaim resources after transmit completes
768 * @vsi: the VSI we care about
769 * @tx_ring: Tx ring to clean
770 * @napi_budget: Used to determine if we are in netpoll
772 * Returns true if there's any budget left (i.e. the clean is finished)
774 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
775 struct i40e_ring *tx_ring, int napi_budget)
777 u16 i = tx_ring->next_to_clean;
778 struct i40e_tx_buffer *tx_buf;
779 struct i40e_tx_desc *tx_head;
780 struct i40e_tx_desc *tx_desc;
781 unsigned int total_bytes = 0, total_packets = 0;
782 unsigned int budget = vsi->work_limit;
784 tx_buf = &tx_ring->tx_bi[i];
785 tx_desc = I40E_TX_DESC(tx_ring, i);
788 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
791 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
793 /* if next_to_watch is not set then there is no work pending */
797 /* prevent any other reads prior to eop_desc */
800 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
801 /* we have caught up to head, no work left to do */
802 if (tx_head == tx_desc)
805 /* clear next_to_watch to prevent false hangs */
806 tx_buf->next_to_watch = NULL;
808 /* update the statistics for this packet */
809 total_bytes += tx_buf->bytecount;
810 total_packets += tx_buf->gso_segs;
812 /* free the skb/XDP data */
813 if (ring_is_xdp(tx_ring))
814 xdp_return_frame(tx_buf->xdpf);
816 napi_consume_skb(tx_buf->skb, napi_budget);
818 /* unmap skb header data */
819 dma_unmap_single(tx_ring->dev,
820 dma_unmap_addr(tx_buf, dma),
821 dma_unmap_len(tx_buf, len),
824 /* clear tx_buffer data */
826 dma_unmap_len_set(tx_buf, len, 0);
828 /* unmap remaining buffers */
829 while (tx_desc != eop_desc) {
830 i40e_trace(clean_tx_irq_unmap,
831 tx_ring, tx_desc, tx_buf);
838 tx_buf = tx_ring->tx_bi;
839 tx_desc = I40E_TX_DESC(tx_ring, 0);
842 /* unmap any remaining paged data */
843 if (dma_unmap_len(tx_buf, len)) {
844 dma_unmap_page(tx_ring->dev,
845 dma_unmap_addr(tx_buf, dma),
846 dma_unmap_len(tx_buf, len),
848 dma_unmap_len_set(tx_buf, len, 0);
852 /* move us one more past the eop_desc for start of next pkt */
858 tx_buf = tx_ring->tx_bi;
859 tx_desc = I40E_TX_DESC(tx_ring, 0);
864 /* update budget accounting */
866 } while (likely(budget));
869 tx_ring->next_to_clean = i;
870 i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
871 i40e_arm_wb(tx_ring, vsi, budget);
873 if (ring_is_xdp(tx_ring))
876 /* notify netdev of completed buffers */
877 netdev_tx_completed_queue(txring_txq(tx_ring),
878 total_packets, total_bytes);
880 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
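/* Only wake a stopped queue once at least TX_WAKE_THRESHOLD descriptors
 * are free again, so there is headroom for the next transmits.
 */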
881 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
882 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
883 /* Make sure that anybody stopping the queue after this
884 * sees the new next_to_clean.
887 if (__netif_subqueue_stopped(tx_ring->netdev,
888 tx_ring->queue_index) &&
889 !test_bit(__I40E_VSI_DOWN, vsi->state)) {
890 netif_wake_subqueue(tx_ring->netdev,
891 tx_ring->queue_index);
892 ++tx_ring->tx_stats.restart_queue;
900 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
901 * @vsi: the VSI we care about
902 * @q_vector: the vector on which to enable writeback
905 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
906 struct i40e_q_vector *q_vector)
908 u16 flags = q_vector->tx.ring[0].flags;
911 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
914 if (q_vector->arm_wb_state)
917 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
918 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
919 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
922 I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
925 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
926 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
928 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
930 q_vector->arm_wb_state = true;
934 * i40e_force_wb - Issue SW Interrupt so HW does a wb
935 * @vsi: the VSI we care about
936 * @q_vector: the vector on which to force writeback
939 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
941 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
942 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
943 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
944 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
945 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
946 /* allow 00 to be written to the index */
949 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
951 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
952 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
953 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
954 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
955 /* allow 00 to be written to the index */
957 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
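/* i40e_container_is_rx - return true if @rc is the Rx ring container of
 * @q_vector, as opposed to its Tx container.
 */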
961 static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
962 struct i40e_ring_container *rc)
964 return &q_vector->rx == rc;
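/* i40e_itr_divisor - divisor used to scale the adaptive ITR calculation
 * to the negotiated link speed; faster links use a larger divisor.
 */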
967 static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
969 unsigned int divisor;
971 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
972 case I40E_LINK_SPEED_40GB:
973 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
975 case I40E_LINK_SPEED_25GB:
976 case I40E_LINK_SPEED_20GB:
977 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
980 case I40E_LINK_SPEED_10GB:
981 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
983 case I40E_LINK_SPEED_1GB:
984 case I40E_LINK_SPEED_100MB:
985 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
993 * i40e_update_itr - update the dynamic ITR value based on statistics
994 * @q_vector: structure containing interrupt and ring information
995 * @rc: structure containing ring performance data
997 * Stores a new ITR value based on packets and byte
998 * counts during the last interrupt. The advantage of per interrupt
999 * computation is faster updates and more accurate ITR for the current
1000 * traffic pattern. Constants in this function were computed
1001 * based on theoretical maximum wire speed and thresholds were set based
1002 * on testing data as well as attempting to minimize response time
1003 * while increasing bulk throughput.
1005 static void i40e_update_itr(struct i40e_q_vector *q_vector,
1006 struct i40e_ring_container *rc)
1008 unsigned int avg_wire_size, packets, bytes, itr;
1009 unsigned long next_update = jiffies;
1011 /* If we don't have any rings just leave ourselves set for maximum
1012 * possible latency so we take ourselves out of the equation.
1014 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1017 /* For Rx we want to push the delay up and default to low latency.
1018 * for Tx we want to pull the delay down and default to high latency.
1020 itr = i40e_container_is_rx(q_vector, rc) ?
1021 I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1022 I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1024 /* If we didn't update within the last 1 - 2 jiffies we can assume
1025 * that either packets are coming in so slow there hasn't been
1026 * any work, or that there is so much work that NAPI is dealing
1027 * with interrupt moderation and we don't need to do anything.
1029 if (time_after(next_update, rc->next_update))
1032 /* If itr_countdown is set it means we programmed an ITR within
1033 * the last 4 interrupt cycles. This has a side effect of us
1034 * potentially firing an early interrupt. In order to work around
1035 * this we need to throw out any data received for a few
1036 * interrupts following the update.
1038 if (q_vector->itr_countdown) {
1039 itr = rc->target_itr;
1043 packets = rc->total_packets;
1044 bytes = rc->total_bytes;
1046 if (i40e_container_is_rx(q_vector, rc)) {
1047 /* If this is Rx and there are 1 to 4 packets and bytes are less than
1048 * 9000 assume insufficient data to use bulk rate limiting
1049 * approach unless Tx is already in bulk rate limiting. We
1050 * are likely latency driven.
1052 if (packets && packets < 4 && bytes < 9000 &&
1053 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1054 itr = I40E_ITR_ADAPTIVE_LATENCY;
1055 goto adjust_by_size;
1057 } else if (packets < 4) {
1058 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1059 * bulk mode and we are receiving 4 or fewer packets just
1060 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1061 * that the Rx can relax.
1063 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1064 (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1065 I40E_ITR_ADAPTIVE_MAX_USECS)
1067 } else if (packets > 32) {
1068 /* If we have processed over 32 packets in a single interrupt
1069 * for Tx assume we need to switch over to "bulk" mode.
1071 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1074 /* We have no packets to actually measure against. This means
1075 * either one of the other queues on this vector is active or
1076 * we are a Tx queue doing TSO with too high an interrupt rate.
1078 * Between 4 and 56 we can assume that our current interrupt delay
1079 * is only slightly too low. As such we should increase it by a small fixed amount.
1083 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1084 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1085 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1086 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1091 if (packets <= 256) {
1092 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1093 itr &= I40E_ITR_MASK;
1095 /* Between 56 and 112 is our "goldilocks" zone where we are
1096 * working out "just right". Just report that our current
1097 * ITR is good for us.
1102 /* If packet count is 128 or greater we are likely looking
1103 * at a slight overrun of the delay we want. Try halving
1104 * our delay to see if that will cut the number of packets
1105 * in half per interrupt.
1108 itr &= I40E_ITR_MASK;
1109 if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1110 itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1115 /* The paths below assume we are dealing with a bulk ITR since
1116 * number of packets is greater than 256. We are just going to have
1117 * to compute a value and try to bring the count under control,
1118 * though for smaller packet sizes there isn't much we can do as
1119 * NAPI polling will likely be kicking in sooner rather than later.
1121 itr = I40E_ITR_ADAPTIVE_BULK;
1124 /* If packet counts are 256 or greater we can assume we have a gross
1125 * overestimation of what the rate should be. Instead of trying to fine
1126 * tune it, just use the formula below to try and dial in an exact value
1127 * given the current packet size of the frame.
1129 avg_wire_size = bytes / packets;
1131 /* The following is a crude approximation of:
1132 * wmem_default / (size + overhead) = desired_pkts_per_int
1133 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1134 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1136 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1137 * packet (256 skb, 64 headroom, 320 shared info), we can reduce the formula to:
1140 * (170 * (size + 24)) / (size + 640) = ITR
1142 * We first do some math on the packet size and then finally bitshift
1143 * by 8 after rounding up. We also have to account for PCIe link speed
1144 * difference as ITR scales based on this.
1146 if (avg_wire_size <= 60) {
1147 /* Start at 250k ints/sec */
1148 avg_wire_size = 4096;
1149 } else if (avg_wire_size <= 380) {
1150 /* 250K ints/sec to 60K ints/sec */
1151 avg_wire_size *= 40;
1152 avg_wire_size += 1696;
1153 } else if (avg_wire_size <= 1084) {
1154 /* 60K ints/sec to 36K ints/sec */
1155 avg_wire_size *= 15;
1156 avg_wire_size += 11452;
1157 } else if (avg_wire_size <= 1980) {
1158 /* 36K ints/sec to 30K ints/sec */
1160 avg_wire_size += 22420;
1162 /* plateau at a limit of 30K ints/sec */
1163 avg_wire_size = 32256;
1166 /* If we are in low latency mode, halve our delay, which doubles the
1167 * rate to somewhere between 100K and 16K ints/sec.
1169 if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1172 /* Resultant value is 256 times larger than it needs to be. This
1173 * gives us room to adjust the value as needed to either increase
1174 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
1176 * Use addition as we have already recorded the new latency flag
1177 * for the ITR value.
1179 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1180 I40E_ITR_ADAPTIVE_MIN_INC;
1182 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1183 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1184 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1188 /* write back value */
1189 rc->target_itr = itr;
1191 /* next update should occur within next jiffy */
1192 rc->next_update = next_update + 1;
1194 rc->total_bytes = 0;
1195 rc->total_packets = 0;
1199 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1200 * @rx_ring: rx descriptor ring to store buffers on
1201 * @old_buff: donor buffer to have page reused
1203 * Synchronizes page for reuse by the adapter
1205 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1206 struct i40e_rx_buffer *old_buff)
1208 struct i40e_rx_buffer *new_buff;
1209 u16 nta = rx_ring->next_to_alloc;
1211 new_buff = &rx_ring->rx_bi[nta];
1213 /* update, and store next to alloc */
1215 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1217 /* transfer page from old buffer to new buffer */
1218 new_buff->dma = old_buff->dma;
1219 new_buff->page = old_buff->page;
1220 new_buff->page_offset = old_buff->page_offset;
1221 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1223 rx_ring->rx_stats.page_reuse_count++;
1225 /* clear contents of buffer_info */
1226 old_buff->page = NULL;
1230 * i40e_rx_is_programming_status - check for programming status descriptor
1231 * @qw: qword representing status_error_len in CPU ordering
1233 * The value in the descriptor length field indicates if this
1234 * is a programming status descriptor for flow director or FCoE
1235 * by the value of I40E_RX_PROG_STATUS_DESC_LENGTH, otherwise
1236 * it is a packet descriptor.
1238 static inline bool i40e_rx_is_programming_status(u64 qw)
1240 /* The Rx filter programming status and SPH bit occupy the same
1241 * spot in the descriptor. Since we don't support packet split we
1242 * can just reuse the bit as an indication that this is a
1243 * programming status descriptor.
1245 return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
1249 * i40e_clean_programming_status - try clean the programming status descriptor
1250 * @rx_ring: the rx ring that has this descriptor
1251 * @rx_desc: the rx descriptor written back by HW
1252 * @qw: qword representing status_error_len in CPU ordering
1254 * Flow director should handle FD_FILTER_STATUS to check whether its filter
1255 * programming was successful and take action accordingly. FCoE should
1256 * handle its context/filter programming/invalidation status and take actions.
1258 * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL.
1260 struct i40e_rx_buffer *i40e_clean_programming_status(
1261 struct i40e_ring *rx_ring,
1262 union i40e_rx_desc *rx_desc,
1265 struct i40e_rx_buffer *rx_buffer;
1269 if (!i40e_rx_is_programming_status(qw))
1272 ntc = rx_ring->next_to_clean;
1274 /* fetch, update, and store next to clean */
1275 rx_buffer = &rx_ring->rx_bi[ntc++];
1276 ntc = (ntc < rx_ring->count) ? ntc : 0;
1277 rx_ring->next_to_clean = ntc;
1279 prefetch(I40E_RX_DESC(rx_ring, ntc));
1281 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1282 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1284 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1285 i40e_fd_handle_status(rx_ring, rx_desc, id);
1291 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1292 * @tx_ring: the tx ring to set up
1294 * Return 0 on success, negative on error
1296 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1298 struct device *dev = tx_ring->dev;
1304 /* warn if we are about to overwrite the pointer */
1305 WARN_ON(tx_ring->tx_bi);
1306 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1307 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1308 if (!tx_ring->tx_bi)
1311 u64_stats_init(&tx_ring->syncp);
1313 /* round up to nearest 4K */
1314 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1315 /* add a u32 for head writeback; the ALIGN() that follows guarantees
1316 * the ring is at least one cache line in size
1318 tx_ring->size += sizeof(u32);
1319 tx_ring->size = ALIGN(tx_ring->size, 4096);
1320 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1321 &tx_ring->dma, GFP_KERNEL);
1322 if (!tx_ring->desc) {
1323 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1328 tx_ring->next_to_use = 0;
1329 tx_ring->next_to_clean = 0;
1330 tx_ring->tx_stats.prev_pkt_ctr = -1;
1334 kfree(tx_ring->tx_bi);
1335 tx_ring->tx_bi = NULL;
1340 * i40e_clean_rx_ring - Free Rx buffers
1341 * @rx_ring: ring to be cleaned
1343 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1345 unsigned long bi_size;
1348 /* ring already cleared, nothing to do */
1349 if (!rx_ring->rx_bi)
1353 dev_kfree_skb(rx_ring->skb);
1354 rx_ring->skb = NULL;
1357 if (rx_ring->xsk_umem) {
1358 i40e_xsk_clean_rx_ring(rx_ring);
1362 /* Free all the Rx ring sk_buffs */
1363 for (i = 0; i < rx_ring->count; i++) {
1364 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1369 /* Invalidate cache lines that may have been written to by
1370 * device so that we avoid corrupting memory.
1372 dma_sync_single_range_for_cpu(rx_ring->dev,
1375 rx_ring->rx_buf_len,
1378 /* free resources associated with mapping */
1379 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1380 i40e_rx_pg_size(rx_ring),
1384 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1387 rx_bi->page_offset = 0;
1391 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1392 memset(rx_ring->rx_bi, 0, bi_size);
1394 /* Zero out the descriptor ring */
1395 memset(rx_ring->desc, 0, rx_ring->size);
1397 rx_ring->next_to_alloc = 0;
1398 rx_ring->next_to_clean = 0;
1399 rx_ring->next_to_use = 0;
1403 * i40e_free_rx_resources - Free Rx resources
1404 * @rx_ring: ring to clean the resources from
1406 * Free all receive software resources
1408 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1410 i40e_clean_rx_ring(rx_ring);
1411 if (rx_ring->vsi->type == I40E_VSI_MAIN)
1412 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1413 rx_ring->xdp_prog = NULL;
1414 kfree(rx_ring->rx_bi);
1415 rx_ring->rx_bi = NULL;
1417 if (rx_ring->desc) {
1418 dma_free_coherent(rx_ring->dev, rx_ring->size,
1419 rx_ring->desc, rx_ring->dma);
1420 rx_ring->desc = NULL;
1425 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1426 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1428 * Returns 0 on success, negative on failure
1430 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1432 struct device *dev = rx_ring->dev;
1436 /* warn if we are about to overwrite the pointer */
1437 WARN_ON(rx_ring->rx_bi);
1438 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1439 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1440 if (!rx_ring->rx_bi)
1443 u64_stats_init(&rx_ring->syncp);
1445 /* Round up to nearest 4K */
1446 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1447 rx_ring->size = ALIGN(rx_ring->size, 4096);
1448 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1449 &rx_ring->dma, GFP_KERNEL);
1451 if (!rx_ring->desc) {
1452 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1457 rx_ring->next_to_alloc = 0;
1458 rx_ring->next_to_clean = 0;
1459 rx_ring->next_to_use = 0;
1461 /* XDP RX-queue info only needed for RX rings exposed to XDP */
1462 if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1463 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1464 rx_ring->queue_index);
1469 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1473 kfree(rx_ring->rx_bi);
1474 rx_ring->rx_bi = NULL;
1479 * i40e_release_rx_desc - Store the new tail and head values
1480 * @rx_ring: ring to bump
1481 * @val: new head index
1483 void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1485 rx_ring->next_to_use = val;
1487 /* update next to alloc since we have filled the ring */
1488 rx_ring->next_to_alloc = val;
1490 /* Force memory writes to complete before letting h/w
1491 * know there are new descriptors to fetch. (Only
1492 * applicable for weak-ordered memory model archs, such as IA-64.)
1496 writel(val, rx_ring->tail);
1500 * i40e_rx_offset - Return expected offset into page to access data
1501 * @rx_ring: Ring we are requesting offset of
1503 * Returns the offset value for ring into the data buffer.
1505 static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
1507 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
1511 * i40e_alloc_mapped_page - recycle or make a new page
1512 * @rx_ring: ring to use
1513 * @bi: rx_buffer struct to modify
1515 * Returns true if the page was successfully allocated or reused.
1518 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1519 struct i40e_rx_buffer *bi)
1521 struct page *page = bi->page;
1524 /* since we are recycling buffers we should seldom need to alloc */
1526 rx_ring->rx_stats.page_reuse_count++;
1530 /* alloc new page for storage */
1531 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1532 if (unlikely(!page)) {
1533 rx_ring->rx_stats.alloc_page_failed++;
1537 /* map page for use */
1538 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1539 i40e_rx_pg_size(rx_ring),
1543 /* if mapping failed free memory back to system since
1544 * there isn't much point in holding memory we can't use
1546 if (dma_mapping_error(rx_ring->dev, dma)) {
1547 __free_pages(page, i40e_rx_pg_order(rx_ring));
1548 rx_ring->rx_stats.alloc_page_failed++;
1554 bi->page_offset = i40e_rx_offset(rx_ring);
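/* Take a large up-front page reference so that per-buffer recycling only
 * adjusts the local pagecnt_bias instead of touching the atomic page
 * refcount for every received frame.
 */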
1555 page_ref_add(page, USHRT_MAX - 1);
1556 bi->pagecnt_bias = USHRT_MAX;
1562 * i40e_receive_skb - Send a completed packet up the stack
1563 * @rx_ring: rx ring in play
1564 * @skb: packet to send up
1565 * @vlan_tag: vlan tag for packet
1567 void i40e_receive_skb(struct i40e_ring *rx_ring,
1568 struct sk_buff *skb, u16 vlan_tag)
1570 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1572 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1573 (vlan_tag & VLAN_VID_MASK))
1574 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1576 napi_gro_receive(&q_vector->napi, skb);
1580 * i40e_alloc_rx_buffers - Replace used receive buffers
1581 * @rx_ring: ring to place buffers on
1582 * @cleaned_count: number of buffers to replace
1584 * Returns false if all allocations were successful, true if any fail
1586 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1588 u16 ntu = rx_ring->next_to_use;
1589 union i40e_rx_desc *rx_desc;
1590 struct i40e_rx_buffer *bi;
1592 /* do nothing if no valid netdev defined */
1593 if (!rx_ring->netdev || !cleaned_count)
1596 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1597 bi = &rx_ring->rx_bi[ntu];
1600 if (!i40e_alloc_mapped_page(rx_ring, bi))
1603 /* sync the buffer for use by the device */
1604 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1606 rx_ring->rx_buf_len,
1609 /* Refresh the desc even if buffer_addrs didn't change
1610 * because each write-back erases this info.
1612 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1617 if (unlikely(ntu == rx_ring->count)) {
1618 rx_desc = I40E_RX_DESC(rx_ring, 0);
1619 bi = rx_ring->rx_bi;
1623 /* clear the status bits for the next_to_use descriptor */
1624 rx_desc->wb.qword1.status_error_len = 0;
1627 } while (cleaned_count);
1629 if (rx_ring->next_to_use != ntu)
1630 i40e_release_rx_desc(rx_ring, ntu);
1635 if (rx_ring->next_to_use != ntu)
1636 i40e_release_rx_desc(rx_ring, ntu);
1638 /* make sure to come back via polling to try again after
1639 * allocation failure
1645 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1646 * @vsi: the VSI we care about
1647 * @skb: skb currently being received and modified
1648 * @rx_desc: the receive descriptor
1650 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1651 struct sk_buff *skb,
1652 union i40e_rx_desc *rx_desc)
1654 struct i40e_rx_ptype_decoded decoded;
1655 u32 rx_error, rx_status;
1660 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1661 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1662 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1663 I40E_RXD_QW1_ERROR_SHIFT;
1664 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1665 I40E_RXD_QW1_STATUS_SHIFT;
1666 decoded = decode_rx_desc_ptype(ptype);
1668 skb->ip_summed = CHECKSUM_NONE;
1670 skb_checksum_none_assert(skb);
1672 /* Rx csum enabled and ip headers found? */
1673 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1676 /* did the hardware decode the packet and checksum? */
1677 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1680 /* both known and outer_ip must be set for the below code to work */
1681 if (!(decoded.known && decoded.outer_ip))
1684 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1685 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1686 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1687 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1690 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1691 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1694 /* likely incorrect csum if alternate IP extension headers found */
1696 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1697 /* don't increment checksum err here, non-fatal err */
1700 /* there was some L4 error, count error and punt packet to the stack */
1701 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1704 /* handle packets that were not able to be checksummed due
1705 * to arrival speed; in this case the stack can compute the checksum.
1708 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1711 /* If there is an outer header present that might contain a checksum
1712 * we need to bump the checksum level by 1 to reflect the fact that
1713 * we are indicating we validated the inner checksum.
1715 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1716 skb->csum_level = 1;
1718 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1719 switch (decoded.inner_prot) {
1720 case I40E_RX_PTYPE_INNER_PROT_TCP:
1721 case I40E_RX_PTYPE_INNER_PROT_UDP:
1722 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1723 skb->ip_summed = CHECKSUM_UNNECESSARY;
1732 vsi->back->hw_csum_rx_error++;
1736 * i40e_ptype_to_htype - get a hash type
1737 * @ptype: the ptype value from the descriptor
1739 * Returns a hash type to be used by skb_set_hash
1741 static inline int i40e_ptype_to_htype(u8 ptype)
1743 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1746 return PKT_HASH_TYPE_NONE;
1748 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1749 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1750 return PKT_HASH_TYPE_L4;
1751 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1752 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1753 return PKT_HASH_TYPE_L3;
1755 return PKT_HASH_TYPE_L2;
1759 * i40e_rx_hash - set the hash value in the skb
1760 * @ring: descriptor ring
1761 * @rx_desc: specific descriptor
1762 * @skb: skb currently being received and modified
1763 * @rx_ptype: Rx packet type
1765 static inline void i40e_rx_hash(struct i40e_ring *ring,
1766 union i40e_rx_desc *rx_desc,
1767 struct sk_buff *skb,
1771 const __le64 rss_mask =
1772 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1773 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1775 if (!(ring->netdev->features & NETIF_F_RXHASH))
1778 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1779 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1780 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1785 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1786 * @rx_ring: rx descriptor ring packet is being transacted on
1787 * @rx_desc: pointer to the EOP Rx descriptor
1788 * @skb: pointer to current skb being populated
1789 * @rx_ptype: the packet type decoded by hardware
1791 * This function checks the ring, descriptor, and packet information in
1792 * order to populate the hash, checksum, VLAN, protocol, and
1793 * other fields within the skb.
1795 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1796 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1799 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1800 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1801 I40E_RXD_QW1_STATUS_SHIFT;
1802 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1803 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1804 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1806 if (unlikely(tsynvalid))
1807 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1809 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1811 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1813 skb_record_rx_queue(skb, rx_ring->queue_index);
1815 /* modifies the skb - consumes the enet header */
1816 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1820 * i40e_cleanup_headers - Correct empty headers
1821 * @rx_ring: rx descriptor ring packet is being transacted on
1822 * @skb: pointer to current skb being fixed
1823 * @rx_desc: pointer to the EOP Rx descriptor
1825 * Also address the case where we are pulling data in on pages only
1826 * and as such no data is present in the skb header.
1828 * In addition if skb is not at least 60 bytes we need to pad it so that
1829 * it is large enough to qualify as a valid Ethernet frame.
1831 * Returns true if an error was encountered and skb was freed.
1833 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1834 union i40e_rx_desc *rx_desc)
1837 /* XDP packets use error pointer so abort at this point */
1841 /* ERR_MASK will only have valid bits if EOP set, and
1842 * what we are doing here is actually checking
1843 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in the error field.
1846 if (unlikely(i40e_test_staterr(rx_desc,
1847 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1848 dev_kfree_skb_any(skb);
1852 /* if eth_skb_pad returns an error the skb was freed */
1853 if (eth_skb_pad(skb))
1860 * i40e_page_is_reusable - check if any reuse is possible
1861 * @page: page struct to check
1863 * A page is not reusable if it was allocated under low memory
1864 * conditions, or it's not in the same NUMA node as this CPU.
1866 static inline bool i40e_page_is_reusable(struct page *page)
1868 return (page_to_nid(page) == numa_mem_id()) &&
1869 !page_is_pfmemalloc(page);
1873 * i40e_can_reuse_rx_page - Determine if this page can be reused by
1874 * the adapter for another receive
1876 * @rx_buffer: buffer containing the page
1878 * If page is reusable, rx_buffer->page_offset is adjusted to point to
1879 * an unused region in the page.
1881 * For small pages, @truesize will be a constant value, half the size
1882 * of the memory at page. We'll attempt to alternate between high and
1883 * low halves of the page, with one half ready for use by the hardware
1884 * and the other half being consumed by the stack. We use the page
1885 * ref count to determine whether the stack has finished consuming the
1886 * portion of this page that was passed up with a previous packet. If
1887 * the page ref count is >1, we'll assume the "other" half page is
1888 * still busy, and this page cannot be reused.
1890 * For larger pages, @truesize will be the actual space used by the
1891 * received packet (adjusted upward to an even multiple of the cache
1892 * line size). This will advance through the page by the amount
1893 * actually consumed by the received packets while there is still
1894 * space for a buffer. Each region of larger pages will be used at
1895 * most once, after which the page will not be reused.
1897 * In either case, if the page is reusable its refcount is increased.
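*
* For example, with a 4 KB page and 2 KB buffers, reuse simply alternates
* between the two halves of the page: page_offset is XORed with half the
* page size each time the buffer is recycled.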
1899 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
1901 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1902 struct page *page = rx_buffer->page;
1904 /* Is any reuse possible? */
1905 if (unlikely(!i40e_page_is_reusable(page)))
1908 #if (PAGE_SIZE < 8192)
1909 /* if we are only owner of page we can reuse it */
1910 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1913 #define I40E_LAST_OFFSET \
1914 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
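/* I40E_LAST_OFFSET is the highest page_offset at which another 2K buffer
 * still fits in the page once skb_shared_info overhead is accounted for;
 * past it the page is not reused.
 */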
1915 if (rx_buffer->page_offset > I40E_LAST_OFFSET)
1919 /* If we have drained the page fragment pool we need to update
1920 * the pagecnt_bias and page count so that we fully restock the
1921 * number of references the driver holds.
1923 if (unlikely(pagecnt_bias == 1)) {
1924 page_ref_add(page, USHRT_MAX - 1);
1925 rx_buffer->pagecnt_bias = USHRT_MAX;
1932 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1933 * @rx_ring: rx descriptor ring to transact packets on
1934 * @rx_buffer: buffer containing page to add
1935 * @skb: sk_buff to place the data into
1936 * @size: packet length from rx_desc
1938 * This function will add the data contained in rx_buffer->page to the skb.
1939 * It will just attach the page as a frag to the skb.
1941 * The function will then update the page offset.
1943 static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
1944 struct i40e_rx_buffer *rx_buffer,
1945 struct sk_buff *skb,
1948 #if (PAGE_SIZE < 8192)
1949 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1951 unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
1954 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1955 rx_buffer->page_offset, size, truesize);
1957 /* page is being used so we must update the page offset */
1958 #if (PAGE_SIZE < 8192)
1959 rx_buffer->page_offset ^= truesize;
1961 rx_buffer->page_offset += truesize;
1966 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
1967 * @rx_ring: rx descriptor ring to transact packets on
1968 * @size: size of buffer to add to skb
1970 * This function will pull an Rx buffer from the ring and synchronize it
1971 * for use by the CPU.
1973 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
1974 const unsigned int size)
1976 struct i40e_rx_buffer *rx_buffer;
1978 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1979 prefetchw(rx_buffer->page);
1981 /* we are reusing so sync this buffer for CPU use */
1982 dma_sync_single_range_for_cpu(rx_ring->dev,
1984 rx_buffer->page_offset,
1988 /* We have pulled a buffer for use, so decrement pagecnt_bias */
1989 rx_buffer->pagecnt_bias--;
1995 * i40e_construct_skb - Allocate skb and populate it
1996 * @rx_ring: rx descriptor ring to transact packets on
1997 * @rx_buffer: rx buffer to pull data from
1998 * @xdp: xdp_buff pointing to the data
2000 * This function allocates an skb. It then populates it with the page
2001 * data from the current receive descriptor, taking care to set up the skb correctly.
2004 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2005 struct i40e_rx_buffer *rx_buffer,
2006 struct xdp_buff *xdp)
2008 unsigned int size = xdp->data_end - xdp->data;
2009 #if (PAGE_SIZE < 8192)
2010 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2012 unsigned int truesize = SKB_DATA_ALIGN(size);
2014 unsigned int headlen;
2015 struct sk_buff *skb;
2017 /* prefetch first cache line of first page */
2018 prefetch(xdp->data);
2019 #if L1_CACHE_BYTES < 128
2020 prefetch(xdp->data + L1_CACHE_BYTES);
2022 /* Note, we get here by enabling legacy-rx via:
2024 * ethtool --set-priv-flags <dev> legacy-rx on
2026 * In this mode, we currently get 0 extra XDP headroom as
2027 * opposed to having legacy-rx off, where we process XDP
2028 * packets going to stack via i40e_build_skb(). The latter
2029 * provides us currently with 192 bytes of headroom.
2031 * For i40e_construct_skb() mode it means that the
2032 * xdp->data_meta will always point to xdp->data, since
2033 * the helper cannot expand the head. Should this ever
2034 * change in future for legacy-rx mode on, then lets also
2035 * add xdp->data_meta handling here.
2038 /* allocate a skb to store the frags */
2039 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2041 GFP_ATOMIC | __GFP_NOWARN);
2045 /* Determine available headroom for copy */
2047 if (headlen > I40E_RX_HDR_SIZE)
2048 headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
2050 /* align pull length to size of long to optimize memcpy performance */
2051 memcpy(__skb_put(skb, headlen), xdp->data,
2052 ALIGN(headlen, sizeof(long)));
2054 /* update all of the pointers */
2057 skb_add_rx_frag(skb, 0, rx_buffer->page,
2058 rx_buffer->page_offset + headlen,
2061 /* buffer is used by skb, update page_offset */
2062 #if (PAGE_SIZE < 8192)
2063 rx_buffer->page_offset ^= truesize;
2065 rx_buffer->page_offset += truesize;
2068 /* buffer is unused, reset bias back to rx_buffer */
2069 rx_buffer->pagecnt_bias++;
2076 * i40e_build_skb - Build skb around an existing buffer
2077 * @rx_ring: Rx descriptor ring to transact packets on
2078 * @rx_buffer: Rx buffer to pull data from
2079 * @xdp: xdp_buff pointing to the data
2081 * This function builds an skb around an existing Rx buffer, taking care
2082 * to set up the skb correctly and avoid any memcpy overhead.
2084 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2085 struct i40e_rx_buffer *rx_buffer,
2086 struct xdp_buff *xdp)
2088 unsigned int metasize = xdp->data - xdp->data_meta;
2089 #if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
2096 struct sk_buff *skb;
2098 /* Prefetch first cache line of first page. If xdp->data_meta
 * is unused, this points exactly at xdp->data; otherwise we
2100 * likely have a consumer accessing first few bytes of meta
2101 * data, and then actual data.
2103 prefetch(xdp->data_meta);
2104 #if L1_CACHE_BYTES < 128
	prefetch(xdp->data_meta + L1_CACHE_BYTES);
#endif
2107 /* build an skb around the page buffer */
2108 skb = build_skb(xdp->data_hard_start, truesize);
2112 /* update pointers within the skb to store the data */
2113 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2114 __skb_put(skb, xdp->data_end - xdp->data);
2116 skb_metadata_set(skb, metasize);
2118 /* buffer is used by skb, update page_offset */
2119 #if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
2129 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2130 * @rx_ring: rx descriptor ring to transact packets on
2131 * @rx_buffer: rx buffer to pull data from
2133 * This function will clean up the contents of the rx_buffer. It will
2134 * either recycle the buffer or unmap it and free the associated resources.
2136 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2137 struct i40e_rx_buffer *rx_buffer)
2139 if (i40e_can_reuse_rx_page(rx_buffer)) {
2140 /* hand second half of page back to the ring */
2141 i40e_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
2144 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2145 i40e_rx_pg_size(rx_ring),
2146 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2147 __page_frag_cache_drain(rx_buffer->page,
2148 rx_buffer->pagecnt_bias);
2149 /* clear contents of buffer_info */
2150 rx_buffer->page = NULL;
2155 * i40e_is_non_eop - process handling of non-EOP buffers
2156 * @rx_ring: Rx ring being processed
2157 * @rx_desc: Rx descriptor for current buffer
2158 * @skb: Current socket buffer containing buffer in progress
2160 * This function updates next to clean. If the buffer is an EOP buffer
2161 * this function exits returning false, otherwise it will place the
2162 * sk_buff in the next buffer to be chained and return true indicating
2163 * that this is in fact a non-EOP buffer.
2165 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2166 union i40e_rx_desc *rx_desc,
2167 struct sk_buff *skb)
2169 u32 ntc = rx_ring->next_to_clean + 1;
2171 /* fetch, update, and store next to clean */
2172 ntc = (ntc < rx_ring->count) ? ntc : 0;
2173 rx_ring->next_to_clean = ntc;
2175 prefetch(I40E_RX_DESC(rx_ring, ntc));
2177 /* if we are the last buffer then there is nothing else to do */
2178 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
		return false;
	rx_ring->rx_stats.non_eop_descs++;
	return true;
2187 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2188 struct i40e_ring *xdp_ring);
2190 int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
2192 struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
2194 if (unlikely(!xdpf))
2195 return I40E_XDP_CONSUMED;
2197 return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2201 * i40e_run_xdp - run an XDP program
2202 * @rx_ring: Rx ring being processed
2203 * @xdp: XDP buffer containing the frame
2205 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2206 struct xdp_buff *xdp)
2208 int err, result = I40E_XDP_PASS;
2209 struct i40e_ring *xdp_ring;
2210 struct bpf_prog *xdp_prog;
2214 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2219 prefetchw(xdp->data_hard_start); /* xdp_frame write */
2221 act = bpf_prog_run_xdp(xdp_prog, xdp);
2226 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2227 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2230 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2231 result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
2234 bpf_warn_invalid_xdp_action(act);
2237 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2238 /* fall through -- handle aborts by dropping packet */
2240 result = I40E_XDP_CONSUMED;
2245 return ERR_PTR(-result);
 * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
 * @rx_ring: rx descriptor ring to transact packets on
2251 * @rx_buffer: Rx buffer to adjust
2252 * @size: Size of adjustment
2254 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
				struct i40e_rx_buffer *rx_buffer,
				unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
	rx_buffer->page_offset ^= truesize;
#else
	unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
	rx_buffer->page_offset += truesize;
#endif
}
2270 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
2271 * @xdp_ring: XDP Tx ring
2273 * This function updates the XDP Tx ring tail register.
2275 void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2277 /* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();
	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2285 * i40e_update_rx_stats - Update Rx ring statistics
2286 * @rx_ring: rx descriptor ring
2287 * @total_rx_bytes: number of bytes received
2288 * @total_rx_packets: number of packets received
2290 * This function updates the Rx ring statistics.
2292 void i40e_update_rx_stats(struct i40e_ring *rx_ring,
2293 unsigned int total_rx_bytes,
2294 unsigned int total_rx_packets)
2296 u64_stats_update_begin(&rx_ring->syncp);
2297 rx_ring->stats.packets += total_rx_packets;
2298 rx_ring->stats.bytes += total_rx_bytes;
2299 u64_stats_update_end(&rx_ring->syncp);
2300 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2301 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2305 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @rx_ring: Rx ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
 * should be called when a batch of packets has been processed in the
 * Rx ring.
 **/
2313 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
2315 if (xdp_res & I40E_XDP_REDIR)
2318 if (xdp_res & I40E_XDP_TX) {
2319 struct i40e_ring *xdp_ring =
2320 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2322 i40e_xdp_ring_update_tail(xdp_ring);
2327 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2328 * @rx_ring: rx descriptor ring to transact packets on
2329 * @budget: Total limit on number of packets to process
2331 * This function provides a "bounce buffer" approach to Rx interrupt
2332 * processing. The advantage to this is that on systems that have
2333 * expensive overhead for IOMMU access this provides a means of avoiding
2334 * it by maintaining the mapping of the page to the system.
2336 * Returns amount of work completed
2338 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2340 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2341 struct sk_buff *skb = rx_ring->skb;
2342 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2343 unsigned int xdp_xmit = 0;
2344 bool failure = false;
2345 struct xdp_buff xdp;
2347 xdp.rxq = &rx_ring->xdp_rxq;
2349 while (likely(total_rx_packets < (unsigned int)budget)) {
2350 struct i40e_rx_buffer *rx_buffer;
2351 union i40e_rx_desc *rx_desc;
2357 /* return some buffers to hardware, one at a time is too slow */
2358 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2359 failure = failure ||
2360 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2364 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2366 /* status_error_len will always be zero for unused descriptors
2367 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used. If the
		 * hardware wrote DD then the length will be non-zero.
2371 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2373 /* This memory barrier is needed to keep us from reading
2374 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();
2379 rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc,
2381 if (unlikely(rx_buffer)) {
2382 i40e_reuse_rx_page(rx_ring, rx_buffer);
2387 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2388 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2392 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2393 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2395 /* retrieve a buffer from the ring */
2397 xdp.data = page_address(rx_buffer->page) +
2398 rx_buffer->page_offset;
2399 xdp.data_meta = xdp.data;
2400 xdp.data_hard_start = xdp.data -
2401 i40e_rx_offset(rx_ring);
2402 xdp.data_end = xdp.data + size;
		skb = i40e_run_xdp(rx_ring, &xdp);

		if (IS_ERR(skb)) {
			unsigned int xdp_res = -PTR_ERR(skb);

			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
			} else {
				rx_buffer->pagecnt_bias++;
			}
			total_rx_bytes += size;
			total_rx_packets++;
		} else if (skb) {
			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
		} else if (ring_uses_build_skb(rx_ring)) {
			skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
		} else {
			skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
		}
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}
2433 i40e_put_rx_buffer(rx_ring, rx_buffer);
		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
			continue;
		if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
			skb = NULL;
			continue;
		}
2444 /* probably a little skewed due to removing CRC */
2445 total_rx_bytes += skb->len;
2447 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2448 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
2449 I40E_RXD_QW1_PTYPE_SHIFT;
2451 /* populate checksum, VLAN, and protocol */
2452 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
2454 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
2455 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
2457 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2458 i40e_receive_skb(rx_ring, skb, vlan_tag);
		/* update budget accounting */
		total_rx_packets++;
2465 i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
2468 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
2470 /* guarantee a trip back through this routine if there was a failure */
2471 return failure ? budget : (int)total_rx_packets;
2474 static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2478 /* We don't bother with setting the CLEARPBA bit as the data sheet
2479 * points out doing so is "meaningless since it was already
	 * auto-cleared". The auto-clearing happens when the interrupt is
	 * asserted.
	 *
	 * Hardware errata 28 also indicates that writing to a
2484 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2485 * an event in the PBA anyway so we need to rely on the automask
2486 * to hold pending events for us until the interrupt is re-enabled
2488 * The itr value is reported in microseconds, and the register
2489 * value is recorded in 2 microsecond units. For this reason we
	 * only need to shift by the interval shift - 1 instead of the
	 * full value.
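	 *
	 * E.g. a requested ITR of 50 usecs, shifted left by (INTERVAL_SHIFT - 1),
	 * lands in the INTERVAL field as the value 25, which the hardware reads
	 * as 25 * 2 = 50 usecs between interrupts.
	 */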
2493 itr &= I40E_ITR_MASK;
2495 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2496 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));

	return val;
2502 /* a small macro to shorten up some long lines */
2503 #define INTREG I40E_PFINT_DYN_CTLN
2505 /* The act of updating the ITR will cause it to immediately trigger. In order
2506 * to prevent this from throwing off adaptive update statistics we defer the
2507 * update so that it can only happen so often. So after either Tx or Rx are
2508 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
2512 #define ITR_COUNTDOWN_START 3
2515 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2516 * @vsi: the VSI we care about
2517 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2520 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2521 struct i40e_q_vector *q_vector)
2523 struct i40e_hw *hw = &vsi->back->hw;
2526 /* If we don't have MSIX, then we only need to re-enable icr0 */
2527 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
		i40e_irq_dynamic_enable_icr0(vsi->back);
		return;
	}
2532 /* These will do nothing if dynamic updates are not enabled */
2533 i40e_update_itr(q_vector, &q_vector->tx);
2534 i40e_update_itr(q_vector, &q_vector->rx);
2536 /* This block of logic allows us to get away with only updating
2537 * one ITR value with each interrupt. The idea is to perform a
2538 * pseudo-lazy update with the following criteria.
2540 * 1. Rx is given higher priority than Tx if both are in same state
2541 * 2. If we must reduce an ITR that is given highest priority.
2542 * 3. We then give priority to increasing ITR based on amount.
2544 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2545 /* Rx ITR needs to be reduced, this is highest priority */
2546 intval = i40e_buildreg_itr(I40E_RX_ITR,
2547 q_vector->rx.target_itr);
2548 q_vector->rx.current_itr = q_vector->rx.target_itr;
2549 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2550 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2551 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2552 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2553 /* Tx ITR needs to be reduced, this is second priority
2554 * Tx ITR needs to be increased more than Rx, fourth priority
2556 intval = i40e_buildreg_itr(I40E_TX_ITR,
2557 q_vector->tx.target_itr);
2558 q_vector->tx.current_itr = q_vector->tx.target_itr;
2559 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2560 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2561 /* Rx ITR needs to be increased, third priority */
2562 intval = i40e_buildreg_itr(I40E_RX_ITR,
2563 q_vector->rx.target_itr);
2564 q_vector->rx.current_itr = q_vector->rx.target_itr;
2565 q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* No ITR update, lowest priority */
2568 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2569 if (q_vector->itr_countdown)
2570 q_vector->itr_countdown--;
2573 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2574 wr32(hw, INTREG(q_vector->reg_idx), intval);
2578 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2579 * @napi: napi struct with our devices info in it
2580 * @budget: amount of work driver is allowed to do this pass, in packets
2582 * This function will clean all queues associated with a q_vector.
2584 * Returns the amount of work done
2586 int i40e_napi_poll(struct napi_struct *napi, int budget)
2588 struct i40e_q_vector *q_vector =
2589 container_of(napi, struct i40e_q_vector, napi);
2590 struct i40e_vsi *vsi = q_vector->vsi;
2591 struct i40e_ring *ring;
2592 bool clean_complete = true;
2593 bool arm_wb = false;
2594 int budget_per_ring;
2597 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
		napi_complete(napi);
		return 0;
	}
2602 /* Since the actual Tx work is minimal, we can give the Tx a larger
2603 * budget and be more aggressive about cleaning up the Tx descriptors.
2605 i40e_for_each_ring(ring, q_vector->tx) {
2606 bool wd = ring->xsk_umem ?
2607 i40e_clean_xdp_tx_irq(vsi, ring, budget) :
2608 i40e_clean_tx_irq(vsi, ring, budget);
		if (!wd) {
			clean_complete = false;
			continue;
		}
2614 arm_wb |= ring->arm_wb;
2615 ring->arm_wb = false;
2618 /* Handle case where we are called by netpoll with a budget of 0 */
2622 /* We attempt to distribute budget to each Rx queue fairly, but don't
2623 * allow the budget to go below 1 because that would exit polling early.
2625 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
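	/* E.g. a NAPI budget of 64 split over 4 ring pairs gives each Rx ring
	 * a budget of 16; with more rings than budget, the max() keeps it at 1
	 * so no ring is ever polled with a zero budget.
	 */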
2627 i40e_for_each_ring(ring, q_vector->rx) {
2628 int cleaned = ring->xsk_umem ?
2629 i40e_clean_rx_irq_zc(ring, budget_per_ring) :
2630 i40e_clean_rx_irq(ring, budget_per_ring);
2632 work_done += cleaned;
2633 /* if we clean as many as budgeted, we must not be done */
2634 if (cleaned >= budget_per_ring)
2635 clean_complete = false;
2638 /* If work not completed, return budget and polling will return */
2639 if (!clean_complete) {
2640 int cpu_id = smp_processor_id();
2642 /* It is possible that the interrupt affinity has changed but,
2643 * if the cpu is pegged at 100%, polling will never exit while
2644 * traffic continues and the interrupt will be stuck on this
2645 * cpu. We check to make sure affinity is correct before we
2646 * continue to poll, otherwise we must stop polling so the
2647 * interrupt can move to the correct cpu.
2649 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2650 /* Tell napi that we are done polling */
2651 napi_complete_done(napi, work_done);
2653 /* Force an interrupt */
2654 i40e_force_wb(vsi, q_vector);
			/* Return budget-1 so that polling stops */
			return budget - 1;
2661 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2662 i40e_enable_wb_on_itr(vsi, q_vector);
2667 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2668 q_vector->arm_wb_state = false;
2670 /* Work is done so exit the polling mode and re-enable the interrupt */
2671 napi_complete_done(napi, work_done);
2673 i40e_update_enable_itr(vsi, q_vector);
2675 return min(work_done, budget - 1);
2679 * i40e_atr - Add a Flow Director ATR filter
2680 * @tx_ring: ring to add programming descriptor to
2682 * @tx_flags: send tx flags
2684 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2687 struct i40e_filter_program_desc *fdir_desc;
2688 struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
2696 u32 flex_ptype, dtype_cmd;
2700 /* make sure ATR is enabled */
2701 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2704 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2707 /* if sampling is disabled do nothing */
2708 if (!tx_ring->atr_sample_rate)
2711 /* Currently only IPv4/IPv6 with TCP is supported */
2712 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2715 /* snag network header to get L4 type and address */
2716 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2717 skb_inner_network_header(skb) : skb_network_header(skb);
2719 /* Note: tx_flags gets modified to reflect inner protocols in
2720 * tx_enable_csum function if encap is enabled.
2722 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2723 /* access ihl as u8 to avoid unaligned access on ia64 */
2724 hlen = (hdr.network[0] & 0x0F) << 2;
2725 l4_proto = hdr.ipv4->protocol;
	} else {
		/* find the start of the innermost ipv6 header */
2728 unsigned int inner_hlen = hdr.network - skb->data;
2729 unsigned int h_offset = inner_hlen;
2731 /* this function updates h_offset to the end of the header */
		l4_proto = ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2734 /* hlen will contain our best estimate of the tcp header */
2735 hlen = h_offset - inner_hlen;
2738 if (l4_proto != IPPROTO_TCP)
2741 th = (struct tcphdr *)(hdr.network + hlen);
2743 /* Due to lack of space, no more new filters can be programmed */
2744 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2746 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
		/* HW ATR eviction will take care of removing filters on FIN
		 * and RST packets.
		 */
		if (th->fin || th->rst)
			return;
	}
2754 tx_ring->atr_count++;
2756 /* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin && !th->syn && !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;
2763 tx_ring->atr_count = 0;
2765 /* grab the next descriptor */
2766 i = tx_ring->next_to_use;
2767 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2770 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2772 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2773 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2774 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2775 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2776 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2777 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2778 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2780 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2782 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2784 dtype_cmd |= (th->fin || th->rst) ?
2785 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2786 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2787 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2788 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2790 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2791 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2793 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2794 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2796 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2797 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
		dtype_cmd |=
			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
			 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	else
		dtype_cmd |=
			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2805 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2806 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2808 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2809 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2811 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2812 fdir_desc->rsvd = cpu_to_le32(0);
2813 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2814 fdir_desc->fd_id = cpu_to_le32(0);
2818 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2820 * @tx_ring: ring to send buffer on
2821 * @flags: the tx flags to be set
2823 * Checks the skb and set up correspondingly several generic transmit flags
2824 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 * Returns an error code to indicate the frame should be dropped upon error,
 * and otherwise returns 0 to indicate the flags have been set properly.
 **/
2829 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2830 struct i40e_ring *tx_ring,
2833 __be16 protocol = skb->protocol;
2836 if (protocol == htons(ETH_P_8021Q) &&
2837 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2838 /* When HW VLAN acceleration is turned off by the user the
2839 * stack sets the protocol to 8021q so that the driver
2840 * can take any steps required to support the SW only
2841 * VLAN handling. In our case the driver doesn't need
2842 * to take any further steps so just set the protocol
2843 * to the encapsulated ethertype.
2845 skb->protocol = vlan_get_protocol(skb);
2849 /* if we have a HW VLAN tag being added, default to the HW one */
2850 if (skb_vlan_tag_present(skb)) {
2851 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2852 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2853 /* else if it is a SW VLAN, check the next protocol and store the tag */
2854 } else if (protocol == htons(ETH_P_8021Q)) {
2855 struct vlan_hdr *vhdr, _vhdr;
		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;
2861 protocol = vhdr->h_vlan_encapsulated_proto;
2862 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2863 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2866 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2869 /* Insert 802.1p priority into VLAN header */
2870 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2871 (skb->priority != TC_PRIO_CONTROL)) {
2872 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2873 tx_flags |= (skb->priority & 0x7) <<
2874 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
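		/* Only the 3-bit 802.1p PCP value survives the & 0x7 above;
		 * e.g. skb->priority 5 ends up as PCP 5 in bits 15:13 of the
		 * VLAN TCI inserted by hardware or by the SW path below.
		 */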
2875 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2876 struct vlan_ethhdr *vhdr;
2879 rc = skb_cow_head(skb, 0);
2882 vhdr = (struct vlan_ethhdr *)skb->data;
2883 vhdr->h_vlan_TCI = htons(tx_flags >>
2884 I40E_TX_FLAGS_VLAN_SHIFT);
2886 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2896 * i40e_tso - set up the tso context descriptor
2897 * @first: pointer to first Tx buffer for xmit
2898 * @hdr_len: ptr to the size of the packet header
2899 * @cd_type_cmd_tso_mss: Quad Word 1
2901 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2903 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2904 u64 *cd_type_cmd_tso_mss)
2906 struct sk_buff *skb = first->skb;
2907 u64 cd_cmd, cd_tso_len, cd_mss;
2918 u32 paylen, l4_offset;
2919 u16 gso_segs, gso_size;
2922 if (skb->ip_summed != CHECKSUM_PARTIAL)
2925 if (!skb_is_gso(skb))
2928 err = skb_cow_head(skb, 0);
2932 ip.hdr = skb_network_header(skb);
2933 l4.hdr = skb_transport_header(skb);
2935 /* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}
2943 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2947 SKB_GSO_UDP_TUNNEL |
2948 SKB_GSO_UDP_TUNNEL_CSUM)) {
2949 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2950 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2953 /* determine offset of outer transport header */
2954 l4_offset = l4.hdr - skb->data;
2956 /* remove payload length from outer checksum */
2957 paylen = skb->len - l4_offset;
2958 csum_replace_by_diff(&l4.udp->check,
2959 (__force __wsum)htonl(paylen));
2962 /* reset pointers to inner headers */
2963 ip.hdr = skb_inner_network_header(skb);
2964 l4.hdr = skb_inner_transport_header(skb);
2966 /* initialize inner IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}
2975 /* determine offset of inner transport header */
2976 l4_offset = l4.hdr - skb->data;
2978 /* remove payload length from inner checksum */
2979 paylen = skb->len - l4_offset;
2980 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
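	/* The CHECKSUM_PARTIAL seed from the stack is a pseudo-header sum that
	 * includes the payload length; removing it here leaves only the
	 * address/protocol portion, which is what the hardware expects when it
	 * rebuilds the checksum for each segment.
	 */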
2982 /* compute length of segmentation header */
2983 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
2985 /* pull values out of skb_shinfo */
2986 gso_size = skb_shinfo(skb)->gso_size;
2987 gso_segs = skb_shinfo(skb)->gso_segs;
2989 /* update GSO size and bytecount with header size */
2990 first->gso_segs = gso_segs;
2991 first->bytecount += (first->gso_segs - 1) * *hdr_len;
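	/* Illustrative numbers: gso_size 1448, 10 segments and a 66 byte
	 * header add 9 * 66 = 594 bytes to bytecount, since every segment
	 * after the first repeats the headers on the wire.
	 */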
2993 /* find the field values */
2994 cd_cmd = I40E_TX_CTX_DESC_TSO;
2995 cd_tso_len = skb->len - *hdr_len;
2997 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2998 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2999 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
3004 * i40e_tsyn - set up the tsyn context descriptor
3005 * @tx_ring: ptr to the ring to send
3006 * @skb: ptr to the skb we're sending
3007 * @tx_flags: the collected send information
3008 * @cd_type_cmd_tso_mss: Quad Word 1
3010 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
3012 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
3013 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
3017 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
3020 /* Tx timestamps cannot be sampled when doing TSO */
3021 if (tx_flags & I40E_TX_FLAGS_TSO)
3024 /* only timestamp the outbound packet if the user has requested it and
3025 * we are not already transmitting a packet to be timestamped
3027 pf = i40e_netdev_to_pf(tx_ring->netdev);
3028 if (!(pf->flags & I40E_FLAG_PTP))
3032 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
3033 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3034 pf->ptp_tx_start = jiffies;
3035 pf->ptp_tx_skb = skb_get(skb);
3037 pf->tx_hwtstamp_skipped++;
3041 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3042 I40E_TXD_CTX_QW1_CMD_SHIFT;
3048 * i40e_tx_enable_csum - Enable Tx checksum offloads
3050 * @tx_flags: pointer to Tx flags currently set
3051 * @td_cmd: Tx descriptor command bits to set
3052 * @td_offset: Tx descriptor header offsets to set
3053 * @tx_ring: Tx descriptor ring
3054 * @cd_tunneling: ptr to context desc bits
3056 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3057 u32 *td_cmd, u32 *td_offset,
3058 struct i40e_ring *tx_ring,
3071 unsigned char *exthdr;
3072 u32 offset, cmd = 0;
3076 if (skb->ip_summed != CHECKSUM_PARTIAL)
3079 ip.hdr = skb_network_header(skb);
3080 l4.hdr = skb_transport_header(skb);
3082 /* compute outer L2 header size */
3083 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
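	/* The MACLEN field is encoded in 2-byte words, hence the divide by 2:
	 * a plain 14 byte Ethernet header yields 7, an 18 byte VLAN-tagged
	 * header yields 9.
	 */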
3085 if (skb->encapsulation) {
3087 /* define outer network header type */
3088 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3089 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3090 I40E_TX_CTX_EXT_IP_IPV4 :
3091 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3093 l4_proto = ip.v4->protocol;
3094 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3095 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3097 exthdr = ip.hdr + sizeof(*ip.v6);
3098 l4_proto = ip.v6->nexthdr;
3099 if (l4.hdr != exthdr)
3100 ipv6_skip_exthdr(skb, exthdr - skb->data,
3101 &l4_proto, &frag_off);
3104 /* define outer transport */
3107 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3108 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3111 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3112 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3116 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3117 l4.hdr = skb_inner_network_header(skb);
3120 if (*tx_flags & I40E_TX_FLAGS_TSO)
3123 skb_checksum_help(skb);
3127 /* compute outer L3 header size */
3128 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3129 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3131 /* switch IP header pointer from outer to inner header */
3132 ip.hdr = skb_inner_network_header(skb);
3134 /* compute tunnel header size */
3135 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3136 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3138 /* indicate if we need to offload outer UDP header */
3139 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3140 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3141 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3142 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3144 /* record tunnel offload values */
3145 *cd_tunneling |= tunnel;
3147 /* switch L4 header pointer from outer to inner */
3148 l4.hdr = skb_inner_transport_header(skb);
3151 /* reset type as we transition from outer to inner headers */
3152 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3153 if (ip.v4->version == 4)
3154 *tx_flags |= I40E_TX_FLAGS_IPV4;
3155 if (ip.v6->version == 6)
3156 *tx_flags |= I40E_TX_FLAGS_IPV6;
3159 /* Enable IP checksum offloads */
3160 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3161 l4_proto = ip.v4->protocol;
3162 /* the stack computes the IP header already, the only time we
3163 * need the hardware to recompute it is in the case of TSO.
3165 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3166 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3167 I40E_TX_DESC_CMD_IIPT_IPV4;
3168 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3169 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3171 exthdr = ip.hdr + sizeof(*ip.v6);
3172 l4_proto = ip.v6->nexthdr;
3173 if (l4.hdr != exthdr)
3174 ipv6_skip_exthdr(skb, exthdr - skb->data,
3175 &l4_proto, &frag_off);
3178 /* compute inner L3 header size */
3179 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		offset |= (sizeof(struct sctphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		offset |= (sizeof(struct udphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		if (*tx_flags & I40E_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}
	*td_cmd |= cmd;
	*td_offset |= offset;
 * i40e_create_tx_ctx - Build the Tx context descriptor
3215 * @tx_ring: ring to create the descriptor on
3216 * @cd_type_cmd_tso_mss: Quad Word 1
3217 * @cd_tunneling: Quad Word 0 - bits 0-31
3218 * @cd_l2tag2: Quad Word 0 - bits 32-63
3220 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3221 const u64 cd_type_cmd_tso_mss,
3222 const u32 cd_tunneling, const u32 cd_l2tag2)
3224 struct i40e_tx_context_desc *context_desc;
3225 int i = tx_ring->next_to_use;
3227 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3228 !cd_tunneling && !cd_l2tag2)
3231 /* grab the next descriptor */
3232 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3235 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3237 /* cpu_to_le32 and assign to struct fields */
3238 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3239 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3240 context_desc->rsvd = cpu_to_le16(0);
3241 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3245 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3246 * @tx_ring: the ring to be checked
3247 * @size: the size buffer we want to assure is available
3249 * Returns -EBUSY if a stop is needed, else 0
3251 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3253 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3254 /* Memory barrier before checking head and tail */
3257 /* Check again in a case another CPU has just made room available. */
3258 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3261 /* A reprieve! - use start_queue because it doesn't call schedule */
3262 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3263 ++tx_ring->tx_stats.restart_queue;
3268 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3271 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3272 * and so we need to figure out the cases where we need to linearize the skb.
3274 * For TSO we need to count the TSO header and segment payload separately.
3275 * As such we need to check cases where we have 7 fragments or more as we
3276 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * frags.
 **/
3280 bool __i40e_chk_linearize(struct sk_buff *skb)
3282 const struct skb_frag_struct *frag, *stale;
3285 /* no need to check if number of frags is less than 7 */
3286 nr_frags = skb_shinfo(skb)->nr_frags;
3287 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3290 /* We need to walk through the list and validate that each group
3291 * of 6 fragments totals at least gso_size.
3293 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3294 frag = &skb_shinfo(skb)->frags[0];
3296 /* Initialize size to the negative value of gso_size minus 1. We
	 * use this as the worst case scenario in which the frag ahead
3298 * of us only provides one byte which is why we are limited to 6
3299 * descriptors for a single transmit as the header and previous
3300 * fragment are already consuming 2 descriptors.
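	 *
	 * For example, with a gso_size of 2000, six consecutive 256 byte
	 * fragments sum to 1536, driving sum negative and forcing a
	 * linearize, while six fragments of 400 bytes or more keep sum
	 * non-negative and the skb is left untouched.
	 */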
3302 sum = 1 - skb_shinfo(skb)->gso_size;
3304 /* Add size of frags 0 through 4 to create our initial sum */
3305 sum += skb_frag_size(frag++);
3306 sum += skb_frag_size(frag++);
3307 sum += skb_frag_size(frag++);
3308 sum += skb_frag_size(frag++);
3309 sum += skb_frag_size(frag++);
3311 /* Walk through fragments adding latest fragment, testing it, and
3312 * then removing stale fragments from the sum.
3314 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3315 int stale_size = skb_frag_size(stale);
3317 sum += skb_frag_size(frag++);
3319 /* The stale fragment may present us with a smaller
3320 * descriptor than the actual fragment size. To account
3321 * for that we need to remove all the data on the front and
3322 * figure out what the remainder would be in the last
3323 * descriptor associated with the fragment.
3325 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3326 int align_pad = -(stale->page_offset) &
3327 (I40E_MAX_READ_REQ_SIZE - 1);
3330 stale_size -= align_pad;
3333 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3334 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3335 } while (stale_size > I40E_MAX_DATA_PER_TXD);
		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;
3352 * i40e_tx_map - Build the Tx descriptor
3353 * @tx_ring: ring to send buffer on
3355 * @first: first buffer info buffer to use
3356 * @tx_flags: collected send information
3357 * @hdr_len: size of the packet header
3358 * @td_cmd: the command field in the descriptor
3359 * @td_offset: offset for checksum or crc
3361 * Returns 0 on success, -1 on failure to DMA
3363 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3364 struct i40e_tx_buffer *first, u32 tx_flags,
3365 const u8 hdr_len, u32 td_cmd, u32 td_offset)
3367 unsigned int data_len = skb->data_len;
3368 unsigned int size = skb_headlen(skb);
3369 struct skb_frag_struct *frag;
3370 struct i40e_tx_buffer *tx_bi;
3371 struct i40e_tx_desc *tx_desc;
3372 u16 i = tx_ring->next_to_use;
3377 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3378 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3379 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3380 I40E_TX_FLAGS_VLAN_SHIFT;
3383 first->tx_flags = tx_flags;
3385 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3387 tx_desc = I40E_TX_DESC(tx_ring, i);
3390 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3391 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3393 if (dma_mapping_error(tx_ring->dev, dma))
3396 /* record length, and DMA address */
3397 dma_unmap_len_set(tx_bi, len, size);
3398 dma_unmap_addr_set(tx_bi, dma, dma);
3400 /* align size to end of page */
3401 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3402 tx_desc->buffer_addr = cpu_to_le64(dma);
3404 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3405 tx_desc->cmd_type_offset_bsz =
3406 build_ctob(td_cmd, td_offset,
3413 if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}
3421 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3422 tx_desc->buffer_addr = cpu_to_le64(dma);
3425 if (likely(!data_len))
3428 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3435 if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}
3440 size = skb_frag_size(frag);
3443 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3446 tx_bi = &tx_ring->tx_bi[i];
3449 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
	if (i == tx_ring->count)
		i = 0;
3455 tx_ring->next_to_use = i;
3457 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3459 /* write last descriptor with EOP bit */
3460 td_cmd |= I40E_TX_DESC_CMD_EOP;
3462 /* We OR these values together to check both against 4 (WB_STRIDE)
3463 * below. This is safe since we don't re-use desc_count afterwards.
3465 desc_count |= ++tx_ring->packet_stride;
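	/* Because WB_STRIDE (4) is a power of two, (desc_count | packet_stride)
	 * has a bit at or above that threshold exactly when either value does,
	 * so the single compare below stands in for two separate checks.
	 */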
3467 if (desc_count >= WB_STRIDE) {
3468 /* write last descriptor with RS bit set */
3469 td_cmd |= I40E_TX_DESC_CMD_RS;
3470 tx_ring->packet_stride = 0;
3473 tx_desc->cmd_type_offset_bsz =
3474 build_ctob(td_cmd, td_offset, size, td_tag);
3476 /* Force memory writes to complete before letting h/w know there
3477 * are new descriptors to fetch.
3479 * We also use this memory barrier to make certain all of the
3480 * status bits have been updated before next_to_watch is written.
3484 /* set next_to_watch value indicating a packet is present */
3485 first->next_to_watch = tx_desc;
3487 /* notify HW of packet */
3488 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
3489 writel(i, tx_ring->tail);
3491 /* we need this if more than one processor can write to our tail
3492 * at a time, it synchronizes IO on IA64/Altix systems
3500 dev_info(tx_ring->dev, "TX DMA map failed\n");
3502 /* clear dma mappings for failed tx_bi map */
3504 tx_bi = &tx_ring->tx_bi[i];
3505 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3513 tx_ring->next_to_use = i;
3519 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
 * @xdpf: frame to transmit
3521 * @xdp_ring: XDP Tx ring
3523 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
3524 struct i40e_ring *xdp_ring)
3526 u16 i = xdp_ring->next_to_use;
3527 struct i40e_tx_buffer *tx_bi;
3528 struct i40e_tx_desc *tx_desc;
3529 u32 size = xdpf->len;
3532 if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
3533 xdp_ring->tx_stats.tx_busy++;
3534 return I40E_XDP_CONSUMED;
3537 dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
3538 if (dma_mapping_error(xdp_ring->dev, dma))
3539 return I40E_XDP_CONSUMED;
3541 tx_bi = &xdp_ring->tx_bi[i];
3542 tx_bi->bytecount = size;
3543 tx_bi->gso_segs = 1;
3546 /* record length, and DMA address */
3547 dma_unmap_len_set(tx_bi, len, size);
3548 dma_unmap_addr_set(tx_bi, dma, dma);
3550 tx_desc = I40E_TX_DESC(xdp_ring, i);
3551 tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
						  | I40E_TXD_CMD,
						  0, size, 0);
3556 /* Make certain all of the status bits have been updated
3557 * before next_to_watch is written.
	if (i == xdp_ring->count)
		i = 0;
3565 tx_bi->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return I40E_XDP_TX;
3572 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3574 * @tx_ring: ring to send buffer on
3576 * Returns NETDEV_TX_OK if sent, else an error code
3578 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3579 struct i40e_ring *tx_ring)
3581 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3582 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3583 struct i40e_tx_buffer *first;
3592 /* prefetch the data, we'll need it later */
3593 prefetch(skb->data);
3595 i40e_trace(xmit_frame_ring, skb, tx_ring);
3597 count = i40e_xmit_descriptor_count(skb);
3598 if (i40e_chk_linearize(skb, count)) {
3599 if (__skb_linearize(skb)) {
3600 dev_kfree_skb_any(skb);
3601 return NETDEV_TX_OK;
3603 count = i40e_txd_use_count(skb->len);
3604 tx_ring->tx_stats.tx_linearize++;
3607 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3608 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3609 * + 4 desc gap to avoid the cache line where head is,
3610 * + 1 desc for context descriptor,
3611 * otherwise try next time
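	 *
	 * E.g. a linear 2 KB skb needs a single data descriptor, so the check
	 * below waits for 1 + 4 + 1 = 6 free descriptors before proceeding.
	 */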
3613 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3614 tx_ring->tx_stats.tx_busy++;
3615 return NETDEV_TX_BUSY;
3618 /* record the location of the first descriptor for this packet */
3619 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3621 first->bytecount = skb->len;
3622 first->gso_segs = 1;
3624 /* prepare the xmit flags */
3625 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3628 /* obtain protocol of skb */
3629 protocol = vlan_get_protocol(skb);
3631 /* setup IPv4/IPv6 offloads */
3632 if (protocol == htons(ETH_P_IP))
3633 tx_flags |= I40E_TX_FLAGS_IPV4;
3634 else if (protocol == htons(ETH_P_IPV6))
3635 tx_flags |= I40E_TX_FLAGS_IPV6;
3637 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3642 tx_flags |= I40E_TX_FLAGS_TSO;
3644 /* Always offload the checksum, since it's in the data descriptor */
3645 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3646 tx_ring, &cd_tunneling);
3650 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;
3655 skb_tx_timestamp(skb);
3657 /* always enable CRC insertion offload */
3658 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3660 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3661 cd_tunneling, cd_l2tag2);
3663 /* Add Flow Director ATR if it's enabled.
3665 * NOTE: this must always be directly before the data descriptor.
3667 i40e_atr(tx_ring, skb, tx_flags);
3669 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3671 goto cleanup_tx_tstamp;
3673 return NETDEV_TX_OK;
3676 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3677 dev_kfree_skb_any(first->skb);
cleanup_tx_tstamp:
	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3681 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3683 dev_kfree_skb_any(pf->ptp_tx_skb);
3684 pf->ptp_tx_skb = NULL;
3685 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3688 return NETDEV_TX_OK;
3692 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3694 * @netdev: network interface device structure
3696 * Returns NETDEV_TX_OK if sent, else an error code
3698 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3700 struct i40e_netdev_priv *np = netdev_priv(netdev);
3701 struct i40e_vsi *vsi = np->vsi;
3702 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
3707 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3708 return NETDEV_TX_OK;
3710 return i40e_xmit_frame_ring(skb, tx_ring);
3714 * i40e_xdp_xmit - Implements ndo_xdp_xmit
3718 * Returns number of frames successfully sent. Frames that fail are
 * freed via the XDP return API.
 *
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 **/
3724 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3727 struct i40e_netdev_priv *np = netdev_priv(dev);
3728 unsigned int queue_index = smp_processor_id();
3729 struct i40e_vsi *vsi = np->vsi;
3730 struct i40e_ring *xdp_ring;
3734 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3737 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
3740 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3743 xdp_ring = vsi->xdp_rings[queue_index];
3745 for (i = 0; i < n; i++) {
3746 struct xdp_frame *xdpf = frames[i];
3749 err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
3750 if (err != I40E_XDP_TX) {
3751 xdp_return_frame_rx_napi(xdpf);
3756 if (unlikely(flags & XDP_XMIT_FLUSH))
3757 i40e_xdp_ring_update_tail(xdp_ring);