1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
4 #include <linux/bpf_trace.h>
5 #include <linux/prefetch.h>
6 #include <linux/sctp.h>
9 #include "i40e_txrx_common.h"
10 #include "i40e_trace.h"
13 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
15 * i40e_fdir - Generate a Flow Director descriptor based on fdata
16 * @tx_ring: Tx ring to send buffer on
17 * @fdata: Flow director filter data
18 * @add: Indicate if we are adding a rule or deleting one
21 static void i40e_fdir(struct i40e_ring *tx_ring,
22 struct i40e_fdir_filter *fdata, bool add)
24 struct i40e_filter_program_desc *fdir_desc;
25 struct i40e_pf *pf = tx_ring->vsi->back;
26 u32 flex_ptype, dtype_cmd;
29 /* grab the next descriptor */
30 i = tx_ring->next_to_use;
31 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
33 i++;
34 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
36 flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK, fdata->q_index);
38 flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_FLEXOFF_MASK,
41 flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype);
43 /* Use LAN VSI Id if not programmed by user */
44 flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK,
45 fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id);
47 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
49 dtype_cmd |= add ?
50 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
51 I40E_TXD_FLTR_QW1_PCMD_SHIFT :
52 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
53 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
55 dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_DEST_MASK, fdata->dest_ctl);
57 dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_FD_STATUS_MASK,
60 if (fdata->cnt_index) {
61 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
62 dtype_cmd |= FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
63 fdata->cnt_index);
66 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
67 fdir_desc->rsvd = cpu_to_le32(0);
68 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
69 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
72 #define I40E_FD_CLEAN_DELAY 10
74 * i40e_program_fdir_filter - Program a Flow Director filter
75 * @fdir_data: Packet data that will be filter parameters
76 * @raw_packet: the pre-allocated packet buffer for FDir
78 * @add: True for add/update, False for remove
80 static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
81 u8 *raw_packet, struct i40e_pf *pf,
82 bool add)
84 struct i40e_tx_buffer *tx_buf, *first;
85 struct i40e_tx_desc *tx_desc;
86 struct i40e_ring *tx_ring;
93 /* find existing FDIR VSI */
94 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
98 tx_ring = vsi->tx_rings[0];
101 /* we need two descriptors to add/del a filter and we can wait */
102 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
105 msleep_interruptible(1);
108 dma = dma_map_single(dev, raw_packet,
109 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
110 if (dma_mapping_error(dev, dma))
113 /* grab the next descriptor */
114 i = tx_ring->next_to_use;
115 first = &tx_ring->tx_bi[i];
116 i40e_fdir(tx_ring, fdir_data, add);
118 /* Now program a dummy descriptor */
119 i = tx_ring->next_to_use;
120 tx_desc = I40E_TX_DESC(tx_ring, i);
121 tx_buf = &tx_ring->tx_bi[i];
123 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
125 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
127 /* record length, and DMA address */
128 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
129 dma_unmap_addr_set(tx_buf, dma, dma);
131 tx_desc->buffer_addr = cpu_to_le64(dma);
132 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
134 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
135 tx_buf->raw_buf = (void *)raw_packet;
137 tx_desc->cmd_type_offset_bsz =
138 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
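/* The DUMMY bit marks this as a filter-programming packet: the hardware is
 * expected to consume the raw packet image for the filter set up by the
 * preceding descriptor rather than transmit it on the wire; RS/EOP in
 * I40E_TXD_CMD still request a writeback so the clean path can reclaim the
 * buffer.
 */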
140 /* Force memory writes to complete before letting h/w
141 * know there are new descriptors to fetch.
142 */
143 wmb();
145 /* Mark the data descriptor to be watched */
146 first->next_to_watch = tx_desc;
148 writel(tx_ring->next_to_use, tx_ring->tail);
156 * i40e_create_dummy_packet - Constructs dummy packet for HW
157 * @dummy_packet: preallocated space for dummy packet
158 * @ipv4: is layer 3 packet of version 4 or 6
159 * @l4proto: next level protocol used in data portion of l3
162 * Returns address of layer 4 protocol dummy packet.
164 static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
165 struct i40e_fdir_filter *data)
167 bool is_vlan = !!data->vlan_tag;
168 struct vlan_hdr vlan = {};
169 struct ipv6hdr ipv6 = {};
170 struct ethhdr eth = {};
171 struct iphdr ip = {};
175 eth.h_proto = cpu_to_be16(ETH_P_IP);
176 ip.protocol = l4proto;
180 ip.daddr = data->dst_ip;
181 ip.saddr = data->src_ip;
183 eth.h_proto = cpu_to_be16(ETH_P_IPV6);
184 ipv6.nexthdr = l4proto;
187 memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6,
188 sizeof(__be32) * 4);
189 memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6,
190 sizeof(__be32) * 4);
194 vlan.h_vlan_TCI = data->vlan_tag;
195 vlan.h_vlan_encapsulated_proto = eth.h_proto;
196 eth.h_proto = data->vlan_etype;
200 memcpy(tmp, &eth, sizeof(eth));
204 memcpy(tmp, &vlan, sizeof(vlan));
209 memcpy(tmp, &ip, sizeof(ip));
212 memcpy(tmp, &ipv6, sizeof(ipv6));
220 * i40e_create_dummy_udp_packet - helper function to create UDP packet
221 * @raw_packet: preallocated space for dummy packet
222 * @ipv4: is layer 3 packet of version 4 or 6
223 * @l4proto: next level protocol used in data portion of l3
226 * Helper function to populate udp fields.
228 static void i40e_create_dummy_udp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
229 struct i40e_fdir_filter *data)
234 tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_UDP, data);
235 udp = (struct udphdr *)(tmp);
236 udp->dest = data->dst_port;
237 udp->source = data->src_port;
241 * i40e_create_dummy_tcp_packet - helper function to create TCP packet
242 * @raw_packet: preallocated space for dummy packet
243 * @ipv4: is layer 3 packet of version 4 or 6
244 * @l4proto: next level protocol used in data portion of l3
247 * Helper function to populate tcp fields.
249 static void i40e_create_dummy_tcp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
250 struct i40e_fdir_filter *data)
254 /* Dummy tcp packet */
255 static const char tcp_packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
256 0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0};
258 tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_TCP, data);
260 tcp = (struct tcphdr *)tmp;
261 memcpy(tcp, tcp_packet, sizeof(tcp_packet));
262 tcp->dest = data->dst_port;
263 tcp->source = data->src_port;
267 * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
268 * @raw_packet: preallocated space for dummy packet
269 * @ipv4: is layer 3 packet of version 4 or 6
270 * @l4proto: next level protocol used in data portion of l3
273 * Helper function to populate sctp fields.
275 static void i40e_create_dummy_sctp_packet(u8 *raw_packet, bool ipv4,
276 u8 l4proto,
277 struct i40e_fdir_filter *data)
279 struct sctphdr *sctp;
282 tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_SCTP, data);
284 sctp = (struct sctphdr *)tmp;
285 sctp->dest = data->dst_port;
286 sctp->source = data->src_port;
290 * i40e_prepare_fdir_filter - Prepare and program fdir filter
291 * @pf: physical function to attach filter to
292 * @fd_data: filter data
293 * @add: add or delete filter
294 * @packet_addr: address of dummy packet, used in filtering
295 * @payload_offset: offset from dummy packet address to user defined data
296 * @pctype: Packet type for which filter is used
298 * Helper function to offset data of dummy packet, program it and
301 static int i40e_prepare_fdir_filter(struct i40e_pf *pf,
302 struct i40e_fdir_filter *fd_data,
303 bool add, char *packet_addr,
304 int payload_offset, u8 pctype)
308 if (fd_data->flex_filter) {
310 __be16 pattern = fd_data->flex_word;
311 u16 off = fd_data->flex_offset;
313 payload = packet_addr + payload_offset;
315 /* If user provided vlan, offset payload by vlan header length */
316 if (!!fd_data->vlan_tag)
317 payload += VLAN_HLEN;
319 *((__force __be16 *)(payload + off)) = pattern;
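/* Illustrative example: with flex_offset = 6 (and a VLAN tag present) the
 * user-supplied 16-bit pattern is written 6 bytes into the L4 payload of the
 * dummy packet, after the payload start was shifted by VLAN_HLEN above.
 */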
322 fd_data->pctype = pctype;
323 ret = i40e_program_fdir_filter(fd_data, packet_addr, pf, add);
325 dev_info(&pf->pdev->dev,
326 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
327 fd_data->pctype, fd_data->fd_id, ret);
328 /* Free the packet buffer since it wasn't added to the ring */
330 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
332 dev_info(&pf->pdev->dev,
333 "Filter OK for PCTYPE %d loc = %d\n",
334 fd_data->pctype, fd_data->fd_id);
336 dev_info(&pf->pdev->dev,
337 "Filter deleted for PCTYPE %d loc = %d\n",
338 fd_data->pctype, fd_data->fd_id);
345 * i40e_change_filter_num - Update the IPv4/IPv6 filter counters
346 * @ipv4: is layer 3 packet of version 4 or 6
347 * @add: add or delete filter
348 * @ipv4_filter_num: field to update
349 * @ipv6_filter_num: field to update
351 * Update filter number field for pf.
353 static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
354 u16 *ipv6_filter_num)
358 (*ipv4_filter_num)++;
360 (*ipv6_filter_num)++;
363 (*ipv4_filter_num)--;
365 (*ipv6_filter_num)--;
369 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
370 #define I40E_UDPIP6_DUMMY_PACKET_LEN 62
372 * i40e_add_del_fdir_udp - Add/Remove UDP filters
373 * @vsi: pointer to the targeted VSI
374 * @fd_data: the flow director data required for the FDir descriptor
375 * @add: true adds a filter, false removes it
376 * @ipv4: true is v4, false is v6
378 * Returns 0 if the filters were successfully added or removed
380 static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
381 struct i40e_fdir_filter *fd_data,
385 struct i40e_pf *pf = vsi->back;
389 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
393 i40e_create_dummy_udp_packet(raw_packet, ipv4, IPPROTO_UDP, fd_data);
396 ret = i40e_prepare_fdir_filter
397 (pf, fd_data, add, raw_packet,
398 I40E_UDPIP_DUMMY_PACKET_LEN,
399 I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
401 ret = i40e_prepare_fdir_filter
402 (pf, fd_data, add, raw_packet,
403 I40E_UDPIP6_DUMMY_PACKET_LEN,
404 I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
411 i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt,
412 &pf->fd_udp6_filter_cnt);
417 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
418 #define I40E_TCPIP6_DUMMY_PACKET_LEN 74
420 * i40e_add_del_fdir_tcp - Add/Remove TCP filters
421 * @vsi: pointer to the targeted VSI
422 * @fd_data: the flow director data required for the FDir descriptor
423 * @add: true adds a filter, false removes it
424 * @ipv4: true is v4, false is v6
426 * Returns 0 if the filters were successfully added or removed
428 static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
429 struct i40e_fdir_filter *fd_data,
433 struct i40e_pf *pf = vsi->back;
437 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
441 i40e_create_dummy_tcp_packet(raw_packet, ipv4, IPPROTO_TCP, fd_data);
443 ret = i40e_prepare_fdir_filter
444 (pf, fd_data, add, raw_packet,
445 I40E_TCPIP_DUMMY_PACKET_LEN,
446 I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
448 ret = i40e_prepare_fdir_filter
449 (pf, fd_data, add, raw_packet,
450 I40E_TCPIP6_DUMMY_PACKET_LEN,
451 I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
458 i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt,
459 &pf->fd_tcp6_filter_cnt);
462 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
463 I40E_DEBUG_FD & pf->hw.debug_mask)
464 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
465 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
470 #define I40E_SCTPIP_DUMMY_PACKET_LEN 46
471 #define I40E_SCTPIP6_DUMMY_PACKET_LEN 66
473 * i40e_add_del_fdir_sctp - Add/Remove SCTP Flow Director filters for
474 * a specific flow spec
475 * @vsi: pointer to the targeted VSI
476 * @fd_data: the flow director data required for the FDir descriptor
477 * @add: true adds a filter, false removes it
478 * @ipv4: true is v4, false is v6
480 * Returns 0 if the filters were successfully added or removed
482 static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
483 struct i40e_fdir_filter *fd_data,
487 struct i40e_pf *pf = vsi->back;
491 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
495 i40e_create_dummy_sctp_packet(raw_packet, ipv4, IPPROTO_SCTP, fd_data);
498 ret = i40e_prepare_fdir_filter
499 (pf, fd_data, add, raw_packet,
500 I40E_SCTPIP_DUMMY_PACKET_LEN,
501 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
503 ret = i40e_prepare_fdir_filter
504 (pf, fd_data, add, raw_packet,
505 I40E_SCTPIP6_DUMMY_PACKET_LEN,
506 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
513 i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt,
514 &pf->fd_sctp6_filter_cnt);
519 #define I40E_IP_DUMMY_PACKET_LEN 34
520 #define I40E_IP6_DUMMY_PACKET_LEN 54
522 * i40e_add_del_fdir_ip - Add/Remove IPv4/IPv6 Flow Director filters for
523 * a specific flow spec
524 * @vsi: pointer to the targeted VSI
525 * @fd_data: the flow director data required for the FDir descriptor
526 * @add: true adds a filter, false removes it
527 * @ipv4: true is v4, false is v6
529 * Returns 0 if the filters were successfully added or removed
531 static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
532 struct i40e_fdir_filter *fd_data,
536 struct i40e_pf *pf = vsi->back;
545 iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
546 iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
548 iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
549 iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
552 for (i = iter_start; i <= iter_end; i++) {
553 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
557 /* IPv6 "no next header" (IPPROTO_NONE) differs from IPv4's IPPROTO_IP */
558 (void)i40e_create_dummy_packet
559 (raw_packet, ipv4, (ipv4) ? IPPROTO_IP : IPPROTO_NONE,
560 fd_data);
562 payload_offset = (ipv4) ? I40E_IP_DUMMY_PACKET_LEN :
563 I40E_IP6_DUMMY_PACKET_LEN;
564 ret = i40e_prepare_fdir_filter(pf, fd_data, add, raw_packet,
565 payload_offset, i);
570 i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt,
571 &pf->fd_ip6_filter_cnt);
580 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
581 * @vsi: pointer to the targeted VSI
582 * @input: filter to add or delete
583 * @add: true adds a filter, false removes it
586 int i40e_add_del_fdir(struct i40e_vsi *vsi,
587 struct i40e_fdir_filter *input, bool add)
589 enum ip_ver { ipv6 = 0, ipv4 = 1 };
590 struct i40e_pf *pf = vsi->back;
593 switch (input->flow_type & ~FLOW_EXT) {
595 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
598 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
601 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
604 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
607 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
610 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
613 switch (input->ipl4_proto) {
615 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
618 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
621 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
624 ret = i40e_add_del_fdir_ip(vsi, input, add, ipv4);
627 /* We cannot support masking based on protocol */
628 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
634 switch (input->ipl4_proto) {
636 ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
639 ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
642 ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
645 ret = i40e_add_del_fdir_ip(vsi, input, add, ipv6);
648 /* We cannot support masking based on protocol */
649 dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n",
655 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
660 /* The buffer allocated here will be normally be freed by
661 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
662 * completion. In the event of an error adding the buffer to the FDIR
663 * ring, it will immediately be freed. It may also be freed by
664 * i40e_clean_tx_ring() when closing the VSI.
670 * i40e_fd_handle_status - check the Programming Status for FD
671 * @rx_ring: the Rx ring for this descriptor
672 * @qword0_raw: qword0
673 * @qword1: qword1 after le_to_cpu
674 * @prog_id: the id originally used for programming
676 * This is used to verify whether the FD programming or invalidation
677 * requested by SW succeeded in HW, and to take action accordingly.
679 static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
680 u64 qword1, u8 prog_id)
682 struct i40e_pf *pf = rx_ring->vsi->back;
683 struct pci_dev *pdev = pf->pdev;
684 struct i40e_16b_rx_wb_qw0 *qw0;
685 u32 fcnt_prog, fcnt_avail;
688 qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
689 error = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK, qword1);
691 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
692 pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
693 if (qw0->hi_dword.fd_id != 0 ||
694 (I40E_DEBUG_FD & pf->hw.debug_mask))
695 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
698 /* Check if the programming error is for ATR.
699 * If so, auto disable ATR and set a state for
700 * flush in progress. Next time we come here if flush is in
701 * progress do nothing, once flush is complete the state will be cleared.
704 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
708 /* store the current atr filter count */
709 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
711 if (qw0->hi_dword.fd_id == 0 &&
712 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
713 /* These set_bit() calls aren't atomic with the
714 * test_bit() here, but worst case we potentially
715 * disable ATR and queue a flush right after SB
716 * support is re-enabled. That shouldn't cause an issue in practice.
719 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
720 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
723 /* filter programming failed most likely due to table full */
724 fcnt_prog = i40e_get_global_fd_count(pf);
725 fcnt_avail = pf->fdir_pf_filter_count;
726 /* If ATR is running fcnt_prog can quickly change,
727 * if we are very close to full, it makes sense to disable
728 * FD ATR/SB and then re-enable it when there is room.
730 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
731 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
732 !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
734 if (I40E_DEBUG_FD & pf->hw.debug_mask)
735 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
737 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
738 if (I40E_DEBUG_FD & pf->hw.debug_mask)
739 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
740 qw0->hi_dword.fd_id);
745 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
746 * @ring: the ring that owns the buffer
747 * @tx_buffer: the buffer to free
749 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
750 struct i40e_tx_buffer *tx_buffer)
752 if (tx_buffer->skb) {
753 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
754 kfree(tx_buffer->raw_buf);
755 else if (ring_is_xdp(ring))
756 xdp_return_frame(tx_buffer->xdpf);
758 dev_kfree_skb_any(tx_buffer->skb);
759 if (dma_unmap_len(tx_buffer, len))
760 dma_unmap_single(ring->dev,
761 dma_unmap_addr(tx_buffer, dma),
762 dma_unmap_len(tx_buffer, len),
764 } else if (dma_unmap_len(tx_buffer, len)) {
765 dma_unmap_page(ring->dev,
766 dma_unmap_addr(tx_buffer, dma),
767 dma_unmap_len(tx_buffer, len),
771 tx_buffer->next_to_watch = NULL;
772 tx_buffer->skb = NULL;
773 dma_unmap_len_set(tx_buffer, len, 0);
774 /* tx_buffer must be completely set up in the transmit path */
778 * i40e_clean_tx_ring - Free any empty Tx buffers
779 * @tx_ring: ring to be cleaned
781 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
783 unsigned long bi_size;
786 if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
787 i40e_xsk_clean_tx_ring(tx_ring);
789 /* ring already cleared, nothing to do */
793 /* Free all the Tx ring sk_buffs */
794 for (i = 0; i < tx_ring->count; i++)
795 i40e_unmap_and_free_tx_resource(tx_ring,
799 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
800 memset(tx_ring->tx_bi, 0, bi_size);
802 /* Zero out the descriptor ring */
803 memset(tx_ring->desc, 0, tx_ring->size);
805 tx_ring->next_to_use = 0;
806 tx_ring->next_to_clean = 0;
808 if (!tx_ring->netdev)
811 /* cleanup Tx queue statistics */
812 netdev_tx_reset_queue(txring_txq(tx_ring));
816 * i40e_free_tx_resources - Free Tx resources per queue
817 * @tx_ring: Tx descriptor ring for a specific queue
819 * Free all transmit software resources
821 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
823 i40e_clean_tx_ring(tx_ring);
824 kfree(tx_ring->tx_bi);
825 tx_ring->tx_bi = NULL;
828 dma_free_coherent(tx_ring->dev, tx_ring->size,
829 tx_ring->desc, tx_ring->dma);
830 tx_ring->desc = NULL;
835 * i40e_get_tx_pending - how many tx descriptors not processed
836 * @ring: the ring of descriptors
837 * @in_sw: use SW variables
839 * Since there is no access to the ring head register
840 * in XL710, we need to use our local copies
842 u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
847 head = i40e_get_head(ring);
848 tail = readl(ring->tail);
850 head = ring->next_to_clean;
851 tail = ring->next_to_use;
855 return (head < tail) ?
856 tail - head : (tail + ring->count - head);
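/* Illustrative arithmetic for the wrapped case: with count = 512, head = 500
 * and tail = 10 the ring has wrapped, so the number of pending descriptors
 * is 10 + 512 - 500 = 22.
 */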
862 * i40e_detect_recover_hung - detect and recover hung Tx queues
863 * @vsi: pointer to vsi struct with tx queues
865 * The VSI's netdev owns the Tx queues. Check each of those Tx queues and,
866 * if a queue appears hung, trigger recovery by issuing a SW interrupt.
868 void i40e_detect_recover_hung(struct i40e_vsi *vsi)
870 struct i40e_ring *tx_ring = NULL;
871 struct net_device *netdev;
878 if (test_bit(__I40E_VSI_DOWN, vsi->state))
881 netdev = vsi->netdev;
885 if (!netif_carrier_ok(netdev))
888 for (i = 0; i < vsi->num_queue_pairs; i++) {
889 tx_ring = vsi->tx_rings[i];
890 if (tx_ring && tx_ring->desc) {
891 /* If packet counter has not changed the queue is
892 * likely stalled, so force an interrupt for this queue.
895 * prev_pkt_ctr would be negative if there was no pending work.
898 packets = tx_ring->stats.packets & INT_MAX;
899 if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
900 i40e_force_wb(vsi, tx_ring->q_vector);
904 /* Memory barrier between read of packet count and call
905 * to i40e_get_tx_pending()
908 tx_ring->tx_stats.prev_pkt_ctr =
909 i40e_get_tx_pending(tx_ring, true) ? packets : -1;
915 * i40e_clean_tx_irq - Reclaim resources after transmit completes
916 * @vsi: the VSI we care about
917 * @tx_ring: Tx ring to clean
918 * @napi_budget: Used to determine if we are in netpoll
919 * @tx_cleaned: Out parameter set to the number of TXes cleaned
921 * Returns true if there's any budget left (e.g. the clean is finished)
923 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
924 struct i40e_ring *tx_ring, int napi_budget,
925 unsigned int *tx_cleaned)
927 int i = tx_ring->next_to_clean;
928 struct i40e_tx_buffer *tx_buf;
929 struct i40e_tx_desc *tx_head;
930 struct i40e_tx_desc *tx_desc;
931 unsigned int total_bytes = 0, total_packets = 0;
932 unsigned int budget = vsi->work_limit;
934 tx_buf = &tx_ring->tx_bi[i];
935 tx_desc = I40E_TX_DESC(tx_ring, i);
938 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
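/* Head writeback: the hardware DMA-writes the current ring head into the
 * extra u32 allocated past the descriptors in i40e_setup_tx_descriptors(),
 * so every descriptor before tx_head is known to be completed without
 * reading a head register.
 */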
941 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
943 /* if next_to_watch is not set then there is no work pending */
947 /* prevent any other reads prior to eop_desc */
950 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
951 /* we have caught up to head, no work left to do */
952 if (tx_head == tx_desc)
955 /* clear next_to_watch to prevent false hangs */
956 tx_buf->next_to_watch = NULL;
958 /* update the statistics for this packet */
959 total_bytes += tx_buf->bytecount;
960 total_packets += tx_buf->gso_segs;
962 /* free the skb/XDP data */
963 if (ring_is_xdp(tx_ring))
964 xdp_return_frame(tx_buf->xdpf);
966 napi_consume_skb(tx_buf->skb, napi_budget);
968 /* unmap skb header data */
969 dma_unmap_single(tx_ring->dev,
970 dma_unmap_addr(tx_buf, dma),
971 dma_unmap_len(tx_buf, len),
974 /* clear tx_buffer data */
976 dma_unmap_len_set(tx_buf, len, 0);
978 /* unmap remaining buffers */
979 while (tx_desc != eop_desc) {
980 i40e_trace(clean_tx_irq_unmap,
981 tx_ring, tx_desc, tx_buf);
988 tx_buf = tx_ring->tx_bi;
989 tx_desc = I40E_TX_DESC(tx_ring, 0);
992 /* unmap any remaining paged data */
993 if (dma_unmap_len(tx_buf, len)) {
994 dma_unmap_page(tx_ring->dev,
995 dma_unmap_addr(tx_buf, dma),
996 dma_unmap_len(tx_buf, len),
998 dma_unmap_len_set(tx_buf, len, 0);
1002 /* move us one more past the eop_desc for start of next pkt */
1007 i -= tx_ring->count;
1008 tx_buf = tx_ring->tx_bi;
1009 tx_desc = I40E_TX_DESC(tx_ring, 0);
1014 /* update budget accounting */
1016 } while (likely(budget));
1018 i += tx_ring->count;
1019 tx_ring->next_to_clean = i;
1020 i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
1021 i40e_arm_wb(tx_ring, vsi, budget);
1023 if (ring_is_xdp(tx_ring))
1026 /* notify netdev of completed buffers */
1027 netdev_tx_completed_queue(txring_txq(tx_ring),
1028 total_packets, total_bytes);
1030 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
1031 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
1032 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
1033 /* Make sure that anybody stopping the queue after this
1034 * sees the new next_to_clean.
1037 if (__netif_subqueue_stopped(tx_ring->netdev,
1038 tx_ring->queue_index) &&
1039 !test_bit(__I40E_VSI_DOWN, vsi->state)) {
1040 netif_wake_subqueue(tx_ring->netdev,
1041 tx_ring->queue_index);
1042 ++tx_ring->tx_stats.restart_queue;
1046 *tx_cleaned = total_packets;
1051 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
1052 * @vsi: the VSI we care about
1053 * @q_vector: the vector on which to enable writeback
1056 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
1057 struct i40e_q_vector *q_vector)
1059 u16 flags = q_vector->tx.ring[0].flags;
1062 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
1065 if (q_vector->arm_wb_state)
1068 if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
1069 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
1070 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
1072 wr32(&vsi->back->hw,
1073 I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
1076 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
1077 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
1079 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1081 q_vector->arm_wb_state = true;
1085 * i40e_force_wb - Issue SW Interrupt so HW does a wb
1086 * @vsi: the VSI we care about
1087 * @q_vector: the vector on which to force writeback
1090 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
1092 if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
1093 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1094 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
1095 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
1096 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
1097 /* allow 00 to be written to the index */
1099 wr32(&vsi->back->hw,
1100 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
1102 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1103 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
1104 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
1105 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
1106 /* allow 00 to be written to the index */
1108 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
1112 static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
1113 struct i40e_ring_container *rc)
1115 return &q_vector->rx == rc;
1118 static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
1120 unsigned int divisor;
1122 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
1123 case I40E_LINK_SPEED_40GB:
1124 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
1126 case I40E_LINK_SPEED_25GB:
1127 case I40E_LINK_SPEED_20GB:
1128 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
1131 case I40E_LINK_SPEED_10GB:
1132 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
1134 case I40E_LINK_SPEED_1GB:
1135 case I40E_LINK_SPEED_100MB:
1136 divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
1144 * i40e_update_itr - update the dynamic ITR value based on statistics
1145 * @q_vector: structure containing interrupt and ring information
1146 * @rc: structure containing ring performance data
1148 * Stores a new ITR value based on packets and byte
1149 * counts during the last interrupt. The advantage of per interrupt
1150 * computation is faster updates and more accurate ITR for the current
1151 * traffic pattern. Constants in this function were computed
1152 * based on theoretical maximum wire speed and thresholds were set based
1153 * on testing data as well as attempting to minimize response time
1154 * while increasing bulk throughput.
1156 static void i40e_update_itr(struct i40e_q_vector *q_vector,
1157 struct i40e_ring_container *rc)
1159 unsigned int avg_wire_size, packets, bytes, itr;
1160 unsigned long next_update = jiffies;
1162 /* If we don't have any rings just leave ourselves set for maximum
1163 * possible latency so we take ourselves out of the equation.
1165 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
1168 /* For Rx we want to push the delay up and default to low latency.
1169 * for Tx we want to pull the delay down and default to high latency.
1171 itr = i40e_container_is_rx(q_vector, rc) ?
1172 I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
1173 I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
1175 /* If we didn't update within up to 1 - 2 jiffies we can assume
1176 * that either packets are coming in so slow there hasn't been
1177 * any work, or that there is so much work that NAPI is dealing
1178 * with interrupt moderation and we don't need to do anything.
1180 if (time_after(next_update, rc->next_update))
1183 /* If itr_countdown is set it means we programmed an ITR within
1184 * the last 4 interrupt cycles. This has a side effect of us
1185 * potentially firing an early interrupt. In order to work around
1186 * this we need to throw out any data received for a few
1187 * interrupts following the update.
1189 if (q_vector->itr_countdown) {
1190 itr = rc->target_itr;
1194 packets = rc->total_packets;
1195 bytes = rc->total_bytes;
1197 if (i40e_container_is_rx(q_vector, rc)) {
1198 /* For Rx, if there are 1 to 4 packets and bytes are less than
1199 * 9000 assume insufficient data to use bulk rate limiting
1200 * approach unless Tx is already in bulk rate limiting. We
1201 * are likely latency driven.
1203 if (packets && packets < 4 && bytes < 9000 &&
1204 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
1205 itr = I40E_ITR_ADAPTIVE_LATENCY;
1206 goto adjust_by_size;
1208 } else if (packets < 4) {
1209 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1210 * bulk mode and we are receiving 4 or fewer packets just
1211 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1212 * that the Rx can relax.
1214 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
1215 (q_vector->rx.target_itr & I40E_ITR_MASK) ==
1216 I40E_ITR_ADAPTIVE_MAX_USECS)
1218 } else if (packets > 32) {
1219 /* If we have processed over 32 packets in a single interrupt
1220 * for Tx assume we need to switch over to "bulk" mode.
1222 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
1225 /* We have no packets to actually measure against. This means
1226 * either one of the other queues on this vector is active or
1227 * we are a Tx queue doing TSO with too high of an interrupt rate.
1229 * Between 4 and 56 we can assume that our current interrupt delay
1230 * is only slightly too low. As such we should increase it by a small
1234 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
1235 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1236 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1237 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1242 if (packets <= 256) {
1243 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1244 itr &= I40E_ITR_MASK;
1246 /* Between 56 and 112 is our "goldilocks" zone where we are
1247 * working out "just right". Just report that our current
1248 * ITR is good for us.
1253 /* If packet count is 128 or greater we are likely looking
1254 * at a slight overrun of the delay we want. Try halving
1255 * our delay to see if that will cut the number of packets
1256 * in half per interrupt.
1259 itr &= I40E_ITR_MASK;
1260 if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
1261 itr = I40E_ITR_ADAPTIVE_MIN_USECS;
1266 /* The paths below assume we are dealing with a bulk ITR since
1267 * number of packets is greater than 256. We are just going to have
1268 * to compute a value and try to bring the count under control,
1269 * though for smaller packet sizes there isn't much we can do as
1270 * NAPI polling will likely be kicking in sooner rather than later.
1272 itr = I40E_ITR_ADAPTIVE_BULK;
1275 /* If packet counts are 256 or greater we can assume we have a gross
1276 * overestimation of what the rate should be. Instead of trying to fine
1277 * tune it just use the formula below to try and dial in an exact value
1278 * given the current packet size of the frame.
1280 avg_wire_size = bytes / packets;
1282 /* The following is a crude approximation of:
1283 * wmem_default / (size + overhead) = desired_pkts_per_int
1284 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1285 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1287 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1288 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the formula to:
1291 * (170 * (size + 24)) / (size + 640) = ITR
1293 * We first do some math on the packet size and then finally bitshift
1294 * by 8 after rounding up. We also have to account for PCIe link speed
1295 * difference as ITR scales based on this.
1297 if (avg_wire_size <= 60) {
1298 /* Start at 250k ints/sec */
1299 avg_wire_size = 4096;
1300 } else if (avg_wire_size <= 380) {
1301 /* 250K ints/sec to 60K ints/sec */
1302 avg_wire_size *= 40;
1303 avg_wire_size += 1696;
1304 } else if (avg_wire_size <= 1084) {
1305 /* 60K ints/sec to 36K ints/sec */
1306 avg_wire_size *= 15;
1307 avg_wire_size += 11452;
1308 } else if (avg_wire_size <= 1980) {
1309 /* 36K ints/sec to 30K ints/sec */
1311 avg_wire_size += 22420;
1313 /* plateau at a limit of 30K ints/sec */
1314 avg_wire_size = 32256;
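/* Illustrative arithmetic for the piecewise mapping above (the values are the
 * ~256x-scaled intermediates described below): 60-byte frames -> 4096
 * (~250K ints/sec target), 380-byte frames -> 380 * 40 + 1696 = 16896,
 * 1084-byte frames -> 1084 * 15 + 11452 = 27712, and larger frames plateau
 * at 32256 (~30K ints/sec).
 */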
1317 /* If we are in low latency mode halve our delay which doubles the
1318 * rate to somewhere between 100K to 16K ints/sec
1320 if (itr & I40E_ITR_ADAPTIVE_LATENCY)
1321 avg_wire_size >>= 1;
1323 /* Resultant value is 256 times larger than it needs to be. This
1324 * gives us room to adjust the value as needed to either increase
1325 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
1327 * Use addition as we have already recorded the new latency flag
1328 * for the ITR value.
1330 itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
1331 I40E_ITR_ADAPTIVE_MIN_INC;
1333 if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
1334 itr &= I40E_ITR_ADAPTIVE_LATENCY;
1335 itr += I40E_ITR_ADAPTIVE_MAX_USECS;
1339 /* write back value */
1340 rc->target_itr = itr;
1342 /* next update should occur within next jiffy */
1343 rc->next_update = next_update + 1;
1345 rc->total_bytes = 0;
1346 rc->total_packets = 0;
1349 static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
1351 return &rx_ring->rx_bi[idx];
1355 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1356 * @rx_ring: rx descriptor ring to store buffers on
1357 * @old_buff: donor buffer to have page reused
1359 * Synchronizes page for reuse by the adapter
1361 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1362 struct i40e_rx_buffer *old_buff)
1364 struct i40e_rx_buffer *new_buff;
1365 u16 nta = rx_ring->next_to_alloc;
1367 new_buff = i40e_rx_bi(rx_ring, nta);
1369 /* update, and store next to alloc */
1371 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1373 /* transfer page from old buffer to new buffer */
1374 new_buff->dma = old_buff->dma;
1375 new_buff->page = old_buff->page;
1376 new_buff->page_offset = old_buff->page_offset;
1377 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1379 /* clear contents of buffer_info */
1380 old_buff->page = NULL;
1384 * i40e_clean_programming_status - clean the programming status descriptor
1385 * @rx_ring: the rx ring that has this descriptor
1386 * @qword0_raw: qword0
1387 * @qword1: qword1 representing status_error_len in CPU ordering
1389 * Flow director should handle FD_FILTER_STATUS to check its filter programming
1390 * status being successful or not and take actions accordingly. FCoE should
1391 * handle its context/filter programming/invalidation status and take actions.
1393 * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL.
1395 void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
1400 id = FIELD_GET(I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK, qword1);
1402 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1403 i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
1407 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1408 * @tx_ring: the tx ring to set up
1410 * Return 0 on success, negative on error
1412 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1414 struct device *dev = tx_ring->dev;
1420 /* warn if we are about to overwrite the pointer */
1421 WARN_ON(tx_ring->tx_bi);
1422 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1423 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1424 if (!tx_ring->tx_bi)
1427 u64_stats_init(&tx_ring->syncp);
1429 /* round up to nearest 4K */
1430 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1431 /* add u32 for head writeback, align after this takes care of
1432 * guaranteeing this is at least one cache line in size
1434 tx_ring->size += sizeof(u32);
1435 tx_ring->size = ALIGN(tx_ring->size, 4096);
1436 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1437 &tx_ring->dma, GFP_KERNEL);
1438 if (!tx_ring->desc) {
1439 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1444 tx_ring->next_to_use = 0;
1445 tx_ring->next_to_clean = 0;
1446 tx_ring->tx_stats.prev_pkt_ctr = -1;
1450 kfree(tx_ring->tx_bi);
1451 tx_ring->tx_bi = NULL;
1455 static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
1457 memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
1461 * i40e_clean_rx_ring - Free Rx buffers
1462 * @rx_ring: ring to be cleaned
1464 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1468 /* ring already cleared, nothing to do */
1469 if (!rx_ring->rx_bi)
1472 if (rx_ring->xsk_pool) {
1473 i40e_xsk_clean_rx_ring(rx_ring);
1477 /* Free all the Rx ring sk_buffs */
1478 for (i = 0; i < rx_ring->count; i++) {
1479 struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
1484 /* Invalidate cache lines that may have been written to by
1485 * device so that we avoid corrupting memory.
1487 dma_sync_single_range_for_cpu(rx_ring->dev,
1490 rx_ring->rx_buf_len,
1493 /* free resources associated with mapping */
1494 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1495 i40e_rx_pg_size(rx_ring),
1499 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1502 rx_bi->page_offset = 0;
1506 if (rx_ring->xsk_pool)
1507 i40e_clear_rx_bi_zc(rx_ring);
1509 i40e_clear_rx_bi(rx_ring);
1511 /* Zero out the descriptor ring */
1512 memset(rx_ring->desc, 0, rx_ring->size);
1514 rx_ring->next_to_alloc = 0;
1515 rx_ring->next_to_clean = 0;
1516 rx_ring->next_to_process = 0;
1517 rx_ring->next_to_use = 0;
1521 * i40e_free_rx_resources - Free Rx resources
1522 * @rx_ring: ring to clean the resources from
1524 * Free all receive software resources
1526 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1528 i40e_clean_rx_ring(rx_ring);
1529 if (rx_ring->vsi->type == I40E_VSI_MAIN)
1530 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
1531 rx_ring->xdp_prog = NULL;
1532 kfree(rx_ring->rx_bi);
1533 rx_ring->rx_bi = NULL;
1535 if (rx_ring->desc) {
1536 dma_free_coherent(rx_ring->dev, rx_ring->size,
1537 rx_ring->desc, rx_ring->dma);
1538 rx_ring->desc = NULL;
1543 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1544 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1546 * Returns 0 on success, negative on failure
1548 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1550 struct device *dev = rx_ring->dev;
1553 u64_stats_init(&rx_ring->syncp);
1555 /* Round up to nearest 4K */
1556 rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
1557 rx_ring->size = ALIGN(rx_ring->size, 4096);
1558 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1559 &rx_ring->dma, GFP_KERNEL);
1561 if (!rx_ring->desc) {
1562 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1567 rx_ring->next_to_alloc = 0;
1568 rx_ring->next_to_clean = 0;
1569 rx_ring->next_to_process = 0;
1570 rx_ring->next_to_use = 0;
1572 /* XDP RX-queue info only needed for RX rings exposed to XDP */
1573 if (rx_ring->vsi->type == I40E_VSI_MAIN) {
1574 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
1575 rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
1580 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1583 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
1584 if (!rx_ring->rx_bi)
1591 * i40e_release_rx_desc - Store the new tail and head values
1592 * @rx_ring: ring to bump
1593 * @val: new head index
1595 void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1597 rx_ring->next_to_use = val;
1599 /* update next to alloc since we have filled the ring */
1600 rx_ring->next_to_alloc = val;
1602 /* Force memory writes to complete before letting h/w
1603 * know there are new descriptors to fetch. (Only
1604 * applicable for weak-ordered memory model archs,
1605 * such as IA-64).
1606 */
1607 wmb();
1608 writel(val, rx_ring->tail);
1611 #if (PAGE_SIZE >= 8192)
1612 static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
1615 unsigned int truesize;
1617 truesize = rx_ring->rx_offset ?
1618 SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
1619 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
1620 SKB_DATA_ALIGN(size);
1626 * i40e_alloc_mapped_page - recycle or make a new page
1627 * @rx_ring: ring to use
1628 * @bi: rx_buffer struct to modify
1630 * Returns true if the page was successfully allocated or reused.
1633 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1634 struct i40e_rx_buffer *bi)
1636 struct page *page = bi->page;
1639 /* since we are recycling buffers we should seldom need to alloc */
1641 rx_ring->rx_stats.page_reuse_count++;
1645 /* alloc new page for storage */
1646 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1647 if (unlikely(!page)) {
1648 rx_ring->rx_stats.alloc_page_failed++;
1652 rx_ring->rx_stats.page_alloc_count++;
1654 /* map page for use */
1655 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1656 i40e_rx_pg_size(rx_ring),
1660 /* if mapping failed free memory back to system since
1661 * there isn't much point in holding memory we can't use
1663 if (dma_mapping_error(rx_ring->dev, dma)) {
1664 __free_pages(page, i40e_rx_pg_order(rx_ring));
1665 rx_ring->rx_stats.alloc_page_failed++;
1671 bi->page_offset = rx_ring->rx_offset;
1672 page_ref_add(page, USHRT_MAX - 1);
1673 bi->pagecnt_bias = USHRT_MAX;
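/* Taking USHRT_MAX references up front (mirrored in pagecnt_bias) lets the
 * driver hand page halves to the stack without a page_ref_add() per frame;
 * the bias is only restocked once it is nearly drained, see
 * i40e_can_reuse_rx_page().
 */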
1679 * i40e_alloc_rx_buffers - Replace used receive buffers
1680 * @rx_ring: ring to place buffers on
1681 * @cleaned_count: number of buffers to replace
1683 * Returns false if all allocations were successful, true if any fail
1685 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1687 u16 ntu = rx_ring->next_to_use;
1688 union i40e_rx_desc *rx_desc;
1689 struct i40e_rx_buffer *bi;
1691 /* do nothing if no valid netdev defined */
1692 if (!rx_ring->netdev || !cleaned_count)
1695 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1696 bi = i40e_rx_bi(rx_ring, ntu);
1699 if (!i40e_alloc_mapped_page(rx_ring, bi))
1702 /* sync the buffer for use by the device */
1703 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1705 rx_ring->rx_buf_len,
1708 /* Refresh the desc even if buffer_addrs didn't change
1709 * because each write-back erases this info.
1711 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1716 if (unlikely(ntu == rx_ring->count)) {
1717 rx_desc = I40E_RX_DESC(rx_ring, 0);
1718 bi = i40e_rx_bi(rx_ring, 0);
1722 /* clear the status bits for the next_to_use descriptor */
1723 rx_desc->wb.qword1.status_error_len = 0;
1726 } while (cleaned_count);
1728 if (rx_ring->next_to_use != ntu)
1729 i40e_release_rx_desc(rx_ring, ntu);
1734 if (rx_ring->next_to_use != ntu)
1735 i40e_release_rx_desc(rx_ring, ntu);
1737 /* make sure to come back via polling to try again after
1738 * allocation failure
1744 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1745 * @vsi: the VSI we care about
1746 * @skb: skb currently being received and modified
1747 * @rx_desc: the receive descriptor
1749 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1750 struct sk_buff *skb,
1751 union i40e_rx_desc *rx_desc)
1753 struct i40e_rx_ptype_decoded decoded;
1754 u32 rx_error, rx_status;
1759 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1760 ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
1761 rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
1762 rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
1763 decoded = decode_rx_desc_ptype(ptype);
1765 skb->ip_summed = CHECKSUM_NONE;
1767 skb_checksum_none_assert(skb);
1769 /* Rx csum enabled and ip headers found? */
1770 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1773 /* did the hardware decode the packet and checksum? */
1774 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1777 /* both known and outer_ip must be set for the below code to work */
1778 if (!(decoded.known && decoded.outer_ip))
1781 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1782 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1783 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1784 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1787 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1788 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1791 /* likely incorrect csum if alternate IP extension headers found */
1793 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1794 /* don't increment checksum err here, non-fatal err */
1797 /* there was some L4 error, count error and punt packet to the stack */
1798 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1801 /* handle packets that were not able to be checksummed due
1802 * to arrival speed, in this case the stack can compute the csum.
1805 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1808 /* If there is an outer header present that might contain a checksum
1809 * we need to bump the checksum level by 1 to reflect the fact that
1810 * we are indicating we validated the inner checksum.
1812 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1813 skb->csum_level = 1;
1815 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1816 switch (decoded.inner_prot) {
1817 case I40E_RX_PTYPE_INNER_PROT_TCP:
1818 case I40E_RX_PTYPE_INNER_PROT_UDP:
1819 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1820 skb->ip_summed = CHECKSUM_UNNECESSARY;
1829 vsi->back->hw_csum_rx_error++;
1833 * i40e_ptype_to_htype - get a hash type
1834 * @ptype: the ptype value from the descriptor
1836 * Returns a hash type to be used by skb_set_hash
1838 static inline int i40e_ptype_to_htype(u8 ptype)
1840 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1843 return PKT_HASH_TYPE_NONE;
1845 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1846 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1847 return PKT_HASH_TYPE_L4;
1848 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1849 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1850 return PKT_HASH_TYPE_L3;
1852 return PKT_HASH_TYPE_L2;
1856 * i40e_rx_hash - set the hash value in the skb
1857 * @ring: descriptor ring
1858 * @rx_desc: specific descriptor
1859 * @skb: skb currently being received and modified
1860 * @rx_ptype: Rx packet type
1862 static inline void i40e_rx_hash(struct i40e_ring *ring,
1863 union i40e_rx_desc *rx_desc,
1864 struct sk_buff *skb,
1868 const __le64 rss_mask =
1869 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1870 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1872 if (!(ring->netdev->features & NETIF_F_RXHASH))
1875 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1876 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1877 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1882 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1883 * @rx_ring: rx descriptor ring packet is being transacted on
1884 * @rx_desc: pointer to the EOP Rx descriptor
1885 * @skb: pointer to current skb being populated
1887 * This function checks the ring, descriptor, and packet information in
1888 * order to populate the hash, checksum, VLAN, protocol, and
1889 * other fields within the skb.
1891 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1892 union i40e_rx_desc *rx_desc, struct sk_buff *skb)
1894 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1895 u32 rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
1896 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1897 u32 tsyn = FIELD_GET(I40E_RXD_QW1_STATUS_TSYNINDX_MASK, rx_status);
1898 u8 rx_ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
1900 if (unlikely(tsynvalid))
1901 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1903 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1905 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1907 skb_record_rx_queue(skb, rx_ring->queue_index);
1909 if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
1910 __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
1912 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1913 le16_to_cpu(vlan_tag));
1916 /* modifies the skb - consumes the enet header */
1917 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1921 * i40e_cleanup_headers - Correct empty headers
1922 * @rx_ring: rx descriptor ring packet is being transacted on
1923 * @skb: pointer to current skb being fixed
1924 * @rx_desc: pointer to the EOP Rx descriptor
1926 * In addition if skb is not at least 60 bytes we need to pad it so that
1927 * it is large enough to qualify as a valid Ethernet frame.
1929 * Returns true if an error was encountered and skb was freed.
1931 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1932 union i40e_rx_desc *rx_desc)
1935 /* ERR_MASK will only have valid bits if EOP set, and
1936 * what we are doing here is actually checking
1937 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1940 if (unlikely(i40e_test_staterr(rx_desc,
1941 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1942 dev_kfree_skb_any(skb);
1946 /* if eth_skb_pad returns an error the skb was freed */
1947 if (eth_skb_pad(skb))
1954 * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
1955 * @rx_buffer: buffer containing the page
1956 * @rx_stats: rx stats structure for the rx ring
1958 * If page is reusable, we have a green light for calling i40e_reuse_rx_page,
1959 * which will assign the current buffer to the buffer that next_to_alloc is
1960 * pointing to; otherwise, the DMA mapping needs to be destroyed and the page freed.
1963 * rx_stats will be updated to indicate whether the page was waived
1964 * or busy if it could not be reused.
1966 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
1967 struct i40e_rx_queue_stats *rx_stats)
1969 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1970 struct page *page = rx_buffer->page;
1972 /* Is any reuse possible? */
1973 if (!dev_page_is_reusable(page)) {
1974 rx_stats->page_waive_count++;
1978 #if (PAGE_SIZE < 8192)
1979 /* if we are only owner of page we can reuse it */
1980 if (unlikely((rx_buffer->page_count - pagecnt_bias) > 1)) {
1981 rx_stats->page_busy_count++;
1985 #define I40E_LAST_OFFSET \
1986 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
1987 if (rx_buffer->page_offset > I40E_LAST_OFFSET) {
1988 rx_stats->page_busy_count++;
1993 /* If we have drained the page fragment pool we need to update
1994 * the pagecnt_bias and page count so that we fully restock the
1995 * number of references the driver holds.
1997 if (unlikely(pagecnt_bias == 1)) {
1998 page_ref_add(page, USHRT_MAX - 1);
1999 rx_buffer->pagecnt_bias = USHRT_MAX;
2006 * i40e_rx_buffer_flip - adjusted rx_buffer to point to an unused region
2007 * @rx_buffer: Rx buffer to adjust
2008 * @truesize: Size of adjustment
2010 static void i40e_rx_buffer_flip(struct i40e_rx_buffer *rx_buffer,
2011 unsigned int truesize)
2013 #if (PAGE_SIZE < 8192)
2014 rx_buffer->page_offset ^= truesize;
2016 rx_buffer->page_offset += truesize;
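/* Illustrative example: with 4K pages and 2K Rx buffers the XOR above just
 * toggles page_offset between 0 and 2048, so the two halves of the page
 * alternate between hardware and stack use; with larger pages the offset
 * instead walks forward by truesize.
 */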
2021 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
2022 * @rx_ring: rx descriptor ring to transact packets on
2023 * @size: size of buffer to add to skb
2025 * This function will pull an Rx buffer from the ring and synchronize it
2026 * for use by the CPU.
2028 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
2029 const unsigned int size)
2031 struct i40e_rx_buffer *rx_buffer;
2033 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_process);
2034 rx_buffer->page_count =
2035 #if (PAGE_SIZE < 8192)
2036 page_count(rx_buffer->page);
2040 prefetch_page_address(rx_buffer->page);
2042 /* we are reusing so sync this buffer for CPU use */
2043 dma_sync_single_range_for_cpu(rx_ring->dev,
2045 rx_buffer->page_offset,
2049 /* We have pulled a buffer for use, so decrement pagecnt_bias */
2050 rx_buffer->pagecnt_bias--;
2056 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2057 * @rx_ring: rx descriptor ring to transact packets on
2058 * @rx_buffer: rx buffer to pull data from
2060 * This function will clean up the contents of the rx_buffer. It will
2061 * either recycle the buffer or unmap it and free the associated resources.
2063 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2064 struct i40e_rx_buffer *rx_buffer)
2066 if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats)) {
2067 /* hand second half of page back to the ring */
2068 i40e_reuse_rx_page(rx_ring, rx_buffer);
2070 /* we are not reusing the buffer so unmap it */
2071 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2072 i40e_rx_pg_size(rx_ring),
2073 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2074 __page_frag_cache_drain(rx_buffer->page,
2075 rx_buffer->pagecnt_bias);
2076 /* clear contents of buffer_info */
2077 rx_buffer->page = NULL;
2082 * i40e_process_rx_buffs- Processing of buffers post XDP prog or on error
2083 * @rx_ring: Rx descriptor ring to transact packets on
2084 * @xdp_res: Result of the XDP program
2085 * @xdp: xdp_buff pointing to the data
2087 static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
2088 struct xdp_buff *xdp)
2090 u32 next = rx_ring->next_to_clean;
2091 struct i40e_rx_buffer *rx_buffer;
2096 rx_buffer = i40e_rx_bi(rx_ring, next);
2097 if (++next == rx_ring->count)
2100 if (!rx_buffer->page)
2103 if (xdp_res == I40E_XDP_CONSUMED)
2104 rx_buffer->pagecnt_bias++;
2106 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2108 /* EOP buffer will be put in i40e_clean_rx_irq() */
2109 if (next == rx_ring->next_to_process)
2112 i40e_put_rx_buffer(rx_ring, rx_buffer);
2117 * i40e_construct_skb - Allocate skb and populate it
2118 * @rx_ring: rx descriptor ring to transact packets on
2119 * @xdp: xdp_buff pointing to the data
2120 * @nr_frags: number of buffers for the packet
2122 * This function allocates an skb. It then populates it with the page
2123 * data from the current receive descriptor, taking care to set up the
2126 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2127 struct xdp_buff *xdp,
2130 unsigned int size = xdp->data_end - xdp->data;
2131 struct i40e_rx_buffer *rx_buffer;
2132 unsigned int headlen;
2133 struct sk_buff *skb;
2135 /* prefetch first cache line of first page */
2136 net_prefetch(xdp->data);
2138 /* Note, we get here by enabling legacy-rx via:
2140 * ethtool --set-priv-flags <dev> legacy-rx on
2142 * In this mode, we currently get 0 extra XDP headroom as
2143 * opposed to having legacy-rx off, where we process XDP
2144 * packets going to stack via i40e_build_skb(). The latter
2145 * provides us currently with 192 bytes of headroom.
2147 * For i40e_construct_skb() mode it means that the
2148 * xdp->data_meta will always point to xdp->data, since
2149 * the helper cannot expand the head. Should this ever
2150 * change in future for legacy-rx mode on, then lets also
2151 * add xdp->data_meta handling here.
2154 /* allocate a skb to store the frags */
2155 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2157 GFP_ATOMIC | __GFP_NOWARN);
2161 /* Determine available headroom for copy */
2163 if (headlen > I40E_RX_HDR_SIZE)
2164 headlen = eth_get_headlen(skb->dev, xdp->data,
2167 /* align pull length to size of long to optimize memcpy performance */
2168 memcpy(__skb_put(skb, headlen), xdp->data,
2169 ALIGN(headlen, sizeof(long)));
2171 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
2172 /* update all of the pointers */
2175 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2179 skb_add_rx_frag(skb, 0, rx_buffer->page,
2180 rx_buffer->page_offset + headlen,
2181 size, xdp->frame_sz);
2182 /* buffer is used by skb, update page_offset */
2183 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2185 /* buffer is unused, reset bias back to rx_buffer */
2186 rx_buffer->pagecnt_bias++;
2189 if (unlikely(xdp_buff_has_frags(xdp))) {
2190 struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
2192 sinfo = xdp_get_shared_info_from_buff(xdp);
2193 memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
2194 sizeof(skb_frag_t) * nr_frags);
2196 xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
2197 sinfo->xdp_frags_size,
2198 nr_frags * xdp->frame_sz,
2199 xdp_buff_is_frag_pfmemalloc(xdp));
2201 /* First buffer has already been processed, so bump ntc */
2202 if (++rx_ring->next_to_clean == rx_ring->count)
2203 rx_ring->next_to_clean = 0;
2205 i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
2212 * i40e_build_skb - Build skb around an existing buffer
2213 * @rx_ring: Rx descriptor ring to transact packets on
2214 * @xdp: xdp_buff pointing to the data
2215 * @nr_frags: number of buffers for the packet
2217 * This function builds an skb around an existing Rx buffer, taking care
2218 * to set up the skb correctly and avoid any memcpy overhead.
2220 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2221 struct xdp_buff *xdp,
2224 unsigned int metasize = xdp->data - xdp->data_meta;
2225 struct sk_buff *skb;
2227 /* Prefetch first cache line of first page. If xdp->data_meta
2228 * is unused, this points exactly to xdp->data, otherwise we
2229 * likely have a consumer accessing first few bytes of meta
2230 * data, and then actual data.
2232 net_prefetch(xdp->data_meta);
2234 /* build an skb around the page buffer */
2235 skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
2239 /* update pointers within the skb to store the data */
2240 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2241 __skb_put(skb, xdp->data_end - xdp->data);
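/* At this point skb->data points to the start of the received frame and
 * skb->len covers the linear data; nothing was copied, the skb was simply
 * built around the existing page buffer.
 */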
2243 skb_metadata_set(skb, metasize);
2245 if (unlikely(xdp_buff_has_frags(xdp))) {
2246 struct skb_shared_info *sinfo;
2248 sinfo = xdp_get_shared_info_from_buff(xdp);
2249 xdp_update_skb_shared_info(skb, nr_frags,
2250 sinfo->xdp_frags_size,
2251 nr_frags * xdp->frame_sz,
2252 xdp_buff_is_frag_pfmemalloc(xdp));
2254 i40e_process_rx_buffs(rx_ring, I40E_XDP_PASS, xdp);
2256 struct i40e_rx_buffer *rx_buffer;
2258 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
2259 /* buffer is used by skb, update page_offset */
2260 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2267 * i40e_is_non_eop - process handling of non-EOP buffers
2268 * @rx_ring: Rx ring being processed
2269 * @rx_desc: Rx descriptor for current buffer
2271 * If the buffer is an EOP buffer, this function exits returning false,
2272 * otherwise returns true indicating that this is in fact a non-EOP buffer.
2274 bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2275 union i40e_rx_desc *rx_desc)
2277 /* if we are the last buffer then there is nothing else to do */
2278 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2279 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2282 rx_ring->rx_stats.non_eop_descs++;
2287 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2288 struct i40e_ring *xdp_ring);
2290 int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
2292 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2294 if (unlikely(!xdpf))
2295 return I40E_XDP_CONSUMED;
2297 return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2301 * i40e_run_xdp - run an XDP program
2302 * @rx_ring: Rx ring being processed
2303 * @xdp: XDP buffer containing the frame
2304 * @xdp_prog: XDP program to run
2306 static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
2308 int err, result = I40E_XDP_PASS;
2309 struct i40e_ring *xdp_ring;
2315 prefetchw(xdp->data_hard_start); /* xdp_frame write */
2317 act = bpf_prog_run_xdp(xdp_prog, xdp);
2322 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2323 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2324 if (result == I40E_XDP_CONSUMED)
2328 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2331 result = I40E_XDP_REDIR;
2334 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
2338 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2339 fallthrough; /* handle aborts by dropping packet */
2341 result = I40E_XDP_CONSUMED;
2349 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
2350 * @xdp_ring: XDP Tx ring
2352 * This function updates the XDP Tx ring tail register.
2354 void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2356 /* Force memory writes to complete before letting h/w
2357 * know there are new descriptors to fetch.
2360 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2364 * i40e_update_rx_stats - Update Rx ring statistics
2365 * @rx_ring: rx descriptor ring
2366 * @total_rx_bytes: number of bytes received
2367 * @total_rx_packets: number of packets received
2369 * This function updates the Rx ring statistics.
2371 void i40e_update_rx_stats(struct i40e_ring *rx_ring,
2372 unsigned int total_rx_bytes,
2373 unsigned int total_rx_packets)
2375 u64_stats_update_begin(&rx_ring->syncp);
2376 rx_ring->stats.packets += total_rx_packets;
2377 rx_ring->stats.bytes += total_rx_bytes;
2378 u64_stats_update_end(&rx_ring->syncp);
2379 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2380 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2384 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
2386 * @xdp_res: Result of the receive batch
2388 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
2389 * should be called when a batch of packets has been processed in the napi loop.
2392 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
2394 if (xdp_res & I40E_XDP_REDIR)
2397 if (xdp_res & I40E_XDP_TX) {
2398 struct i40e_ring *xdp_ring =
2399 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2401 i40e_xdp_ring_update_tail(xdp_ring);
2406 * i40e_inc_ntp - Advance the next_to_process index
2409 static void i40e_inc_ntp(struct i40e_ring *rx_ring)
2411 u32 ntp = rx_ring->next_to_process + 1;
2413 ntp = (ntp < rx_ring->count) ? ntp : 0;
2414 rx_ring->next_to_process = ntp;
2415 prefetch(I40E_RX_DESC(rx_ring, ntp));
2419 * i40e_add_xdp_frag - Add a frag to xdp_buff
2420 * @xdp: xdp_buff pointing to the data
2421 * @nr_frags: return number of buffers for the packet
2422 * @rx_buffer: rx_buffer holding data of the current frag
2423 * @size: size of data of current frag
2425 static int i40e_add_xdp_frag(struct xdp_buff *xdp, u32 *nr_frags,
2426 struct i40e_rx_buffer *rx_buffer, u32 size)
2428 struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
2430 if (!xdp_buff_has_frags(xdp)) {
2431 sinfo->nr_frags = 0;
2432 sinfo->xdp_frags_size = 0;
2433 xdp_buff_set_frags_flag(xdp);
2434 } else if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
2435 /* Overflowing packet: All frags need to be dropped */
2439 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buffer->page,
2440 rx_buffer->page_offset, size);
2442 sinfo->xdp_frags_size += size;
2444 if (page_is_pfmemalloc(rx_buffer->page))
2445 xdp_buff_set_frag_pfmemalloc(xdp);
2446 *nr_frags = sinfo->nr_frags;
2452 * i40e_consume_xdp_buff - Consume all the buffers of the packet and update ntc
2453 * @rx_ring: rx descriptor ring to transact packets on
2454 * @xdp: xdp_buff pointing to the data
2455 * @rx_buffer: rx_buffer of eop desc
2457 static void i40e_consume_xdp_buff(struct i40e_ring *rx_ring,
2458 struct xdp_buff *xdp,
2459 struct i40e_rx_buffer *rx_buffer)
2461 i40e_process_rx_buffs(rx_ring, I40E_XDP_CONSUMED, xdp);
2462 i40e_put_rx_buffer(rx_ring, rx_buffer);
2463 rx_ring->next_to_clean = rx_ring->next_to_process;
2468 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2469 * @rx_ring: rx descriptor ring to transact packets on
2470 * @budget: Total limit on number of packets to process
2471 * @rx_cleaned: Out parameter of the number of packets processed
2473 * This function provides a "bounce buffer" approach to Rx interrupt
2474 * processing. The advantage to this is that on systems that have
2475 * expensive overhead for IOMMU access this provides a means of avoiding
2476 * it by maintaining the mapping of the page to the system.
2478 * Returns amount of work completed
2480 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
2481 unsigned int *rx_cleaned)
2483 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2484 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2485 u16 clean_threshold = rx_ring->count / 2;
2486 unsigned int offset = rx_ring->rx_offset;
2487 struct xdp_buff *xdp = &rx_ring->xdp;
2488 unsigned int xdp_xmit = 0;
2489 struct bpf_prog *xdp_prog;
2490 bool failure = false;
2493 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
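/* The XDP program pointer is sampled once for the whole poll; it can be
 * swapped at runtime, so every packet in this batch is handled by the
 * same snapshot taken here.
 */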
2495 while (likely(total_rx_packets < (unsigned int)budget)) {
2496 u16 ntp = rx_ring->next_to_process;
2497 struct i40e_rx_buffer *rx_buffer;
2498 union i40e_rx_desc *rx_desc;
2499 struct sk_buff *skb;
2505 /* return some buffers to hardware, one at a time is too slow */
2506 if (cleaned_count >= clean_threshold) {
2507 failure = failure ||
2508 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2512 rx_desc = I40E_RX_DESC(rx_ring, ntp);
2514 /* status_error_len will always be zero for unused descriptors
2515 * because it's cleared in cleanup, and overlaps with hdr_addr
2516 * which is always zero because packet split isn't used, if the
2517 * hardware wrote DD then the length will be non-zero
2519 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2521 /* This memory barrier is needed to keep us from reading
2522 * any other fields out of the rx_desc until we have
2523 * verified the descriptor has been written back.
2527 if (i40e_rx_is_programming_status(qword)) {
2528 i40e_clean_programming_status(rx_ring,
2529 rx_desc->raw.qword[0],
2531 rx_buffer = i40e_rx_bi(rx_ring, ntp);
2532 i40e_inc_ntp(rx_ring);
2533 i40e_reuse_rx_page(rx_ring, rx_buffer);
2534 /* Update ntc and bump cleaned count if not in the
2535 * middle of a multi-buffer packet.
2537 if (rx_ring->next_to_clean == ntp) {
2538 rx_ring->next_to_clean =
2539 rx_ring->next_to_process;
2545 size = FIELD_GET(I40E_RXD_QW1_LENGTH_PBUF_MASK, qword);
2549 i40e_trace(clean_rx_irq, rx_ring, rx_desc, xdp);
2550 /* retrieve a buffer from the ring */
2551 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2553 neop = i40e_is_non_eop(rx_ring, rx_desc);
2554 i40e_inc_ntp(rx_ring);
2557 unsigned char *hard_start;
2559 hard_start = page_address(rx_buffer->page) +
2560 rx_buffer->page_offset - offset;
2561 xdp_prepare_buff(xdp, hard_start, offset, size, true);
2562 #if (PAGE_SIZE > 4096)
2563 /* At larger PAGE_SIZE, frame_sz depends on the frame length */
2564 xdp->frame_sz = i40e_rx_frame_truesize(rx_ring, size);
2566 } else if (i40e_add_xdp_frag(xdp, &nfrags, rx_buffer, size) &&
2568 /* Overflowing packet: Drop all frags on EOP */
2569 i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer);
2576 xdp_res = i40e_run_xdp(rx_ring, xdp, xdp_prog);
2579 xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
2581 if (unlikely(xdp_buff_has_frags(xdp))) {
2582 i40e_process_rx_buffs(rx_ring, xdp_res, xdp);
2583 size = xdp_get_buff_len(xdp);
2584 } else if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2585 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
2587 rx_buffer->pagecnt_bias++;
2589 total_rx_bytes += size;
2591 if (ring_uses_build_skb(rx_ring))
2592 skb = i40e_build_skb(rx_ring, xdp, nfrags);
2594 skb = i40e_construct_skb(rx_ring, xdp, nfrags);
2596 /* drop if we failed to retrieve a buffer */
2598 rx_ring->rx_stats.alloc_buff_failed++;
2599 i40e_consume_xdp_buff(rx_ring, xdp, rx_buffer);
2603 if (i40e_cleanup_headers(rx_ring, skb, rx_desc))
2606 /* probably a little skewed due to removing CRC */
2607 total_rx_bytes += skb->len;
2609 /* populate checksum, VLAN, and protocol */
2610 i40e_process_skb_fields(rx_ring, rx_desc, skb);
2612 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, xdp);
2613 napi_gro_receive(&rx_ring->q_vector->napi, skb);
2616 /* update budget accounting */
2619 cleaned_count += nfrags + 1;
2620 i40e_put_rx_buffer(rx_ring, rx_buffer);
2621 rx_ring->next_to_clean = rx_ring->next_to_process;
2626 i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
2628 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
2630 *rx_cleaned = total_rx_packets;
2632 /* guarantee a trip back through this routine if there was a failure */
2633 return failure ? budget : (int)total_rx_packets;
2636 static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2640 /* We don't bother with setting the CLEARPBA bit as the data sheet
2641 * points out doing so is "meaningless since it was already
2642 * auto-cleared". The auto-clearing happens when the interrupt is asserted.
2645 * Hardware errata 28 also indicates that writing to a
2646 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2647 * an event in the PBA anyway so we need to rely on the automask
2648 * to hold pending events for us until the interrupt is re-enabled
2650 * The itr value is reported in microseconds, and the register
2651 * value is recorded in 2 microsecond units. For this reason we
2652 * only need to shift by the interval shift - 1 instead of the full value.
2655 itr &= I40E_ITR_MASK;
2657 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2658 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2659 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
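/* Example: an itr of 50 usecs corresponds to an interval of 25 register
 * units of 2 usecs each; for the even itr values used here,
 * itr << (interval shift - 1) is the same as (itr / 2) << interval shift.
 */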
2664 /* a small macro to shorten up some long lines */
2665 #define INTREG I40E_PFINT_DYN_CTLN
2667 /* The act of updating the ITR will cause it to immediately trigger. In order
2668 * to prevent this from throwing off adaptive update statistics we defer the
2669 * update so that it can only happen so often. So after either Tx or Rx are
2670 * updated we make the adaptive scheme wait until either the ITR completely
2671 * expires via the next_update expiration or we have been through at least 3 interrupts.
2674 #define ITR_COUNTDOWN_START 3
2677 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2678 * @vsi: the VSI we care about
2679 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2682 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2683 struct i40e_q_vector *q_vector)
2685 struct i40e_hw *hw = &vsi->back->hw;
2688 /* If we don't have MSIX, then we only need to re-enable icr0 */
2689 if (!test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) {
2690 i40e_irq_dynamic_enable_icr0(vsi->back);
2694 /* These will do nothing if dynamic updates are not enabled */
2695 i40e_update_itr(q_vector, &q_vector->tx);
2696 i40e_update_itr(q_vector, &q_vector->rx);
2698 /* This block of logic allows us to get away with only updating
2699 * one ITR value with each interrupt. The idea is to perform a
2700 * pseudo-lazy update with the following criteria.
2702 * 1. Rx is given higher priority than Tx if both are in same state
2703 * 2. If we must reduce an ITR, that reduction is given highest priority.
2704 * 3. We then give priority to increasing ITR based on amount.
2706 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2707 /* Rx ITR needs to be reduced, this is highest priority */
2708 intval = i40e_buildreg_itr(I40E_RX_ITR,
2709 q_vector->rx.target_itr);
2710 q_vector->rx.current_itr = q_vector->rx.target_itr;
2711 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2712 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2713 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2714 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2715 /* Tx ITR needs to be reduced, this is second priority
2716 * Tx ITR needs to be increased more than Rx, fourth priority
2718 intval = i40e_buildreg_itr(I40E_TX_ITR,
2719 q_vector->tx.target_itr);
2720 q_vector->tx.current_itr = q_vector->tx.target_itr;
2721 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2722 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2723 /* Rx ITR needs to be increased, third priority */
2724 intval = i40e_buildreg_itr(I40E_RX_ITR,
2725 q_vector->rx.target_itr);
2726 q_vector->rx.current_itr = q_vector->rx.target_itr;
2727 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2729 /* No ITR update, lowest priority */
2730 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2731 if (q_vector->itr_countdown)
2732 q_vector->itr_countdown--;
2735 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2736 wr32(hw, INTREG(q_vector->reg_idx), intval);
2740 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2741 * @napi: napi struct with our devices info in it
2742 * @budget: amount of work driver is allowed to do this pass, in packets
2744 * This function will clean all queues associated with a q_vector.
2746 * Returns the amount of work done
2748 int i40e_napi_poll(struct napi_struct *napi, int budget)
2750 struct i40e_q_vector *q_vector =
2751 container_of(napi, struct i40e_q_vector, napi);
2752 struct i40e_vsi *vsi = q_vector->vsi;
2753 struct i40e_ring *ring;
2754 bool tx_clean_complete = true;
2755 bool rx_clean_complete = true;
2756 unsigned int tx_cleaned = 0;
2757 unsigned int rx_cleaned = 0;
2758 bool clean_complete = true;
2759 bool arm_wb = false;
2760 int budget_per_ring;
2763 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2764 napi_complete(napi);
2768 /* Since the actual Tx work is minimal, we can give the Tx a larger
2769 * budget and be more aggressive about cleaning up the Tx descriptors.
2771 i40e_for_each_ring(ring, q_vector->tx) {
2772 bool wd = ring->xsk_pool ?
2773 i40e_clean_xdp_tx_irq(vsi, ring) :
2774 i40e_clean_tx_irq(vsi, ring, budget, &tx_cleaned);
2777 clean_complete = tx_clean_complete = false;
2780 arm_wb |= ring->arm_wb;
2781 ring->arm_wb = false;
2784 /* Handle case where we are called by netpoll with a budget of 0 */
2788 /* normally we have 1 Rx ring per q_vector */
2789 if (unlikely(q_vector->num_ringpairs > 1))
2790 /* We attempt to distribute budget to each Rx queue fairly, but
2791 * don't allow the budget to go below 1 because that would exit polling early.
2794 budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
2796 /* Max of 1 Rx ring in this q_vector so give it the budget */
2797 budget_per_ring = budget;
2799 i40e_for_each_ring(ring, q_vector->rx) {
2800 int cleaned = ring->xsk_pool ?
2801 i40e_clean_rx_irq_zc(ring, budget_per_ring) :
2802 i40e_clean_rx_irq(ring, budget_per_ring, &rx_cleaned);
2804 work_done += cleaned;
2805 /* if we clean as many as budgeted, we must not be done */
2806 if (cleaned >= budget_per_ring)
2807 clean_complete = rx_clean_complete = false;
2810 if (!i40e_enabled_xdp_vsi(vsi))
2811 trace_i40e_napi_poll(napi, q_vector, budget, budget_per_ring, rx_cleaned,
2812 tx_cleaned, rx_clean_complete, tx_clean_complete);
2814 /* If work not completed, return budget and polling will return */
2815 if (!clean_complete) {
2816 int cpu_id = smp_processor_id();
2818 /* It is possible that the interrupt affinity has changed but,
2819 * if the cpu is pegged at 100%, polling will never exit while
2820 * traffic continues and the interrupt will be stuck on this
2821 * cpu. We check to make sure affinity is correct before we
2822 * continue to poll, otherwise we must stop polling so the
2823 * interrupt can move to the correct cpu.
2825 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2826 /* Tell napi that we are done polling */
2827 napi_complete_done(napi, work_done);
2829 /* Force an interrupt */
2830 i40e_force_wb(vsi, q_vector);
2832 /* Return budget-1 so that polling stops */
2837 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2838 i40e_enable_wb_on_itr(vsi, q_vector);
2843 if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
2844 q_vector->arm_wb_state = false;
2846 /* Exit the polling mode, but don't re-enable interrupts if stack might
2847 * poll us due to busy-polling
2849 if (likely(napi_complete_done(napi, work_done)))
2850 i40e_update_enable_itr(vsi, q_vector);
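/* Never report the full budget back here: napi_complete_done() has already
 * been called, and a return value equal to budget would tell the NAPI core
 * that more work is still pending.
 */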
2852 return min(work_done, budget - 1);
2856 * i40e_atr - Add a Flow Director ATR filter
2857 * @tx_ring: ring to add programming descriptor to
2859 * @tx_flags: send tx flags
2861 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2864 struct i40e_filter_program_desc *fdir_desc;
2865 struct i40e_pf *pf = tx_ring->vsi->back;
2867 unsigned char *network;
2869 struct ipv6hdr *ipv6;
2873 u32 flex_ptype, dtype_cmd;
2877 /* make sure ATR is enabled */
2878 if (!test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
2881 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2884 /* if sampling is disabled do nothing */
2885 if (!tx_ring->atr_sample_rate)
2888 /* Currently only IPv4/IPv6 with TCP is supported */
2889 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2892 /* snag network header to get L4 type and address */
2893 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2894 skb_inner_network_header(skb) : skb_network_header(skb);
2896 /* Note: tx_flags gets modified to reflect inner protocols in
2897 * tx_enable_csum function if encap is enabled.
2899 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2900 /* access ihl as u8 to avoid unaligned access on ia64 */
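/* The low nibble of the first IPv4 byte is the IHL field, counted in
 * 4-byte words, so shifting left by 2 converts it to bytes; e.g. the
 * minimum IHL of 5 gives a 20 byte header.
 */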
2901 hlen = (hdr.network[0] & 0x0F) << 2;
2902 l4_proto = hdr.ipv4->protocol;
2904 /* find the start of the innermost ipv6 header */
2905 unsigned int inner_hlen = hdr.network - skb->data;
2906 unsigned int h_offset = inner_hlen;
2908 /* this function updates h_offset to the end of the header */
2910 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2911 /* hlen is our best estimate of the IPv6 header length, i.e. the offset of the TCP header from hdr.network */
2912 hlen = h_offset - inner_hlen;
2915 if (l4_proto != IPPROTO_TCP)
2918 th = (struct tcphdr *)(hdr.network + hlen);
2920 /* Due to lack of space, no more new filters can be programmed */
2921 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2923 if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags)) {
2924 /* HW ATR eviction will take care of removing filters on FIN and RST packets
2927 if (th->fin || th->rst)
2931 tx_ring->atr_count++;
2933 /* sample on all syn/fin/rst packets or once every atr sample rate */
2937 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2940 tx_ring->atr_count = 0;
2942 /* grab the next descriptor */
2943 i = tx_ring->next_to_use;
2944 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2947 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2949 flex_ptype = FIELD_PREP(I40E_TXD_FLTR_QW0_QINDEX_MASK,
2950 tx_ring->queue_index);
2951 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2952 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2953 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2954 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2955 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2957 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2959 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2961 dtype_cmd |= (th->fin || th->rst) ?
2962 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2963 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2964 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2965 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2967 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2968 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2970 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2971 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2973 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2974 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2976 FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
2977 I40E_FD_ATR_STAT_IDX(pf->hw.pf_id));
2980 FIELD_PREP(I40E_TXD_FLTR_QW1_CNTINDEX_MASK,
2981 I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id));
2983 if (test_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags))
2984 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2986 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2987 fdir_desc->rsvd = cpu_to_le32(0);
2988 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2989 fdir_desc->fd_id = cpu_to_le32(0);
2993 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2995 * @tx_ring: ring to send buffer on
2996 * @flags: the tx flags to be set
2998 * Checks the skb and set up correspondingly several generic transmit flags
2999 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
3001 * Returns an error code to indicate the frame should be dropped upon error and
3002 * otherwise returns 0 to indicate the flags have been set properly.
3004 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
3005 struct i40e_ring *tx_ring,
3008 __be16 protocol = skb->protocol;
3011 if (protocol == htons(ETH_P_8021Q) &&
3012 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
3013 /* When HW VLAN acceleration is turned off by the user the
3014 * stack sets the protocol to 8021q so that the driver
3015 * can take any steps required to support the SW only
3016 * VLAN handling. In our case the driver doesn't need
3017 * to take any further steps so just set the protocol
3018 * to the encapsulated ethertype.
3020 skb->protocol = vlan_get_protocol(skb);
3024 /* if we have a HW VLAN tag being added, default to the HW one */
3025 if (skb_vlan_tag_present(skb)) {
3026 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
3027 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
3028 /* else if it is a SW VLAN, check the next protocol and store the tag */
3029 } else if (protocol == htons(ETH_P_8021Q)) {
3030 struct vlan_hdr *vhdr, _vhdr;
3032 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
3036 protocol = vhdr->h_vlan_encapsulated_proto;
3037 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
3038 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
3041 if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
3044 /* Insert 802.1p priority into VLAN header */
3045 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
3046 (skb->priority != TC_PRIO_CONTROL)) {
3047 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
3048 tx_flags |= (skb->priority & 0x7) <<
3049 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
3050 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
3051 struct vlan_ethhdr *vhdr;
3054 rc = skb_cow_head(skb, 0);
3057 vhdr = skb_vlan_eth_hdr(skb);
3058 vhdr->h_vlan_TCI = htons(tx_flags >>
3059 I40E_TX_FLAGS_VLAN_SHIFT);
3061 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
3071 * i40e_tso - set up the tso context descriptor
3072 * @first: pointer to first Tx buffer for xmit
3073 * @hdr_len: ptr to the size of the packet header
3074 * @cd_type_cmd_tso_mss: Quad Word 1
3076 * Returns 0 if no TSO can happen, 1 if tso is going, or error
3078 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
3079 u64 *cd_type_cmd_tso_mss)
3081 struct sk_buff *skb = first->skb;
3082 u64 cd_cmd, cd_tso_len, cd_mss;
3094 u32 paylen, l4_offset;
3098 if (skb->ip_summed != CHECKSUM_PARTIAL)
3101 if (!skb_is_gso(skb))
3104 err = skb_cow_head(skb, 0);
3108 protocol = vlan_get_protocol(skb);
3110 if (eth_p_mpls(protocol))
3111 ip.hdr = skb_inner_network_header(skb);
3113 ip.hdr = skb_network_header(skb);
3114 l4.hdr = skb_checksum_start(skb);
3116 /* initialize outer IP header fields */
3117 if (ip.v4->version == 4) {
3121 first->tx_flags |= I40E_TX_FLAGS_TSO;
3123 ip.v6->payload_len = 0;
3124 first->tx_flags |= I40E_TX_FLAGS_TSO;
3127 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
3131 SKB_GSO_UDP_TUNNEL |
3132 SKB_GSO_UDP_TUNNEL_CSUM)) {
3133 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3134 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
3137 /* determine offset of outer transport header */
3138 l4_offset = l4.hdr - skb->data;
3140 /* remove payload length from outer checksum */
3141 paylen = skb->len - l4_offset;
3142 csum_replace_by_diff(&l4.udp->check,
3143 (__force __wsum)htonl(paylen));
3146 /* reset pointers to inner headers */
3147 ip.hdr = skb_inner_network_header(skb);
3148 l4.hdr = skb_inner_transport_header(skb);
3150 /* initialize inner IP header fields */
3151 if (ip.v4->version == 4) {
3155 ip.v6->payload_len = 0;
3159 /* determine offset of inner transport header */
3160 l4_offset = l4.hdr - skb->data;
3162 /* remove payload length from inner checksum */
3163 paylen = skb->len - l4_offset;
3165 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
3166 csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
3167 /* compute length of segmentation header */
3168 *hdr_len = sizeof(*l4.udp) + l4_offset;
3170 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
3171 /* compute length of segmentation header */
3172 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
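/* tcp->doff is the TCP header length in 4-byte words, so doff * 4 is its
 * size in bytes; adding l4_offset (the bytes preceding the L4 header)
 * yields the full header length replicated in front of every segment.
 */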
3175 /* pull values out of skb_shinfo */
3176 gso_size = skb_shinfo(skb)->gso_size;
3178 /* update GSO size and bytecount with header size */
3179 first->gso_segs = skb_shinfo(skb)->gso_segs;
3180 first->bytecount += (first->gso_segs - 1) * *hdr_len;
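/* bytecount was initialized to skb->len, which counts the headers only
 * once; on the wire each extra segment repeats them, so e.g. 4 segments
 * with a 54 byte header add 3 * 54 = 162 bytes here.
 */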
3182 /* find the field values */
3183 cd_cmd = I40E_TX_CTX_DESC_TSO;
3184 cd_tso_len = skb->len - *hdr_len;
3186 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
3187 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
3188 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
3193 * i40e_tsyn - set up the tsyn context descriptor
3194 * @tx_ring: ptr to the ring to send
3195 * @skb: ptr to the skb we're sending
3196 * @tx_flags: the collected send information
3197 * @cd_type_cmd_tso_mss: Quad Word 1
3199 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
3201 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
3202 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
3206 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
3209 /* Tx timestamps cannot be sampled when doing TSO */
3210 if (tx_flags & I40E_TX_FLAGS_TSO)
3213 /* only timestamp the outbound packet if the user has requested it and
3214 * we are not already transmitting a packet to be timestamped
3216 pf = i40e_netdev_to_pf(tx_ring->netdev);
3217 if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags))
3221 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
3222 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3223 pf->ptp_tx_start = jiffies;
3224 pf->ptp_tx_skb = skb_get(skb);
3226 pf->tx_hwtstamp_skipped++;
3230 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3231 I40E_TXD_CTX_QW1_CMD_SHIFT;
3237 * i40e_tx_enable_csum - Enable Tx checksum offloads
3239 * @tx_flags: pointer to Tx flags currently set
3240 * @td_cmd: Tx descriptor command bits to set
3241 * @td_offset: Tx descriptor header offsets to set
3242 * @tx_ring: Tx descriptor ring
3243 * @cd_tunneling: ptr to context desc bits
3245 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3246 u32 *td_cmd, u32 *td_offset,
3247 struct i40e_ring *tx_ring,
3260 unsigned char *exthdr;
3261 u32 offset, cmd = 0;
3266 if (skb->ip_summed != CHECKSUM_PARTIAL)
3269 protocol = vlan_get_protocol(skb);
3271 if (eth_p_mpls(protocol)) {
3272 ip.hdr = skb_inner_network_header(skb);
3273 l4.hdr = skb_checksum_start(skb);
3275 ip.hdr = skb_network_header(skb);
3276 l4.hdr = skb_transport_header(skb);
3279 /* set the tx_flags to indicate the IP protocol type. This is
3280 * required so that the header checksum computation below is accurate.
3282 if (ip.v4->version == 4)
3283 *tx_flags |= I40E_TX_FLAGS_IPV4;
3285 *tx_flags |= I40E_TX_FLAGS_IPV6;
3287 /* compute outer L2 header size */
3288 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
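/* The descriptor length fields use hardware units: MACLEN is in 2-byte
 * words (hence the / 2 above), while the IP and L4 length fields filled
 * in below are in 4-byte dwords (hence the / 4 and the use of tcp->doff).
 */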
3290 if (skb->encapsulation) {
3292 /* define outer network header type */
3293 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3294 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3295 I40E_TX_CTX_EXT_IP_IPV4 :
3296 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3298 l4_proto = ip.v4->protocol;
3299 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3302 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3304 exthdr = ip.hdr + sizeof(*ip.v6);
3305 l4_proto = ip.v6->nexthdr;
3306 ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
3307 &l4_proto, &frag_off);
3312 /* define outer transport */
3315 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3316 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3319 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3320 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3324 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3325 l4.hdr = skb_inner_network_header(skb);
3328 if (*tx_flags & I40E_TX_FLAGS_TSO)
3331 skb_checksum_help(skb);
3335 /* compute outer L3 header size */
3336 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3337 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3339 /* switch IP header pointer from outer to inner header */
3340 ip.hdr = skb_inner_network_header(skb);
3342 /* compute tunnel header size */
3343 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3344 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3346 /* indicate if we need to offload outer UDP header */
3347 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3348 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3349 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3350 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3352 /* record tunnel offload values */
3353 *cd_tunneling |= tunnel;
3355 /* switch L4 header pointer from outer to inner */
3356 l4.hdr = skb_inner_transport_header(skb);
3359 /* reset type as we transition from outer to inner headers */
3360 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3361 if (ip.v4->version == 4)
3362 *tx_flags |= I40E_TX_FLAGS_IPV4;
3363 if (ip.v6->version == 6)
3364 *tx_flags |= I40E_TX_FLAGS_IPV6;
3367 /* Enable IP checksum offloads */
3368 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3369 l4_proto = ip.v4->protocol;
3370 /* the stack computes the IP header checksum already, the only time we
3371 * need the hardware to recompute it is in the case of TSO.
3373 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3374 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3375 I40E_TX_DESC_CMD_IIPT_IPV4;
3376 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3377 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3379 exthdr = ip.hdr + sizeof(*ip.v6);
3380 l4_proto = ip.v6->nexthdr;
3381 if (l4.hdr != exthdr)
3382 ipv6_skip_exthdr(skb, exthdr - skb->data,
3383 &l4_proto, &frag_off);
3386 /* compute inner L3 header size */
3387 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3389 /* Enable L4 checksum offloads */
3392 /* enable checksum offloads */
3393 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3394 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3397 /* enable SCTP checksum offload */
3398 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3399 offset |= (sizeof(struct sctphdr) >> 2) <<
3400 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3403 /* enable UDP checksum offload */
3404 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3405 offset |= (sizeof(struct udphdr) >> 2) <<
3406 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3409 if (*tx_flags & I40E_TX_FLAGS_TSO)
3411 skb_checksum_help(skb);
3416 *td_offset |= offset;
3422 * i40e_create_tx_ctx - Build the Tx context descriptor
3423 * @tx_ring: ring to create the descriptor on
3424 * @cd_type_cmd_tso_mss: Quad Word 1
3425 * @cd_tunneling: Quad Word 0 - bits 0-31
3426 * @cd_l2tag2: Quad Word 0 - bits 32-63
3428 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3429 const u64 cd_type_cmd_tso_mss,
3430 const u32 cd_tunneling, const u32 cd_l2tag2)
3432 struct i40e_tx_context_desc *context_desc;
3433 int i = tx_ring->next_to_use;
3435 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3436 !cd_tunneling && !cd_l2tag2)
3439 /* grab the next descriptor */
3440 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3443 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3445 /* cpu_to_le32 and assign to struct fields */
3446 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3447 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3448 context_desc->rsvd = cpu_to_le16(0);
3449 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3453 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3454 * @tx_ring: the ring to be checked
3455 * @size: the size buffer we want to assure is available
3457 * Returns -EBUSY if a stop is needed, else 0
3459 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3461 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3462 /* Memory barrier before checking head and tail */
3465 ++tx_ring->tx_stats.tx_stopped;
3467 /* Check again in case another CPU has just made room available. */
3468 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3471 /* A reprieve! - use start_queue because it doesn't call schedule */
3472 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3473 ++tx_ring->tx_stats.restart_queue;
3478 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3481 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3482 * and so we need to figure out the cases where we need to linearize the skb.
3484 * For TSO we need to count the TSO header and segment payload separately.
3485 * As such we need to check cases where we have 7 fragments or more as we
3486 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3487 * the segment payload in the first descriptor, and another 7 for the
3490 bool __i40e_chk_linearize(struct sk_buff *skb)
3492 const skb_frag_t *frag, *stale;
3495 /* no need to check if number of frags is less than 7 */
3496 nr_frags = skb_shinfo(skb)->nr_frags;
3497 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3500 /* We need to walk through the list and validate that each group
3501 * of 6 fragments totals at least gso_size.
3503 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3504 frag = &skb_shinfo(skb)->frags[0];
3506 /* Initialize size to the negative value of gso_size minus 1. We
3507 * use this as the worst case scenario in which the frag ahead
3508 * of us only provides one byte which is why we are limited to 6
3509 * descriptors for a single transmit as the header and previous
3510 * fragment are already consuming 2 descriptors.
3512 sum = 1 - skb_shinfo(skb)->gso_size;
3514 /* Add size of frags 0 through 4 to create our initial sum */
3515 sum += skb_frag_size(frag++);
3516 sum += skb_frag_size(frag++);
3517 sum += skb_frag_size(frag++);
3518 sum += skb_frag_size(frag++);
3519 sum += skb_frag_size(frag++);
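/* From here on sum acts as a sliding window over the six most recently
 * examined fragments, offset by -(gso_size - 1): each pass adds the newest
 * fragment and drops the oldest, so sum going negative means those six
 * fragments cannot carry a full gso_size worth of data and the skb has to
 * be linearized.
 */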
3521 /* Walk through fragments adding latest fragment, testing it, and
3522 * then removing stale fragments from the sum.
3524 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3525 int stale_size = skb_frag_size(stale);
3527 sum += skb_frag_size(frag++);
3529 /* The stale fragment may present us with a smaller
3530 * descriptor than the actual fragment size. To account
3531 * for that we need to remove all the data on the front and
3532 * figure out what the remainder would be in the last
3533 * descriptor associated with the fragment.
3535 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3536 int align_pad = -(skb_frag_off(stale)) &
3537 (I40E_MAX_READ_REQ_SIZE - 1);
3540 stale_size -= align_pad;
3543 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3544 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3545 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3548 /* if sum is negative we failed to make sufficient progress */
3562 * i40e_tx_map - Build the Tx descriptor
3563 * @tx_ring: ring to send buffer on
3565 * @first: first buffer info buffer to use
3566 * @tx_flags: collected send information
3567 * @hdr_len: size of the packet header
3568 * @td_cmd: the command field in the descriptor
3569 * @td_offset: offset for checksum or crc
3571 * Returns 0 on success, -1 on failure to DMA
3573 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3574 struct i40e_tx_buffer *first, u32 tx_flags,
3575 const u8 hdr_len, u32 td_cmd, u32 td_offset)
3577 unsigned int data_len = skb->data_len;
3578 unsigned int size = skb_headlen(skb);
3580 struct i40e_tx_buffer *tx_bi;
3581 struct i40e_tx_desc *tx_desc;
3582 u16 i = tx_ring->next_to_use;
3587 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3588 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3589 td_tag = FIELD_GET(I40E_TX_FLAGS_VLAN_MASK, tx_flags);
3592 first->tx_flags = tx_flags;
3594 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3596 tx_desc = I40E_TX_DESC(tx_ring, i);
3599 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3600 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3602 if (dma_mapping_error(tx_ring->dev, dma))
3605 /* record length, and DMA address */
3606 dma_unmap_len_set(tx_bi, len, size);
3607 dma_unmap_addr_set(tx_bi, dma, dma);
3609 /* align size to end of page */
3610 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
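/* -dma & (I40E_MAX_READ_REQ_SIZE - 1) is the distance from dma to the next
 * read-request-size boundary, so the first chunk is sized to end exactly on
 * such a boundary and the remaining chunks stay naturally aligned.
 */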
3611 tx_desc->buffer_addr = cpu_to_le64(dma);
3613 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3614 tx_desc->cmd_type_offset_bsz =
3615 build_ctob(td_cmd, td_offset,
3622 if (i == tx_ring->count) {
3623 tx_desc = I40E_TX_DESC(tx_ring, 0);
3630 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3631 tx_desc->buffer_addr = cpu_to_le64(dma);
3634 if (likely(!data_len))
3637 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3644 if (i == tx_ring->count) {
3645 tx_desc = I40E_TX_DESC(tx_ring, 0);
3649 size = skb_frag_size(frag);
3652 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3655 tx_bi = &tx_ring->tx_bi[i];
3658 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3661 if (i == tx_ring->count)
3664 tx_ring->next_to_use = i;
3666 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3668 /* write last descriptor with EOP bit */
3669 td_cmd |= I40E_TX_DESC_CMD_EOP;
3671 /* We OR these values together to check both against 4 (WB_STRIDE)
3672 * below. This is safe since we don't re-use desc_count afterwards.
3674 desc_count |= ++tx_ring->packet_stride;
3676 if (desc_count >= WB_STRIDE) {
3677 /* write last descriptor with RS bit set */
3678 td_cmd |= I40E_TX_DESC_CMD_RS;
3679 tx_ring->packet_stride = 0;
3682 tx_desc->cmd_type_offset_bsz =
3683 build_ctob(td_cmd, td_offset, size, td_tag);
3685 skb_tx_timestamp(skb);
3687 /* Force memory writes to complete before letting h/w know there
3688 * are new descriptors to fetch.
3690 * We also use this memory barrier to make certain all of the
3691 * status bits have been updated before next_to_watch is written.
3695 /* set next_to_watch value indicating a packet is present */
3696 first->next_to_watch = tx_desc;
3698 /* notify HW of packet */
3699 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
3700 writel(i, tx_ring->tail);
3706 dev_info(tx_ring->dev, "TX DMA map failed\n");
3708 /* clear dma mappings for failed tx_bi map */
3710 tx_bi = &tx_ring->tx_bi[i];
3711 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3719 tx_ring->next_to_use = i;
3724 static u16 i40e_swdcb_skb_tx_hash(struct net_device *dev,
3725 const struct sk_buff *skb,
3728 u32 jhash_initval_salt = 0xd631614b;
3731 if (skb->sk && skb->sk->sk_hash)
3732 hash = skb->sk->sk_hash;
3734 hash = (__force u16)skb->protocol ^ skb->hash;
3736 hash = jhash_1word(hash, jhash_initval_salt);
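/* Map the 32-bit hash onto [0, num_tx_queues) without a modulo: multiply
 * by the queue count and keep the upper 32 bits, the same trick as
 * reciprocal_scale().
 */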
3738 return (u16)(((u64)hash * num_tx_queues) >> 32);
3741 u16 i40e_lan_select_queue(struct net_device *netdev,
3742 struct sk_buff *skb,
3743 struct net_device __always_unused *sb_dev)
3745 struct i40e_netdev_priv *np = netdev_priv(netdev);
3746 struct i40e_vsi *vsi = np->vsi;
3754 /* is DCB enabled at all? */
3755 if (vsi->tc_config.numtc == 1 ||
3756 i40e_is_tc_mqprio_enabled(vsi->back))
3757 return netdev_pick_tx(netdev, skb, sb_dev);
3759 prio = skb->priority;
3760 hw = &vsi->back->hw;
3761 tclass = hw->local_dcbx_config.etscfg.prioritytable[prio];
3763 if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass))))
3766 /* select a queue assigned for the given TC */
3767 qcount = vsi->tc_config.tc_info[tclass].qcount;
3768 hash = i40e_swdcb_skb_tx_hash(netdev, skb, qcount);
3770 qoffset = vsi->tc_config.tc_info[tclass].qoffset;
3771 return qoffset + hash;
3775 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3776 * @xdpf: data to transmit
3777 * @xdp_ring: XDP Tx ring
3779 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
3780 struct i40e_ring *xdp_ring)
3782 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
3783 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
3784 u16 i = 0, index = xdp_ring->next_to_use;
3785 struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index];
3786 struct i40e_tx_buffer *tx_bi = tx_head;
3787 struct i40e_tx_desc *tx_desc = I40E_TX_DESC(xdp_ring, index);
3788 void *data = xdpf->data;
3789 u32 size = xdpf->len;
3791 if (unlikely(I40E_DESC_UNUSED(xdp_ring) < 1 + nr_frags)) {
3792 xdp_ring->tx_stats.tx_busy++;
3793 return I40E_XDP_CONSUMED;
3796 tx_head->bytecount = xdp_get_frame_len(xdpf);
3797 tx_head->gso_segs = 1;
3798 tx_head->xdpf = xdpf;
3803 dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
3804 if (dma_mapping_error(xdp_ring->dev, dma))
3807 /* record length, and DMA address */
3808 dma_unmap_len_set(tx_bi, len, size);
3809 dma_unmap_addr_set(tx_bi, dma, dma);
3811 tx_desc->buffer_addr = cpu_to_le64(dma);
3812 tx_desc->cmd_type_offset_bsz =
3813 build_ctob(I40E_TX_DESC_CMD_ICRC, 0, size, 0);
3815 if (++index == xdp_ring->count)
3821 tx_bi = &xdp_ring->tx_bi[index];
3822 tx_desc = I40E_TX_DESC(xdp_ring, index);
3824 data = skb_frag_address(&sinfo->frags[i]);
3825 size = skb_frag_size(&sinfo->frags[i]);
3829 tx_desc->cmd_type_offset_bsz |=
3830 cpu_to_le64(I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT);
3832 /* Make certain all of the status bits have been updated
3833 * before next_to_watch is written.
3837 xdp_ring->xdp_tx_active++;
3839 tx_head->next_to_watch = tx_desc;
3840 xdp_ring->next_to_use = index;
3846 tx_bi = &xdp_ring->tx_bi[index];
3847 if (dma_unmap_len(tx_bi, len))
3848 dma_unmap_page(xdp_ring->dev,
3849 dma_unmap_addr(tx_bi, dma),
3850 dma_unmap_len(tx_bi, len),
3852 dma_unmap_len_set(tx_bi, len, 0);
3853 if (tx_bi == tx_head)
3857 index += xdp_ring->count;
3861 return I40E_XDP_CONSUMED;
3865 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3867 * @tx_ring: ring to send buffer on
3869 * Returns NETDEV_TX_OK if sent, else an error code
3871 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3872 struct i40e_ring *tx_ring)
3874 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3875 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3876 struct i40e_tx_buffer *first;
3884 /* prefetch the data, we'll need it later */
3885 prefetch(skb->data);
3887 i40e_trace(xmit_frame_ring, skb, tx_ring);
3889 count = i40e_xmit_descriptor_count(skb);
3890 if (i40e_chk_linearize(skb, count)) {
3891 if (__skb_linearize(skb)) {
3892 dev_kfree_skb_any(skb);
3893 return NETDEV_TX_OK;
3895 count = i40e_txd_use_count(skb->len);
3896 tx_ring->tx_stats.tx_linearize++;
3899 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3900 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3901 * + 4 desc gap to avoid the cache line where head is,
3902 * + 1 desc for context descriptor,
3903 * otherwise try next time
3905 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3906 tx_ring->tx_stats.tx_busy++;
3907 return NETDEV_TX_BUSY;
3910 /* record the location of the first descriptor for this packet */
3911 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3913 first->bytecount = skb->len;
3914 first->gso_segs = 1;
3916 /* prepare the xmit flags */
3917 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3920 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3925 tx_flags |= I40E_TX_FLAGS_TSO;
3927 /* Always offload the checksum, since it's in the data descriptor */
3928 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3929 tx_ring, &cd_tunneling);
3933 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3936 tx_flags |= I40E_TX_FLAGS_TSYN;
3938 /* always enable CRC insertion offload */
3939 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3941 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3942 cd_tunneling, cd_l2tag2);
3944 /* Add Flow Director ATR if it's enabled.
3946 * NOTE: this must always be directly before the data descriptor.
3948 i40e_atr(tx_ring, skb, tx_flags);
3950 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3952 goto cleanup_tx_tstamp;
3954 return NETDEV_TX_OK;
3957 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3958 dev_kfree_skb_any(first->skb);
3961 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3962 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3964 dev_kfree_skb_any(pf->ptp_tx_skb);
3965 pf->ptp_tx_skb = NULL;
3966 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3969 return NETDEV_TX_OK;
3973 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3975 * @netdev: network interface device structure
3977 * Returns NETDEV_TX_OK if sent, else an error code
3979 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3981 struct i40e_netdev_priv *np = netdev_priv(netdev);
3982 struct i40e_vsi *vsi = np->vsi;
3983 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3985 /* hardware can't handle really short frames, hardware padding works beyond this point
3988 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3989 return NETDEV_TX_OK;
3991 return i40e_xmit_frame_ring(skb, tx_ring);
3995 * i40e_xdp_xmit - Implements ndo_xdp_xmit
3997 * @n: number of frames
3998 * @frames: array of XDP buffer pointers
3999 * @flags: XDP extra info
4001 * Returns number of frames successfully sent. Failed frames
4002 * will be freed by the XDP core.
4004 * For error cases, a negative errno code is returned and no frames
4005 * are transmitted (caller must handle freeing frames).
4007 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
4010 struct i40e_netdev_priv *np = netdev_priv(dev);
4011 unsigned int queue_index = smp_processor_id();
4012 struct i40e_vsi *vsi = np->vsi;
4013 struct i40e_pf *pf = vsi->back;
4014 struct i40e_ring *xdp_ring;
4018 if (test_bit(__I40E_VSI_DOWN, vsi->state))
4021 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
4022 test_bit(__I40E_CONFIG_BUSY, pf->state))
4025 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
4028 xdp_ring = vsi->xdp_rings[queue_index];
4030 for (i = 0; i < n; i++) {
4031 struct xdp_frame *xdpf = frames[i];
4034 err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
4035 if (err != I40E_XDP_TX)
4040 if (unlikely(flags & XDP_XMIT_FLUSH))
4041 i40e_xdp_ring_update_tail(xdp_ring);