// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
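/* EOP marks the final descriptor of a frame; RS asks the hardware to
 * report completion status back so the cleanup path can reclaim buffers.
 */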
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}
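/* Programming a Flow Director rule consumes two Tx descriptors: the
 * filter program descriptor built by i40e_fdir() above, plus a dummy
 * data descriptor queued by i40e_program_fdir_filter() below that
 * points at a raw packet the hardware parses for the flow fields.
 */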
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: the PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;
	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
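/* The i40e_add_del_fdir_*() helpers below hand pre-built dummy frames
 * to i40e_program_fdir_filter(). Each template is Ethernet + IPv4 (+ L4
 * header) with no payload; e.g. in the UDPv4 template 0x45 is the IPv4
 * version/IHL byte and 0x40, 0x11 are TTL 64 and protocol 17 (UDP).
 * Addresses and ports are patched in before the rule is programmed.
 */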
#define IP_HEADER_OFFSET		14
#define I40E_UDPIP_DUMMY_PACKET_LEN	42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	udp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_udp4_filter_cnt++;
	else
		pf->fd_udp4_filter_cnt--;

	return 0;
}
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	tcp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add) {
		pf->fd_tcp4_filter_cnt++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
	} else {
		pf->fd_tcp4_filter_cnt--;
	}

	return 0;
}
#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct sctphdr *sctp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
	       + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	sctp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	sctp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_sctp4_filter_cnt++;
	else
		pf->fd_sctp4_filter_cnt--;

	return 0;
}
#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip;
		ip->daddr = fd_data->dst_ip;
		ip->protocol = 0;

		if (fd_data->flex_filter) {
			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
			__be16 pattern = fd_data->flex_word;
			u16 off = fd_data->flex_offset;

			*((__force __be16 *)(payload + off)) = pattern;
		}

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			/* The packet buffer wasn't added to the ring so we
			 * need to free it now.
			 */
			kfree(raw_packet);
			return -EOPNOTSUPP;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (add)
		pf->fd_ip4_filter_cnt++;
	else
		pf->fd_ip4_filter_cnt--;

	return 0;
}
/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
				 input->ip4_proto);
			return -EINVAL;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
			 input->flow_type);
		return -EINVAL;
	}

	/* The buffer allocated here will normally be freed by
	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
	 * completion. In the event of an error adding the buffer to the FDIR
	 * ring, it will immediately be freed. It may also be freed by
	 * i40e_clean_tx_ring() when closing the VSI.
	 */
	return ret;
}
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @qword0_raw: qword0
 * @qword1: qword1 after le_to_cpu
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				  u64 qword1, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	struct i40e_16b_rx_wb_qw0 *qw0;
	u32 fcnt_prog, fcnt_avail;
	u32 error;

	qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
	error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
		if (qw0->hi_dword.fd_id != 0 ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
			return;

		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if (qw0->hi_dword.fd_id == 0 &&
		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
			/* These set_bit() calls aren't atomic with the
			 * test_bit() here, but worse case we potentially
			 * disable ATR and queue a flush right after SB
			 * support is re-enabled. That shouldn't cause an
			 * issue in practice.
			 */
			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
					      pf->state))
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 qw0->hi_dword.fd_id);
	}
}
/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else if (ring_is_xdp(ring))
			xdp_return_frame(tx_buffer->xdpf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		i40e_xsk_clean_tx_ring(tx_ring);
	} else {
		/* ring already cleared, nothing to do */
		if (!tx_ring->tx_bi)
			return;

		/* Free all the Tx ring sk_buffs */
		for (i = 0; i < tx_ring->count; i++)
			i40e_unmap_and_free_tx_resource(tx_ring,
							&tx_ring->tx_bi[i]);
	}

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	kfree(tx_ring->xsk_descs);
	tx_ring->xsk_descs = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}
/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: use SW variables
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw) {
		head = i40e_get_head(ring);
		tail = readl(ring->tail);
	} else {
		head = ring->next_to_clean;
		tail = ring->next_to_use;
	}

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
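/* i40e_detect_recover_hung() below relies on i40e_get_tx_pending() to
 * tell whether a queue still has outstanding work. Example of the
 * wrap-around math above: count = 512, head = 500, tail = 10 gives
 * 10 + 512 - 500 = 22 descriptors still pending.
 */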
/**
 * i40e_detect_recover_hung - Function to detect and recover hung_queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function is to check each of
 * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
 **/
void i40e_detect_recover_hung(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		tx_ring = vsi->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				i40e_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to i40e_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
			    i40e_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	int i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb/XDP data */
		if (ring_is_xdp(tx_ring))
			xdp_return_frame(tx_buf->xdpf);
		else
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			i40e_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
	i40e_arm_wb(tx_ring, vsi, budget);

	if (ring_is_xdp(tx_ring))
		return !!budget;

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}
/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}
static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
					struct i40e_ring_container *rc)
{
	return &q_vector->rx == rc;
}

static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
{
	unsigned int divisor;

	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
		break;
	case I40E_LINK_SPEED_25GB:
	case I40E_LINK_SPEED_20GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
		break;
	default:
	case I40E_LINK_SPEED_10GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
		break;
	case I40E_LINK_SPEED_1GB:
	case I40E_LINK_SPEED_100MB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
		break;
	}

	return divisor;
}
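/* i40e_update_itr() below divides its 256x-scaled result by this
 * divisor, so a faster link is given a proportionally shorter interrupt
 * interval for the same average frame size.
 */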
/**
 * i40e_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_update_itr(struct i40e_q_vector *q_vector,
			    struct i40e_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = i40e_container_is_rx(q_vector, rc) ?
	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (i40e_container_is_rx(q_vector, rc)) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
			itr = I40E_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
		     I40E_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
			itr &= I40E_ITR_ADAPTIVE_LATENCY;
			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= I40E_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr /= 2;
		itr &= I40E_ITR_MASK;
		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
			itr = I40E_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = I40E_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * give the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
	       I40E_ITR_ADAPTIVE_MIN_INC;
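	/* Worked example: 60-byte frames on a 40G link yield
	 * avg_wire_size = 4096 above; DIV_ROUND_UP(4096, 2 * 1024) = 2,
	 * times I40E_ITR_ADAPTIVE_MIN_INC (2) adds 4 usecs, i.e. the
	 * "250k ints/sec" starting point noted earlier.
	 */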
	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
		itr &= I40E_ITR_ADAPTIVE_LATENCY;
		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}
static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi[idx];
}

/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = i40e_rx_bi(rx_ring, nta);

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;

	rx_ring->rx_stats.page_reuse_count++;

	/* clear contents of buffer_info */
	old_buff->page = NULL;
}
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @qword0_raw: qword0
 * @qword1: qword1 representing status_error_len in CPU ordering
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 **/
void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				   u64 qword1)
{
	u8 id;

	id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
}
/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	if (ring_is_xdp(tx_ring)) {
		tx_ring->xsk_descs = kcalloc(I40E_MAX_NUM_DESCRIPTORS, sizeof(*tx_ring->xsk_descs),
					     GFP_KERNEL);
		if (!tx_ring->xsk_descs)
			goto err;
	}

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->xsk_descs);
	tx_ring->xsk_descs = NULL;
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;

	rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi ? 0 : -ENOMEM;
}

static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
}
/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		i40e_xsk_clean_rx_ring(rx_ring);
		goto skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     i40e_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     I40E_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

skip_free:
	if (rx_ring->xsk_pool)
		i40e_clear_rx_bi_zc(rx_ring);
	else
		i40e_clear_rx_bi(rx_ring);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == I40E_VSI_MAIN)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		return -ENOMEM;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* XDP RX-queue info only needed for RX rings exposed to XDP */
	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				       rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
		if (err < 0)
			return err;
	}

	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;

	return 0;
}
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/**
 * i40e_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
}

static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
					   unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = i40e_rx_offset(rx_ring) ?
		SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}
/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 i40e_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 I40E_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, i40e_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = i40e_rx_offset(rx_ring);
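	/* Take the page's entire refcount budget up front so that each
	 * subsequent reuse only decrements pagecnt_bias instead of doing
	 * an atomic page_ref_add() per received buffer.
	 */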
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}
/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		   I40E_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case I40E_RX_PTYPE_INNER_PROT_TCP:
	case I40E_RX_PTYPE_INNER_PROT_UDP:
	case I40E_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		fallthrough;
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}
/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}
/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}
/**
 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;
	u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
	u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
	u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
		      I40E_RXD_QW1_PTYPE_SHIFT;

	if (unlikely(tsynvalid))
		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);

	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
		__le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(vlan_tag));
	}

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
 * i40e_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 * @rx_desc: pointer to the EOP Rx descriptor
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
				 union i40e_rx_desc *rx_desc)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	/* ERR_MASK will only have valid bits if EOP set, and
	 * what we are doing here is actually checking
	 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
	 * the error field
	 */
	if (unlikely(i40e_test_staterr(rx_desc,
				       BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * i40e_page_is_reusable - check if any reuse is possible
 * @page: page struct to check
 *
 * A page is not reusable if it was allocated under low memory
 * conditions, or it's not in the same NUMA node as this CPU.
 */
static inline bool i40e_page_is_reusable(struct page *page)
{
	return (page_to_nid(page) == numa_mem_id()) &&
		!page_is_pfmemalloc(page);
}
/**
 * i40e_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 *
 * @rx_buffer: buffer containing the page
 * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
 *
 * If page is reusable, rx_buffer->page_offset is adjusted to point to
 * an unused region in the page.
 *
 * For small pages, @truesize will be a constant value, half the size
 * of the memory at page.  We'll attempt to alternate between high and
 * low halves of the page, with one half ready for use by the hardware
 * and the other half being consumed by the stack.  We use the page
 * ref count to determine whether the stack has finished consuming the
 * portion of this page that was passed up with a previous packet.  If
 * the page ref count is >1, we'll assume the "other" half page is
 * still busy, and this page cannot be reused.
 *
 * For larger pages, @truesize will be the actual space used by the
 * received packet (adjusted upward to an even multiple of the cache
 * line size).  This will advance through the page by the amount
 * actually consumed by the received packets while there is still
 * space for a buffer.  Each region of larger pages will be used at
 * most once, after which the page will not be reused.
 *
 * In either case, if the page is reusable its refcount is increased.
 **/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
				   int rx_buffer_pgcnt)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* Is any reuse possible? */
	if (unlikely(!i40e_page_is_reusable(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define I40E_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
/**
 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * It will just attach the page as a frag to the skb.
 *
 * The function will then update the page offset.
 **/
static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
			     struct i40e_rx_buffer *rx_buffer,
			     struct sk_buff *skb,
			     unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
#endif

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
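	/* With order-0 4K pages each buffer is half a page, so XOR-ing
	 * the offset with truesize flips between the two halves; with
	 * larger pages the offset advances by the space just consumed.
	 */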
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}
/**
 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
 * @rx_ring: rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @rx_buffer_pgcnt: buffer page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
						 const unsigned int size,
						 int *rx_buffer_pgcnt)
{
	struct i40e_rx_buffer *rx_buffer;

	rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
	*rx_buffer_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buffer->page);
#else
		0;
#endif
	prefetch_page_address(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}
/**
 * i40e_construct_skb - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb.  It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
					  struct i40e_rx_buffer *rx_buffer,
					  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* Note, we get here by enabling legacy-rx via:
	 *
	 *    ethtool --set-priv-flags <dev> legacy-rx on
	 *
	 * In this mode, we currently get 0 extra XDP headroom as
	 * opposed to having legacy-rx off, where we process XDP
	 * packets going to stack via i40e_build_skb(). The latter
	 * provides us currently with 192 bytes of headroom.
	 *
	 * For i40e_construct_skb() mode it means that the
	 * xdp->data_meta will always point to xdp->data, since
	 * the helper cannot expand the head. Should this ever
	 * change in future for legacy-rx mode on, then lets also
	 * add xdp->data_meta handling here.
	 */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       I40E_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > I40E_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data,
					  I40E_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data,
	       ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				rx_buffer->page_offset + headlen,
				size, truesize);

		/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		/* buffer is unused, reset bias back to rx_buffer */
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}
2072 * i40e_build_skb - Build skb around an existing buffer
2073 * @rx_ring: Rx descriptor ring to transact packets on
2074 * @rx_buffer: Rx buffer to pull data from
2075 * @xdp: xdp_buff pointing to the data
2077 * This function builds an skb around an existing Rx buffer, taking care
2078 * to set up the skb correctly and avoid any memcpy overhead.
2080 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2081 struct i40e_rx_buffer *rx_buffer,
2082 struct xdp_buff *xdp)
2084 unsigned int metasize = xdp->data - xdp->data_meta;
2085 #if (PAGE_SIZE < 8192)
2086 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2087 #else
2088 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2089 SKB_DATA_ALIGN(xdp->data_end -
2090 xdp->data_hard_start);
2091 #endif
2092 struct sk_buff *skb;
2094 /* Prefetch first cache line of first page. If xdp->data_meta
2095 * is unused, this points exactly as xdp->data, otherwise we
2096 * likely have a consumer accessing first few bytes of meta
2097 * data, and then actual data.
2099 net_prefetch(xdp->data_meta);
2101 /* build an skb around the page buffer */
2102 skb = build_skb(xdp->data_hard_start, truesize);
2103 if (unlikely(!skb))
2104 return NULL;
2106 /* update pointers within the skb to store the data */
2107 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2108 __skb_put(skb, xdp->data_end - xdp->data);
2109 if (metasize)
2110 skb_metadata_set(skb, metasize);
2112 /* buffer is used by skb, update page_offset */
2113 #if (PAGE_SIZE < 8192)
2114 rx_buffer->page_offset ^= truesize;
2115 #else
2116 rx_buffer->page_offset += truesize;
2117 #endif
2119 return skb;
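/* A minimal sketch of the truesize arithmetic used above; the helper name
 * is hypothetical and not part of the driver. For PAGE_SIZE >= 8192 the
 * buffer charged to the socket is headroom plus frame plus the trailing
 * struct skb_shared_info that build_skb() expects:
 */
static inline unsigned int i40e_example_truesize(unsigned int headroom,
						 unsigned int len)
{
	/* mirrors the SKB_DATA_ALIGN() sums computed in i40e_build_skb() */
	return SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
	       SKB_DATA_ALIGN(headroom + len);
}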
2123 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2124 * @rx_ring: rx descriptor ring to transact packets on
2125 * @rx_buffer: rx buffer to pull data from
2126 * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
2128 * This function will clean up the contents of the rx_buffer. It will
2129 * either recycle the buffer or unmap it and free the associated resources.
2131 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2132 struct i40e_rx_buffer *rx_buffer,
2133 int rx_buffer_pgcnt)
2135 if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
2136 /* hand second half of page back to the ring */
2137 i40e_reuse_rx_page(rx_ring, rx_buffer);
2138 } else {
2139 /* we are not reusing the buffer so unmap it */
2140 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2141 i40e_rx_pg_size(rx_ring),
2142 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2143 __page_frag_cache_drain(rx_buffer->page,
2144 rx_buffer->pagecnt_bias);
2145 /* clear contents of buffer_info */
2146 rx_buffer->page = NULL;
2151 * i40e_is_non_eop - process handling of non-EOP buffers
2152 * @rx_ring: Rx ring being processed
2153 * @rx_desc: Rx descriptor for current buffer
2154 * @skb: Current socket buffer containing buffer in progress
2156 * This function updates next to clean. If the buffer is an EOP buffer
2157 * this function exits returning false, otherwise it will place the
2158 * sk_buff in the next buffer to be chained and return true indicating
2159 * that this is in fact a non-EOP buffer.
2161 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2162 union i40e_rx_desc *rx_desc,
2163 struct sk_buff *skb)
2165 u32 ntc = rx_ring->next_to_clean + 1;
2167 /* fetch, update, and store next to clean */
2168 ntc = (ntc < rx_ring->count) ? ntc : 0;
2169 rx_ring->next_to_clean = ntc;
2171 prefetch(I40E_RX_DESC(rx_ring, ntc));
2173 /* if we are the last buffer then there is nothing else to do */
2174 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2175 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2176 return false;
2178 rx_ring->rx_stats.non_eop_descs++;
2180 return true;
2183 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2184 struct i40e_ring *xdp_ring);
2186 int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
2188 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2190 if (unlikely(!xdpf))
2191 return I40E_XDP_CONSUMED;
2193 return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2197 * i40e_run_xdp - run an XDP program
2198 * @rx_ring: Rx ring being processed
2199 * @xdp: XDP buffer containing the frame
2201 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2202 struct xdp_buff *xdp)
2204 int err, result = I40E_XDP_PASS;
2205 struct i40e_ring *xdp_ring;
2206 struct bpf_prog *xdp_prog;
2207 u32 act;
2209 rcu_read_lock();
2210 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2212 if (!xdp_prog)
2213 goto xdp_out;
2215 prefetchw(xdp->data_hard_start); /* xdp_frame write */
2217 act = bpf_prog_run_xdp(xdp_prog, xdp);
2218 switch (act) {
2219 case XDP_PASS:
2220 break;
2221 case XDP_TX:
2222 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2223 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2224 break;
2225 case XDP_REDIRECT:
2226 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2227 result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
2228 break;
2229 default:
2230 bpf_warn_invalid_xdp_action(act);
2231 fallthrough;
2232 case XDP_ABORTED:
2233 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2234 fallthrough; /* handle aborts by dropping packet */
2235 case XDP_DROP:
2236 result = I40E_XDP_CONSUMED;
2237 break;
2238 }
2239 xdp_out:
2240 rcu_read_unlock();
2241 return ERR_PTR(-result);
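/* Editorial example: a minimal XDP program whose verdicts exercise the
 * switch above. This is BPF C, built separately with clang -target bpf;
 * it is shown for illustration only, hence the #if 0 guard.
 */
#if 0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_pass_or_drop(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* drop frames too short to carry an Ethernet header (14 bytes) */
	if (data + 14 > data_end)
		return XDP_DROP;
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
#endif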
2245 * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
2246 * @rx_ring: Rx descriptor ring to transact packets on
2247 * @rx_buffer: Rx buffer to adjust
2248 * @size: Size of adjustment
2250 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2251 struct i40e_rx_buffer *rx_buffer,
2252 unsigned int size)
2254 unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size);
2256 #if (PAGE_SIZE < 8192)
2257 rx_buffer->page_offset ^= truesize;
2258 #else
2259 rx_buffer->page_offset += truesize;
2260 #endif
2264 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
2265 * @xdp_ring: XDP Tx ring
2267 * This function updates the XDP Tx ring tail register.
2269 void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2271 /* Force memory writes to complete before letting h/w
2272 * know there are new descriptors to fetch.
2273 */
2274 wmb();
2275 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2279 * i40e_update_rx_stats - Update Rx ring statistics
2280 * @rx_ring: rx descriptor ring
2281 * @total_rx_bytes: number of bytes received
2282 * @total_rx_packets: number of packets received
2284 * This function updates the Rx ring statistics.
2286 void i40e_update_rx_stats(struct i40e_ring *rx_ring,
2287 unsigned int total_rx_bytes,
2288 unsigned int total_rx_packets)
2290 u64_stats_update_begin(&rx_ring->syncp);
2291 rx_ring->stats.packets += total_rx_packets;
2292 rx_ring->stats.bytes += total_rx_bytes;
2293 u64_stats_update_end(&rx_ring->syncp);
2294 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2295 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
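/* Editorial sketch of the reader side of the syncp protocol used above;
 * the helper name is hypothetical. A consumer samples the sequence
 * counter and retries if a writer raced in:
 */
static inline void i40e_example_read_rx_stats(struct i40e_ring *rx_ring,
					      u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
		*packets = rx_ring->stats.packets;
		*bytes = rx_ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
}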
2299 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
2300 * @rx_ring: Rx ring
2301 * @xdp_res: Result of the receive batch
2303 * This function bumps the XDP Tx tail and/or flushes the redirect map, and
2304 * should be called when a batch of packets has been processed in the
2305 * napi loop.
2307 void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
2309 if (xdp_res & I40E_XDP_REDIR)
2310 xdp_do_flush_map();
2312 if (xdp_res & I40E_XDP_TX) {
2313 struct i40e_ring *xdp_ring =
2314 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2316 i40e_xdp_ring_update_tail(xdp_ring);
2321 * i40e_inc_ntc - Advance the next_to_clean index
2322 * @rx_ring: Rx ring
2324 static void i40e_inc_ntc(struct i40e_ring *rx_ring)
2326 u32 ntc = rx_ring->next_to_clean + 1;
2328 ntc = (ntc < rx_ring->count) ? ntc : 0;
2329 rx_ring->next_to_clean = ntc;
2330 prefetch(I40E_RX_DESC(rx_ring, ntc));
2334 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2335 * @rx_ring: rx descriptor ring to transact packets on
2336 * @budget: Total limit on number of packets to process
2338 * This function provides a "bounce buffer" approach to Rx interrupt
2339 * processing. The advantage to this is that on systems that have
2340 * expensive overhead for IOMMU access this provides a means of avoiding
2341 * it by maintaining the mapping of the page to the system.
2343 * Returns amount of work completed
2345 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2347 unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
2348 struct sk_buff *skb = rx_ring->skb;
2349 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2350 unsigned int xdp_xmit = 0;
2351 bool failure = false;
2352 struct xdp_buff xdp;
2354 #if (PAGE_SIZE < 8192)
2355 frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
2356 #endif
2357 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
2359 while (likely(total_rx_packets < (unsigned int)budget)) {
2360 struct i40e_rx_buffer *rx_buffer;
2361 union i40e_rx_desc *rx_desc;
2362 int rx_buffer_pgcnt;
2363 unsigned int size;
2364 u64 qword;
2366 /* return some buffers to hardware, one at a time is too slow */
2367 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2368 failure = failure ||
2369 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2370 cleaned_count = 0;
2371 }
2373 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2375 /* status_error_len will always be zero for unused descriptors
2376 * because it's cleared in cleanup, and overlaps with hdr_addr
2377 * which is always zero because packet split isn't used. If the
2378 * hardware wrote DD then the length will be non-zero.
2379 */
2380 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2382 /* This memory barrier is needed to keep us from reading
2383 * any other fields out of the rx_desc until we have
2384 * verified the descriptor has been written back.
2385 */
2386 dma_rmb();
2388 if (i40e_rx_is_programming_status(qword)) {
2389 i40e_clean_programming_status(rx_ring,
2390 rx_desc->raw.qword[0],
2391 qword);
2392 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
2393 i40e_inc_ntc(rx_ring);
2394 i40e_reuse_rx_page(rx_ring, rx_buffer);
2395 cleaned_count++;
2396 continue;
2397 }
2399 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2400 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2401 if (!size)
2402 break;
2404 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2405 rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
2407 /* retrieve a buffer from the ring */
2408 if (!skb) {
2409 xdp.data = page_address(rx_buffer->page) +
2410 rx_buffer->page_offset;
2411 xdp.data_meta = xdp.data;
2412 xdp.data_hard_start = xdp.data -
2413 i40e_rx_offset(rx_ring);
2414 xdp.data_end = xdp.data + size;
2415 #if (PAGE_SIZE > 4096)
2416 /* At larger PAGE_SIZE, frame_sz depends on the frame size */
2417 xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
2418 #endif
2419 skb = i40e_run_xdp(rx_ring, &xdp);
2420 }
2422 if (IS_ERR(skb)) {
2423 unsigned int xdp_res = -PTR_ERR(skb);
2425 if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2426 xdp_xmit |= xdp_res;
2427 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2428 } else {
2429 rx_buffer->pagecnt_bias++;
2430 }
2431 total_rx_bytes += size;
2432 total_rx_packets++;
2433 } else if (skb) {
2434 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2435 } else if (ring_uses_build_skb(rx_ring)) {
2436 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2437 } else {
2438 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2439 }
2441 /* exit if we failed to retrieve a buffer */
2442 if (!skb) {
2443 rx_ring->rx_stats.alloc_buff_failed++;
2444 rx_buffer->pagecnt_bias++;
2445 break;
2446 }
2448 i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
2449 cleaned_count++;
2451 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2452 continue;
2454 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2455 skb = NULL;
2456 continue;
2457 }
2459 /* probably a little skewed due to removing CRC */
2460 total_rx_bytes += skb->len;
2462 /* populate checksum, VLAN, and protocol */
2463 i40e_process_skb_fields(rx_ring, rx_desc, skb);
2465 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2466 napi_gro_receive(&rx_ring->q_vector->napi, skb);
2467 skb = NULL;
2469 /* update budget accounting */
2470 total_rx_packets++;
2471 }
2473 i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
2474 rx_ring->skb = skb;
2476 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
2478 /* guarantee a trip back through this routine if there was a failure */
2479 return failure ? budget : (int)total_rx_packets;
2482 static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2484 u32 val;
2486 /* We don't bother with setting the CLEARPBA bit as the data sheet
2487 * points out doing so is "meaningless since it was already
2488 * auto-cleared". The auto-clearing happens when the interrupt is
2491 * Hardware errata 28 for also indicates that writing to a
2492 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2493 * an event in the PBA anyway so we need to rely on the automask
2494 * to hold pending events for us until the interrupt is re-enabled
2496 * The itr value is reported in microseconds, and the register
2497 * value is recorded in 2 microsecond units. For this reason we
2498 * only need to shift by the interval shift - 1 instead of the
2499 * full value.
2500 */
2501 itr &= I40E_ITR_MASK;
2503 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2504 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2505 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
2507 return val;
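/* Editorial example (hypothetical caller): an ITR of 50 usec is recorded
 * as 50 / 2 = 25 two-usec units; shifting the raw usec value left by
 * INTERVAL_SHIFT - 1 places the halved value in the interval field in a
 * single operation.
 */
static inline u32 i40e_example_rx_itr_50us(void)
{
	return i40e_buildreg_itr(I40E_RX_ITR, 50);
}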
2510 /* a small macro to shorten up some long lines */
2511 #define INTREG I40E_PFINT_DYN_CTLN
2513 /* The act of updating the ITR will cause it to immediately trigger. In order
2514 * to prevent this from throwing off adaptive update statistics we defer the
2515 * update so that it can only happen so often. So after either Tx or Rx are
2516 * updated we make the adaptive scheme wait until either the ITR completely
2517 * expires via the next_update expiration or we have been through at least
2518 * 3 interrupts.
2519 */
2520 #define ITR_COUNTDOWN_START 3
2523 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2524 * @vsi: the VSI we care about
2525 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2528 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2529 struct i40e_q_vector *q_vector)
2531 struct i40e_hw *hw = &vsi->back->hw;
2532 u32 intval;
2534 /* If we don't have MSIX, then we only need to re-enable icr0 */
2535 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2536 i40e_irq_dynamic_enable_icr0(vsi->back);
2537 return;
2538 }
2540 /* These will do nothing if dynamic updates are not enabled */
2541 i40e_update_itr(q_vector, &q_vector->tx);
2542 i40e_update_itr(q_vector, &q_vector->rx);
2544 /* This block of logic allows us to get away with only updating
2545 * one ITR value with each interrupt. The idea is to perform a
2546 * pseudo-lazy update with the following criteria.
2548 * 1. Rx is given higher priority than Tx if both are in same state
2549 * 2. If we must reduce an ITR, that reduction is given highest priority.
2550 * 3. We then give priority to increasing ITR based on amount.
2552 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2553 /* Rx ITR needs to be reduced, this is highest priority */
2554 intval = i40e_buildreg_itr(I40E_RX_ITR,
2555 q_vector->rx.target_itr);
2556 q_vector->rx.current_itr = q_vector->rx.target_itr;
2557 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2558 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2559 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2560 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2561 /* Tx ITR needs to be reduced, this is second priority
2562 * Tx ITR needs to be increased more than Rx, fourth priority
2563 */
2564 intval = i40e_buildreg_itr(I40E_TX_ITR,
2565 q_vector->tx.target_itr);
2566 q_vector->tx.current_itr = q_vector->tx.target_itr;
2567 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2568 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2569 /* Rx ITR needs to be increased, third priority */
2570 intval = i40e_buildreg_itr(I40E_RX_ITR,
2571 q_vector->rx.target_itr);
2572 q_vector->rx.current_itr = q_vector->rx.target_itr;
2573 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2574 } else {
2575 /* No ITR update, lowest priority */
2576 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2577 if (q_vector->itr_countdown)
2578 q_vector->itr_countdown--;
2579 }
2581 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2582 wr32(hw, INTREG(q_vector->reg_idx), intval);
2586 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2587 * @napi: napi struct with our devices info in it
2588 * @budget: amount of work driver is allowed to do this pass, in packets
2590 * This function will clean all queues associated with a q_vector.
2592 * Returns the amount of work done
2594 int i40e_napi_poll(struct napi_struct *napi, int budget)
2596 struct i40e_q_vector *q_vector =
2597 container_of(napi, struct i40e_q_vector, napi);
2598 struct i40e_vsi *vsi = q_vector->vsi;
2599 struct i40e_ring *ring;
2600 bool clean_complete = true;
2601 bool arm_wb = false;
2602 int budget_per_ring;
2603 int work_done = 0;
2605 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2606 napi_complete(napi);
2607 return 0;
2608 }
2610 /* Since the actual Tx work is minimal, we can give the Tx a larger
2611 * budget and be more aggressive about cleaning up the Tx descriptors.
2613 i40e_for_each_ring(ring, q_vector->tx) {
2614 bool wd = ring->xsk_pool ?
2615 i40e_clean_xdp_tx_irq(vsi, ring) :
2616 i40e_clean_tx_irq(vsi, ring, budget);
2618 if (!wd) {
2619 clean_complete = false;
2620 continue;
2621 }
2622 arm_wb |= ring->arm_wb;
2623 ring->arm_wb = false;
2626 /* Handle case where we are called by netpoll with a budget of 0 */
2627 if (budget <= 0)
2628 goto tx_only;
2630 /* normally we have 1 Rx ring per q_vector */
2631 if (unlikely(q_vector->num_ringpairs > 1))
2632 /* We attempt to distribute budget to each Rx queue fairly, but
2633 * don't allow the budget to go below 1 because that would exit
2634 * polling early.
2635 */
2636 budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
2637 else
2638 /* Max of 1 Rx ring in this q_vector so give it the budget */
2639 budget_per_ring = budget;
2641 i40e_for_each_ring(ring, q_vector->rx) {
2642 int cleaned = ring->xsk_pool ?
2643 i40e_clean_rx_irq_zc(ring, budget_per_ring) :
2644 i40e_clean_rx_irq(ring, budget_per_ring);
2646 work_done += cleaned;
2647 /* if we clean as many as budgeted, we must not be done */
2648 if (cleaned >= budget_per_ring)
2649 clean_complete = false;
2652 /* If work not completed, return budget and polling will return */
2653 if (!clean_complete) {
2654 int cpu_id = smp_processor_id();
2656 /* It is possible that the interrupt affinity has changed but,
2657 * if the cpu is pegged at 100%, polling will never exit while
2658 * traffic continues and the interrupt will be stuck on this
2659 * cpu. We check to make sure affinity is correct before we
2660 * continue to poll, otherwise we must stop polling so the
2661 * interrupt can move to the correct cpu.
2663 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2664 /* Tell napi that we are done polling */
2665 napi_complete_done(napi, work_done);
2667 /* Force an interrupt */
2668 i40e_force_wb(vsi, q_vector);
2670 /* Return budget-1 so that polling stops */
2671 return budget - 1;
2672 }
2673 tx_only:
2674 if (arm_wb) {
2675 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2676 i40e_enable_wb_on_itr(vsi, q_vector);
2677 }
2678 return budget;
2679 }
2681 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2682 q_vector->arm_wb_state = false;
2684 /* Exit the polling mode, but don't re-enable interrupts if stack might
2685 * poll us due to busy-polling
2687 if (likely(napi_complete_done(napi, work_done)))
2688 i40e_update_enable_itr(vsi, q_vector);
2690 return min(work_done, budget - 1);
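/* Editorial sketch of the budget split above (hypothetical helper): with
 * the default NAPI budget of 64 and four ring pairs on one vector, each
 * Rx ring is polled with max(64 / 4, 1) = 16 packets of budget.
 */
static inline int i40e_example_budget_per_ring(int budget, int num_ringpairs)
{
	return max_t(int, budget / num_ringpairs, 1);
}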
2694 * i40e_atr - Add a Flow Director ATR filter
2695 * @tx_ring: ring to add programming descriptor to
2696 * @skb: send buffer
2697 * @tx_flags: send tx flags
2699 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2700 u32 tx_flags)
2702 struct i40e_filter_program_desc *fdir_desc;
2703 struct i40e_pf *pf = tx_ring->vsi->back;
2704 union {
2705 unsigned char *network;
2706 struct iphdr *ipv4;
2707 struct ipv6hdr *ipv6;
2708 } hdr;
2709 struct tcphdr *th;
2710 unsigned int hlen;
2711 u32 flex_ptype, dtype_cmd;
2712 u16 i;
2713 u8 l4_proto;
2715 /* make sure ATR is enabled */
2716 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2717 return;
2719 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2720 return;
2722 /* if sampling is disabled do nothing */
2723 if (!tx_ring->atr_sample_rate)
2724 return;
2726 /* Currently only IPv4/IPv6 with TCP is supported */
2727 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2728 return;
2730 /* snag network header to get L4 type and address */
2731 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2732 skb_inner_network_header(skb) : skb_network_header(skb);
2734 /* Note: tx_flags gets modified to reflect inner protocols in
2735 * tx_enable_csum function if encap is enabled.
2737 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2738 /* access ihl as u8 to avoid unaligned access on ia64 */
2739 hlen = (hdr.network[0] & 0x0F) << 2;
2740 l4_proto = hdr.ipv4->protocol;
2741 } else {
2742 /* find the start of the innermost ipv6 header */
2743 unsigned int inner_hlen = hdr.network - skb->data;
2744 unsigned int h_offset = inner_hlen;
2746 /* this function updates h_offset to the end of the header */
2747 l4_proto =
2748 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2749 /* hlen will contain our best estimate of the tcp header */
2750 hlen = h_offset - inner_hlen;
2751 }
2753 if (l4_proto != IPPROTO_TCP)
2754 return;
2756 th = (struct tcphdr *)(hdr.network + hlen);
2758 /* Due to lack of space, no more new filters can be programmed */
2759 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2760 return;
2761 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2762 /* HW ATR eviction will take care of removing filters on FIN
2763 * and RST packets.
2764 */
2765 if (th->fin || th->rst)
2766 return;
2767 }
2769 tx_ring->atr_count++;
2771 /* sample on all syn/fin/rst packets or once every atr sample rate */
2772 if (!th->fin &&
2773 !th->syn &&
2774 !th->rst &&
2775 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2776 return;
2778 tx_ring->atr_count = 0;
2780 /* grab the next descriptor */
2781 i = tx_ring->next_to_use;
2782 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2784 i++;
2785 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2787 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2788 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2789 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2790 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2791 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2792 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2793 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2795 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2797 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2799 dtype_cmd |= (th->fin || th->rst) ?
2800 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2801 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2802 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2803 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2805 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2806 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2808 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2809 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2811 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2812 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2813 dtype_cmd |=
2814 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2815 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2816 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2817 else
2818 dtype_cmd |=
2819 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2820 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2821 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2823 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2824 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2826 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2827 fdir_desc->rsvd = cpu_to_le32(0);
2828 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2829 fdir_desc->fd_id = cpu_to_le32(0);
2833 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2834 * @skb: send buffer
2835 * @tx_ring: ring to send buffer on
2836 * @flags: the tx flags to be set
2838 * Checks the skb and set up correspondingly several generic transmit flags
2839 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2841 * Returns an error code to indicate the frame should be dropped upon error,
2842 * and otherwise returns 0 to indicate the flags have been set properly.
2844 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2845 struct i40e_ring *tx_ring,
2846 u32 *flags)
2848 __be16 protocol = skb->protocol;
2849 u32 tx_flags = 0;
2851 if (protocol == htons(ETH_P_8021Q) &&
2852 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2853 /* When HW VLAN acceleration is turned off by the user the
2854 * stack sets the protocol to 8021q so that the driver
2855 * can take any steps required to support the SW only
2856 * VLAN handling. In our case the driver doesn't need
2857 * to take any further steps so just set the protocol
2858 * to the encapsulated ethertype.
2859 */
2860 skb->protocol = vlan_get_protocol(skb);
2861 goto out;
2862 }
2864 /* if we have a HW VLAN tag being added, default to the HW one */
2865 if (skb_vlan_tag_present(skb)) {
2866 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2867 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2868 /* else if it is a SW VLAN, check the next protocol and store the tag */
2869 } else if (protocol == htons(ETH_P_8021Q)) {
2870 struct vlan_hdr *vhdr, _vhdr;
2872 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2873 if (!vhdr)
2874 return -EINVAL;
2876 protocol = vhdr->h_vlan_encapsulated_proto;
2877 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2878 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2881 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2882 goto out;
2884 /* Insert 802.1p priority into VLAN header */
2885 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2886 (skb->priority != TC_PRIO_CONTROL)) {
2887 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2888 tx_flags |= (skb->priority & 0x7) <<
2889 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2890 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2891 struct vlan_ethhdr *vhdr;
2892 int rc;
2894 rc = skb_cow_head(skb, 0);
2895 if (rc < 0)
2896 return rc;
2897 vhdr = (struct vlan_ethhdr *)skb->data;
2898 vhdr->h_vlan_TCI = htons(tx_flags >>
2899 I40E_TX_FLAGS_VLAN_SHIFT);
2900 } else {
2901 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2902 }
2903 }
2905 out:
2906 *flags = tx_flags;
2907 return 0;
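/* Editorial sketch (hypothetical helper): i40e_tx_map() later recovers
 * the tag packed into tx_flags above with exactly this mask-and-shift.
 */
static inline u16 i40e_example_tx_flags_to_tci(u32 tx_flags)
{
	return (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
	       I40E_TX_FLAGS_VLAN_SHIFT;
}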
2911 * i40e_tso - set up the tso context descriptor
2912 * @first: pointer to first Tx buffer for xmit
2913 * @hdr_len: ptr to the size of the packet header
2914 * @cd_type_cmd_tso_mss: Quad Word 1
2916 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2918 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2919 u64 *cd_type_cmd_tso_mss)
2921 struct sk_buff *skb = first->skb;
2922 u64 cd_cmd, cd_tso_len, cd_mss;
2923 union {
2924 struct iphdr *v4;
2925 struct ipv6hdr *v6;
2926 unsigned char *hdr;
2927 } ip;
2928 union {
2929 struct tcphdr *tcp;
2930 struct udphdr *udp;
2931 unsigned char *hdr;
2932 } l4;
2933 u32 paylen, l4_offset;
2934 u16 gso_segs, gso_size;
2935 int err;
2937 if (skb->ip_summed != CHECKSUM_PARTIAL)
2938 return 0;
2940 if (!skb_is_gso(skb))
2941 return 0;
2943 err = skb_cow_head(skb, 0);
2944 if (err < 0)
2945 return err;
2947 ip.hdr = skb_network_header(skb);
2948 l4.hdr = skb_transport_header(skb);
2950 /* initialize outer IP header fields */
2951 if (ip.v4->version == 4) {
2952 ip.v4->tot_len = 0;
2953 ip.v4->check = 0;
2954 } else {
2955 ip.v6->payload_len = 0;
2956 }
2958 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2959 SKB_GSO_GRE_CSUM |
2960 SKB_GSO_IPXIP4 |
2961 SKB_GSO_IPXIP6 |
2962 SKB_GSO_UDP_TUNNEL |
2963 SKB_GSO_UDP_TUNNEL_CSUM)) {
2964 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2965 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2966 l4.udp->len = 0;
2968 /* determine offset of outer transport header */
2969 l4_offset = l4.hdr - skb->data;
2971 /* remove payload length from outer checksum */
2972 paylen = skb->len - l4_offset;
2973 csum_replace_by_diff(&l4.udp->check,
2974 (__force __wsum)htonl(paylen));
2975 }
2977 /* reset pointers to inner headers */
2978 ip.hdr = skb_inner_network_header(skb);
2979 l4.hdr = skb_inner_transport_header(skb);
2981 /* initialize inner IP header fields */
2982 if (ip.v4->version == 4) {
2983 ip.v4->tot_len = 0;
2984 ip.v4->check = 0;
2985 } else {
2986 ip.v6->payload_len = 0;
2987 }
2988 }
2990 /* determine offset of inner transport header */
2991 l4_offset = l4.hdr - skb->data;
2993 /* remove payload length from inner checksum */
2994 paylen = skb->len - l4_offset;
2996 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2997 csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
2998 /* compute length of segmentation header */
2999 *hdr_len = sizeof(*l4.udp) + l4_offset;
3000 } else {
3001 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
3002 /* compute length of segmentation header */
3003 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
3004 }
3006 /* pull values out of skb_shinfo */
3007 gso_size = skb_shinfo(skb)->gso_size;
3008 gso_segs = skb_shinfo(skb)->gso_segs;
3010 /* update GSO size and bytecount with header size */
3011 first->gso_segs = gso_segs;
3012 first->bytecount += (first->gso_segs - 1) * *hdr_len;
3014 /* find the field values */
3015 cd_cmd = I40E_TX_CTX_DESC_TSO;
3016 cd_tso_len = skb->len - *hdr_len;
3017 cd_mss = gso_size;
3018 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
3019 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
3020 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
3021 return 1;
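/* Editorial worked example (illustrative numbers, hypothetical helper):
 * a TSO skb carrying 43 segments of 1448 bytes behind a 54-byte header
 * has skb->len = 43 * 1448 + 54 = 62318, and the adjustment above adds
 * (43 - 1) * 54 = 2268 bytes of replicated headers to first->bytecount,
 * matching what actually goes on the wire.
 */
static inline unsigned int i40e_example_tso_bytecount(unsigned int skb_len,
						      u16 gso_segs,
						      u8 hdr_len)
{
	return skb_len + (gso_segs - 1) * hdr_len;
}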
3025 * i40e_tsyn - set up the tsyn context descriptor
3026 * @tx_ring: ptr to the ring to send
3027 * @skb: ptr to the skb we're sending
3028 * @tx_flags: the collected send information
3029 * @cd_type_cmd_tso_mss: Quad Word 1
3031 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
3033 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
3034 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
3036 struct i40e_pf *pf;
3038 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
3039 return 0;
3041 /* Tx timestamps cannot be sampled when doing TSO */
3042 if (tx_flags & I40E_TX_FLAGS_TSO)
3043 return 0;
3045 /* only timestamp the outbound packet if the user has requested it and
3046 * we are not already transmitting a packet to be timestamped
3048 pf = i40e_netdev_to_pf(tx_ring->netdev);
3049 if (!(pf->flags & I40E_FLAG_PTP))
3050 return 0;
3052 if (pf->ptp_tx &&
3053 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
3054 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3055 pf->ptp_tx_start = jiffies;
3056 pf->ptp_tx_skb = skb_get(skb);
3057 } else {
3058 pf->tx_hwtstamp_skipped++;
3059 return 0;
3060 }
3062 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3063 I40E_TXD_CTX_QW1_CMD_SHIFT;
3065 return 1;
3069 * i40e_tx_enable_csum - Enable Tx checksum offloads
3070 * @skb: send buffer
3071 * @tx_flags: pointer to Tx flags currently set
3072 * @td_cmd: Tx descriptor command bits to set
3073 * @td_offset: Tx descriptor header offsets to set
3074 * @tx_ring: Tx descriptor ring
3075 * @cd_tunneling: ptr to context desc bits
3077 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3078 u32 *td_cmd, u32 *td_offset,
3079 struct i40e_ring *tx_ring,
3080 u32 *cd_tunneling)
3082 union {
3083 struct iphdr *v4;
3084 struct ipv6hdr *v6;
3085 unsigned char *hdr;
3086 } ip;
3087 union {
3088 struct tcphdr *tcp;
3089 struct udphdr *udp;
3090 unsigned char *hdr;
3091 } l4;
3092 unsigned char *exthdr;
3093 u32 offset, cmd = 0;
3094 __be16 frag_off;
3095 u8 l4_proto = 0;
3097 if (skb->ip_summed != CHECKSUM_PARTIAL)
3098 return 0;
3100 ip.hdr = skb_network_header(skb);
3101 l4.hdr = skb_transport_header(skb);
3103 /* compute outer L2 header size */
3104 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
3106 if (skb->encapsulation) {
3107 u32 tunnel = 0;
3108 /* define outer network header type */
3109 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3110 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3111 I40E_TX_CTX_EXT_IP_IPV4 :
3112 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3114 l4_proto = ip.v4->protocol;
3115 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3116 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3118 exthdr = ip.hdr + sizeof(*ip.v6);
3119 l4_proto = ip.v6->nexthdr;
3120 if (l4.hdr != exthdr)
3121 ipv6_skip_exthdr(skb, exthdr - skb->data,
3122 &l4_proto, &frag_off);
3125 /* define outer transport */
3126 switch (l4_proto) {
3127 case IPPROTO_UDP:
3128 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3129 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3130 break;
3131 case IPPROTO_GRE:
3132 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3133 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3134 break;
3135 case IPPROTO_IPIP:
3136 case IPPROTO_IPV6:
3137 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3138 l4.hdr = skb_inner_network_header(skb);
3139 break;
3140 default:
3141 if (*tx_flags & I40E_TX_FLAGS_TSO)
3142 return -1;
3144 skb_checksum_help(skb);
3145 return 0;
3146 }
3148 /* compute outer L3 header size */
3149 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3150 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3152 /* switch IP header pointer from outer to inner header */
3153 ip.hdr = skb_inner_network_header(skb);
3155 /* compute tunnel header size */
3156 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3157 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3159 /* indicate if we need to offload outer UDP header */
3160 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3161 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3162 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3163 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3165 /* record tunnel offload values */
3166 *cd_tunneling |= tunnel;
3168 /* switch L4 header pointer from outer to inner */
3169 l4.hdr = skb_inner_transport_header(skb);
3172 /* reset type as we transition from outer to inner headers */
3173 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3174 if (ip.v4->version == 4)
3175 *tx_flags |= I40E_TX_FLAGS_IPV4;
3176 if (ip.v6->version == 6)
3177 *tx_flags |= I40E_TX_FLAGS_IPV6;
3180 /* Enable IP checksum offloads */
3181 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3182 l4_proto = ip.v4->protocol;
3183 /* the stack computes the IP header already, the only time we
3184 * need the hardware to recompute it is in the case of TSO.
3186 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3187 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3188 I40E_TX_DESC_CMD_IIPT_IPV4;
3189 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3190 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3192 exthdr = ip.hdr + sizeof(*ip.v6);
3193 l4_proto = ip.v6->nexthdr;
3194 if (l4.hdr != exthdr)
3195 ipv6_skip_exthdr(skb, exthdr - skb->data,
3196 &l4_proto, &frag_off);
3199 /* compute inner L3 header size */
3200 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3202 /* Enable L4 checksum offloads */
3203 switch (l4_proto) {
3204 case IPPROTO_TCP:
3205 /* enable checksum offloads */
3206 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3207 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3208 break;
3209 case IPPROTO_SCTP:
3210 /* enable SCTP checksum offload */
3211 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3212 offset |= (sizeof(struct sctphdr) >> 2) <<
3213 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3214 break;
3215 case IPPROTO_UDP:
3216 /* enable UDP checksum offload */
3217 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3218 offset |= (sizeof(struct udphdr) >> 2) <<
3219 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3220 break;
3221 default:
3222 if (*tx_flags & I40E_TX_FLAGS_TSO)
3223 return -1;
3224 skb_checksum_help(skb);
3225 return 0;
3226 }
3228 *td_cmd |= cmd;
3229 *td_offset |= offset;
3231 return 1;
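/* Editorial example (hypothetical helper): for a plain TCP/IPv4 frame
 * with a 14-byte MAC header, 20-byte IP header and 20-byte TCP header,
 * the offsets above encode MACLEN = 14 / 2 = 7 words, IPLEN = 20 / 4 = 5
 * dwords and L4LEN = doff = 5 dwords.
 */
static inline u32 i40e_example_td_offset_tcpv4(void)
{
	return (7 << I40E_TX_DESC_LENGTH_MACLEN_SHIFT) |
	       (5 << I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |
	       (5 << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT);
}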
3235 * i40e_create_tx_ctx - Build the Tx context descriptor
3236 * @tx_ring: ring to create the descriptor on
3237 * @cd_type_cmd_tso_mss: Quad Word 1
3238 * @cd_tunneling: Quad Word 0 - bits 0-31
3239 * @cd_l2tag2: Quad Word 0 - bits 32-63
3241 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3242 const u64 cd_type_cmd_tso_mss,
3243 const u32 cd_tunneling, const u32 cd_l2tag2)
3245 struct i40e_tx_context_desc *context_desc;
3246 int i = tx_ring->next_to_use;
3248 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3249 !cd_tunneling && !cd_l2tag2)
3250 return;
3252 /* grab the next descriptor */
3253 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3255 i++;
3256 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3258 /* cpu_to_le32 and assign to struct fields */
3259 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3260 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3261 context_desc->rsvd = cpu_to_le16(0);
3262 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3266 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3267 * @tx_ring: the ring to be checked
3268 * @size: the size buffer we want to assure is available
3270 * Returns -EBUSY if a stop is needed, else 0
3272 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3274 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3275 /* Memory barrier before checking head and tail */
3276 smp_mb();
3278 /* Check again in a case another CPU has just made room available. */
3279 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3280 return -EBUSY;
3282 /* A reprieve! - use start_queue because it doesn't call schedule */
3283 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3284 ++tx_ring->tx_stats.restart_queue;
3285 return 0;
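/* The fast-path counterpart lives in i40e_txrx.h and, approximately,
 * only drops into the slow path above when the ring is actually short:
 *
 *	static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
 *	{
 *		if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
 *			return 0;
 *		return __i40e_maybe_stop_tx(tx_ring, size);
 *	}
 */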
3289 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3290 * @skb: send buffer
3292 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3293 * and so we need to figure out the cases where we need to linearize the skb.
3295 * For TSO we need to count the TSO header and segment payload separately.
3296 * As such we need to check cases where we have 7 fragments or more as we
3297 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3298 * the segment payload in the first descriptor, and another 7 for the
3299 * fragments.
3301 bool __i40e_chk_linearize(struct sk_buff *skb)
3303 const skb_frag_t *frag, *stale;
3304 int nr_frags, sum;
3306 /* no need to check if number of frags is less than 7 */
3307 nr_frags = skb_shinfo(skb)->nr_frags;
3308 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3309 return false;
3311 /* We need to walk through the list and validate that each group
3312 * of 6 fragments totals at least gso_size.
3314 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3315 frag = &skb_shinfo(skb)->frags[0];
3317 /* Initialize size to the negative value of gso_size minus 1. We
3318 * use this as the worst case scenerio in which the frag ahead
3319 * of us only provides one byte which is why we are limited to 6
3320 * descriptors for a single transmit as the header and previous
3321 * fragment are already consuming 2 descriptors.
3323 sum = 1 - skb_shinfo(skb)->gso_size;
3325 /* Add size of frags 0 through 4 to create our initial sum */
3326 sum += skb_frag_size(frag++);
3327 sum += skb_frag_size(frag++);
3328 sum += skb_frag_size(frag++);
3329 sum += skb_frag_size(frag++);
3330 sum += skb_frag_size(frag++);
3332 /* Walk through fragments adding latest fragment, testing it, and
3333 * then removing stale fragments from the sum.
3335 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3336 int stale_size = skb_frag_size(stale);
3338 sum += skb_frag_size(frag++);
3340 /* The stale fragment may present us with a smaller
3341 * descriptor than the actual fragment size. To account
3342 * for that we need to remove all the data on the front and
3343 * figure out what the remainder would be in the last
3344 * descriptor associated with the fragment.
3346 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3347 int align_pad = -(skb_frag_off(stale)) &
3348 (I40E_MAX_READ_REQ_SIZE - 1);
3350 sum -= align_pad;
3351 stale_size -= align_pad;
3353 do {
3354 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3355 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3356 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3357 }
3359 /* if sum is negative we failed to make sufficient progress */
3360 if (sum < 0)
3361 return true;
3363 if (!nr_frags--)
3364 break;
3366 sum -= stale_size;
3367 }
3369 return false;
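/* Editorial worked example (illustrative numbers): gso_size = 4096 with
 * eight 2048-byte frags. sum starts at 1 - 4096 = -4095 and frags 0-4
 * raise it to 5 * 2048 - 4095 = 6145; each later pass adds one new frag
 * (+2048) and retires one stale frag (-2048), so sum never goes negative
 * and the skb is sent without linearization.
 */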
3373 * i40e_tx_map - Build the Tx descriptor
3374 * @tx_ring: ring to send buffer on
3375 * @skb: send buffer
3376 * @first: first buffer info buffer to use
3377 * @tx_flags: collected send information
3378 * @hdr_len: size of the packet header
3379 * @td_cmd: the command field in the descriptor
3380 * @td_offset: offset for checksum or crc
3382 * Returns 0 on success, -1 on failure to DMA
3384 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3385 struct i40e_tx_buffer *first, u32 tx_flags,
3386 const u8 hdr_len, u32 td_cmd, u32 td_offset)
3388 unsigned int data_len = skb->data_len;
3389 unsigned int size = skb_headlen(skb);
3390 skb_frag_t *frag;
3391 struct i40e_tx_buffer *tx_bi;
3392 struct i40e_tx_desc *tx_desc;
3393 u16 i = tx_ring->next_to_use;
3394 u32 td_tag = 0;
3395 dma_addr_t dma;
3396 u16 desc_count = 1;
3398 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3399 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3400 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3401 I40E_TX_FLAGS_VLAN_SHIFT;
3404 first->tx_flags = tx_flags;
3406 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3408 tx_desc = I40E_TX_DESC(tx_ring, i);
3411 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3412 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3414 if (dma_mapping_error(tx_ring->dev, dma))
3415 goto dma_error;
3417 /* record length, and DMA address */
3418 dma_unmap_len_set(tx_bi, len, size);
3419 dma_unmap_addr_set(tx_bi, dma, dma);
3421 /* align size to end of page */
3422 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3423 tx_desc->buffer_addr = cpu_to_le64(dma);
3425 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3426 tx_desc->cmd_type_offset_bsz =
3427 build_ctob(td_cmd, td_offset,
3428 max_data, td_tag);
3430 tx_desc++;
3431 i++;
3432 desc_count++;
3434 if (i == tx_ring->count) {
3435 tx_desc = I40E_TX_DESC(tx_ring, 0);
3436 i = 0;
3437 }
3439 dma += max_data;
3440 size -= max_data;
3442 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3443 tx_desc->buffer_addr = cpu_to_le64(dma);
3444 }
3446 if (likely(!data_len))
3447 break;
3449 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3450 size, td_tag);
3452 tx_desc++;
3453 i++;
3454 desc_count++;
3456 if (i == tx_ring->count) {
3457 tx_desc = I40E_TX_DESC(tx_ring, 0);
3458 i = 0;
3459 }
3461 size = skb_frag_size(frag);
3462 data_len -= size;
3464 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3465 DMA_TO_DEVICE);
3467 tx_bi = &tx_ring->tx_bi[i];
3468 }
3470 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3472 i++;
3473 if (i == tx_ring->count)
3474 i = 0;
3476 tx_ring->next_to_use = i;
3478 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3480 /* write last descriptor with EOP bit */
3481 td_cmd |= I40E_TX_DESC_CMD_EOP;
3483 /* We OR these values together to check both against 4 (WB_STRIDE)
3484 * below. This is safe since we don't re-use desc_count afterwards.
3486 desc_count |= ++tx_ring->packet_stride;
3488 if (desc_count >= WB_STRIDE) {
3489 /* write last descriptor with RS bit set */
3490 td_cmd |= I40E_TX_DESC_CMD_RS;
3491 tx_ring->packet_stride = 0;
3494 tx_desc->cmd_type_offset_bsz =
3495 build_ctob(td_cmd, td_offset, size, td_tag);
3497 skb_tx_timestamp(skb);
3499 /* Force memory writes to complete before letting h/w know there
3500 * are new descriptors to fetch.
3502 * We also use this memory barrier to make certain all of the
3503 * status bits have been updated before next_to_watch is written.
3507 /* set next_to_watch value indicating a packet is present */
3508 first->next_to_watch = tx_desc;
3510 /* notify HW of packet */
3511 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
3512 writel(i, tx_ring->tail);
3513 }
3515 return 0;
3517 dma_error:
3518 dev_info(tx_ring->dev, "TX DMA map failed\n");
3520 /* clear dma mappings for failed tx_bi map */
3521 for (;;) {
3522 tx_bi = &tx_ring->tx_bi[i];
3523 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3524 if (tx_bi == first)
3525 break;
3526 if (i == 0)
3527 i = tx_ring->count;
3528 i--;
3529 }
3531 tx_ring->next_to_use = i;
3533 return -1;
3537 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3538 * @xdpf: data to transmit
3539 * @xdp_ring: XDP Tx ring
3541 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
3542 struct i40e_ring *xdp_ring)
3544 u16 i = xdp_ring->next_to_use;
3545 struct i40e_tx_buffer *tx_bi;
3546 struct i40e_tx_desc *tx_desc;
3547 void *data = xdpf->data;
3548 u32 size = xdpf->len;
3549 dma_addr_t dma;
3551 if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
3552 xdp_ring->tx_stats.tx_busy++;
3553 return I40E_XDP_CONSUMED;
3554 }
3555 dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
3556 if (dma_mapping_error(xdp_ring->dev, dma))
3557 return I40E_XDP_CONSUMED;
3559 tx_bi = &xdp_ring->tx_bi[i];
3560 tx_bi->bytecount = size;
3561 tx_bi->gso_segs = 1;
3562 tx_bi->xdpf = xdpf;
3564 /* record length, and DMA address */
3565 dma_unmap_len_set(tx_bi, len, size);
3566 dma_unmap_addr_set(tx_bi, dma, dma);
3568 tx_desc = I40E_TX_DESC(xdp_ring, i);
3569 tx_desc->buffer_addr = cpu_to_le64(dma);
3570 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3571 | I40E_TXD_CMD,
3572 size, 0);
3574 /* Make certain all of the status bits have been updated
3575 * before next_to_watch is written.
3576 */
3577 smp_wmb();
3579 xdp_ring->xdp_tx_active++;
3580 i++;
3581 if (i == xdp_ring->count)
3582 i = 0;
3584 tx_bi->next_to_watch = tx_desc;
3585 xdp_ring->next_to_use = i;
3587 return I40E_XDP_TX;
3591 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3592 * @skb: send buffer
3593 * @tx_ring: ring to send buffer on
3595 * Returns NETDEV_TX_OK if sent, else an error code
3597 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3598 struct i40e_ring *tx_ring)
3600 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3601 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3602 struct i40e_tx_buffer *first;
3603 u32 td_offset = 0;
3604 int tso, count;
3605 u32 td_cmd = 0;
3606 u8 hdr_len = 0;
3607 int tsyn;
3608 u32 tx_flags = 0;
3609 __be16 protocol;
3611 /* prefetch the data, we'll need it later */
3612 prefetch(skb->data);
3614 i40e_trace(xmit_frame_ring, skb, tx_ring);
3616 count = i40e_xmit_descriptor_count(skb);
3617 if (i40e_chk_linearize(skb, count)) {
3618 if (__skb_linearize(skb)) {
3619 dev_kfree_skb_any(skb);
3620 return NETDEV_TX_OK;
3622 count = i40e_txd_use_count(skb->len);
3623 tx_ring->tx_stats.tx_linearize++;
3626 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3627 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3628 * + 4 desc gap to avoid the cache line where head is,
3629 * + 1 desc for context descriptor,
3630 * otherwise try next time
3632 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3633 tx_ring->tx_stats.tx_busy++;
3634 return NETDEV_TX_BUSY;
3637 /* record the location of the first descriptor for this packet */
3638 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3639 first->skb = skb;
3640 first->bytecount = skb->len;
3641 first->gso_segs = 1;
3643 /* prepare the xmit flags */
3644 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3645 goto out_drop;
3647 /* obtain protocol of skb */
3648 protocol = vlan_get_protocol(skb);
3650 /* setup IPv4/IPv6 offloads */
3651 if (protocol == htons(ETH_P_IP))
3652 tx_flags |= I40E_TX_FLAGS_IPV4;
3653 else if (protocol == htons(ETH_P_IPV6))
3654 tx_flags |= I40E_TX_FLAGS_IPV6;
3656 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3658 if (tso < 0)
3659 goto out_drop;
3660 else if (tso)
3661 tx_flags |= I40E_TX_FLAGS_TSO;
3663 /* Always offload the checksum, since it's in the data descriptor */
3664 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3665 tx_ring, &cd_tunneling);
3666 if (tso < 0)
3667 goto out_drop;
3669 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3671 if (tsyn)
3672 tx_flags |= I40E_TX_FLAGS_TSYN;
3674 /* always enable CRC insertion offload */
3675 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3677 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3678 cd_tunneling, cd_l2tag2);
3680 /* Add Flow Director ATR if it's enabled.
3682 * NOTE: this must always be directly before the data descriptor.
3683 */
3684 i40e_atr(tx_ring, skb, tx_flags);
3686 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3687 td_cmd, td_offset))
3688 goto cleanup_tx_tstamp;
3690 return NETDEV_TX_OK;
3692 out_drop:
3693 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3694 dev_kfree_skb_any(first->skb);
3695 first->skb = NULL;
3696 cleanup_tx_tstamp:
3697 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3698 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3700 dev_kfree_skb_any(pf->ptp_tx_skb);
3701 pf->ptp_tx_skb = NULL;
3702 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3703 }
3705 return NETDEV_TX_OK;
3709 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3710 * @skb: send buffer
3711 * @netdev: network interface device structure
3713 * Returns NETDEV_TX_OK if sent, else an error code
3715 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3717 struct i40e_netdev_priv *np = netdev_priv(netdev);
3718 struct i40e_vsi *vsi = np->vsi;
3719 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3721 /* hardware can't handle really short frames, hardware padding works
3722 * beyond this point
3723 */
3724 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3725 return NETDEV_TX_OK;
3727 return i40e_xmit_frame_ring(skb, tx_ring);
3731 * i40e_xdp_xmit - Implements ndo_xdp_xmit
3732 * @dev: netdev
3733 * @n: number of frames
3734 * @frames: array of XDP buffer pointers
3735 * @flags: XDP extra info
3737 * Returns number of frames successfully sent. Frames that fail are
3738 * freed via the XDP return API.
3740 * For error cases, a negative errno code is returned and no-frames
3741 * are transmitted (caller must handle freeing frames).
3743 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3744 u32 flags)
3746 struct i40e_netdev_priv *np = netdev_priv(dev);
3747 unsigned int queue_index = smp_processor_id();
3748 struct i40e_vsi *vsi = np->vsi;
3749 struct i40e_pf *pf = vsi->back;
3750 struct i40e_ring *xdp_ring;
3751 int drops = 0;
3752 int i;
3754 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3755 return -ENETDOWN;
3757 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
3758 test_bit(__I40E_CONFIG_BUSY, pf->state))
3759 return -ENXIO;
3761 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3762 return -EINVAL;
3764 xdp_ring = vsi->xdp_rings[queue_index];
3766 for (i = 0; i < n; i++) {
3767 struct xdp_frame *xdpf = frames[i];
3768 int err;
3770 err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
3771 if (err != I40E_XDP_TX) {
3772 xdp_return_frame_rx_napi(xdpf);
3773 drops++;
3774 }
3775 }
3777 if (unlikely(flags & XDP_XMIT_FLUSH))
3778 i40e_xdp_ring_update_tail(xdp_ring);
3780 return n - drops;
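/* Editorial sketch: this entry point is wired up through the netdev ops
 * table in i40e_main.c, roughly as below (abbreviated, illustration only).
 */
#if 0
static const struct net_device_ops i40e_netdev_ops = {
	.ndo_start_xmit	= i40e_lan_xmit_frame,
	.ndo_xdp_xmit	= i40e_xdp_xmit,
	/* ... */
};
#endif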