// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
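/* Descriptors queued through this file carry I40E_TXD_CMD: EOP marks the
 * final descriptor of a frame and RS asks the hardware to report completion
 * status back, so software can later reclaim the buffer in the clean paths.
 */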
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 *
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}
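/* Programming a sideband filter always consumes two descriptors on the FDIR
 * ring: the filter program descriptor built above, followed by a dummy data
 * descriptor carrying the raw packet the hardware parses for filter fields.
 * i40e_program_fdir_filter() below issues that pair.
 */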
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}
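/* The helpers below build minimal dummy frames for each supported flow type:
 * 14 bytes of Ethernet header followed by an IPv4 header (and, where
 * relevant, a transport header) with the protocol and length fields
 * pre-filled; only the addresses, ports, and optional flex word are patched
 * in per filter before the frame is handed to i40e_program_fdir_filter().
 */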
#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	udp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_udp4_filter_cnt++;
	else
		pf->fd_udp4_filter_cnt--;

	return 0;
}
#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	tcp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add) {
		pf->fd_tcp4_filter_cnt++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
	} else {
		pf->fd_tcp4_filter_cnt--;
	}

	return 0;
}
#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct sctphdr *sctp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};

	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
	       + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	sctp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	sctp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_sctp4_filter_cnt++;
	else
		pf->fd_sctp4_filter_cnt--;

	return 0;
}
#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip;
		ip->daddr = fd_data->dst_ip;

		if (fd_data->flex_filter) {
			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
			__be16 pattern = fd_data->flex_word;
			u16 off = fd_data->flex_offset;

			*((__force __be16 *)(payload + off)) = pattern;
		}

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			/* The packet buffer wasn't added to the ring so we
			 * need to free it now.
			 */
			kfree(raw_packet);
			return -EOPNOTSUPP;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (add)
		pf->fd_ip4_filter_cnt++;
	else
		pf->fd_ip4_filter_cnt--;

	return 0;
}
/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
				 input->ip4_proto);
			return -EINVAL;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
			 input->flow_type);
		return -EINVAL;
	}

	/* The buffer allocated here will normally be freed by
	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
	 * completion. In the event of an error adding the buffer to the FDIR
	 * ring, it will immediately be freed. It may also be freed by
	 * i40e_clean_tx_ring() when closing the VSI.
	 */
	return ret;
}
/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @qword0_raw: qword0
 * @qword1: qword1 after le_to_cpu
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				  u64 qword1, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	struct i40e_32b_rx_wb_qw0 *qw0;
	u32 fcnt_prog, fcnt_avail;
	u32 error;

	qw0 = (struct i40e_32b_rx_wb_qw0 *)&qword0_raw;
	error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id);
		if (qw0->hi_dword.fd_id != 0 ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if (qw0->hi_dword.fd_id == 0 &&
		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
			/* These set_bit() calls aren't atomic with the
			 * test_bit() here, but worse case we potentially
			 * disable ATR and queue a flush right after SB
			 * support is re-enabled. That shouldn't cause an
			 * issue in practice
			 */
			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
					      pf->state))
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 qw0->hi_dword.fd_id);
	}
}
/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else if (ring_is_xdp(ring))
			xdp_return_frame(tx_buffer->xdpf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}
/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		i40e_xsk_clean_tx_ring(tx_ring);
	} else {
		/* ring already cleared, nothing to do */
		if (!tx_ring->tx_bi)
			return;

		/* Free all the Tx ring sk_buffs */
		for (i = 0; i < tx_ring->count; i++)
			i40e_unmap_and_free_tx_resource(tx_ring,
							&tx_ring->tx_bi[i]);
	}

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: use SW variables
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw) {
		head = i40e_get_head(ring);
		tail = readl(ring->tail);
	} else {
		head = ring->next_to_clean;
		tail = ring->next_to_use;
	}

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
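/* The wraparound arithmetic above: on a 512-entry ring with head = 500 and
 * tail = 10, the pending count is 10 + 512 - 500 = 22 descriptors.
 */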
/**
 * i40e_detect_recover_hung - Function to detect and recover hung_queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function is to check each of
 * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
 **/
void i40e_detect_recover_hung(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		tx_ring = vsi->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				i40e_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to i40e_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
			    i40e_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	int i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb/XDP data */
		if (ring_is_xdp(tx_ring))
			xdp_return_frame(tx_buf->xdpf);
		else
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			i40e_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	i40e_update_tx_stats(tx_ring, total_packets, total_bytes);
	i40e_arm_wb(tx_ring, vsi, budget);

	if (ring_is_xdp(tx_ring))
		return !!budget;

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
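	/* Only wake the queue once enough descriptors are free for at least
	 * two worst-case frames (DESC_NEEDED is sized for one frame's maximum
	 * descriptor usage), so we don't immediately stop the queue again.
	 */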
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}
/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}
/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}
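/* Two flavors of writeback flushing are used above: i40e_enable_wb_on_itr()
 * only arms WB_ON_ITR so completed descriptors are written back on the next
 * ITR expiry without raising an interrupt, while i40e_force_wb() triggers a
 * software interrupt to flush writeback immediately (this is also what the
 * hung-queue detector uses to kick a stalled queue).
 */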
static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
					struct i40e_ring_container *rc)
{
	return &q_vector->rx == rc;
}

static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
{
	unsigned int divisor;

	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
		break;
	case I40E_LINK_SPEED_25GB:
	case I40E_LINK_SPEED_20GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
		break;
	default:
	case I40E_LINK_SPEED_10GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
		break;
	case I40E_LINK_SPEED_1GB:
	case I40E_LINK_SPEED_100MB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
		break;
	}

	return divisor;
}
/**
 * i40e_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_update_itr(struct i40e_q_vector *q_vector,
			    struct i40e_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * for Tx we want to pull the delay down and default to high latency.
	 */
	itr = i40e_container_is_rx(q_vector, rc) ?
	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (i40e_container_is_rx(q_vector, rc)) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
			itr = I40E_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
		     I40E_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
			itr &= I40E_ITR_ADAPTIVE_LATENCY;
			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= I40E_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr /= 2;
		itr &= I40E_ITR_MASK;
		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
			itr = I40E_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = I40E_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * give the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}

	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
		avg_wire_size /= 2;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
	       I40E_ITR_ADAPTIVE_MIN_INC;

	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
		itr &= I40E_ITR_ADAPTIVE_LATENCY;
		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}
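/* Worked example for the adjust_by_size path above, assuming
 * I40E_ITR_ADAPTIVE_MIN_INC is 2 (its value in i40e_txrx.h): small 64-byte
 * packets yield avg_wire_size = 4096, the 250K ints/sec starting point.
 * On a 40Gb link i40e_itr_divisor() returns 2 * 1024 = 2048, so
 * DIV_ROUND_UP(4096, 2048) * 2 = 4 is added to the ITR value before it is
 * clamped against I40E_ITR_ADAPTIVE_MAX_USECS.
 */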
static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi[idx];
}

/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = i40e_rx_bi(rx_ring, nta);

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;

	rx_ring->rx_stats.page_reuse_count++;

	/* clear contents of buffer_info */
	old_buff->page = NULL;
}
/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @qword0_raw: qword0
 * @qword1: qword1 representing status_error_len in CPU ordering
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 **/
void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
				   u64 qword1)
{
	u8 id;

	id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, qword0_raw, qword1, id);
}
/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}
int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;

	rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi ? 0 : -ENOMEM;
}

static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
}
/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		i40e_xsk_clean_rx_ring(rx_ring);
		goto skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     i40e_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     I40E_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

skip_free:
	if (rx_ring->xsk_pool)
		i40e_clear_rx_bi_zc(rx_ring);
	else
		i40e_clear_rx_bi(rx_ring);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}
/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == I40E_VSI_MAIN)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}
/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		return -ENOMEM;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* XDP RX-queue info only needed for RX rings exposed to XDP */
	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				       rx_ring->queue_index);
		if (err < 0)
			return err;
	}

	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;

	return 0;
}
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
/**
 * i40e_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 **/
static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
}

static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
					   unsigned int size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = i40e_rx_offset(rx_ring) ?
		SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}
/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 i40e_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 I40E_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, i40e_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = i40e_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
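/* Refcount bookkeeping note: page_ref_add(page, USHRT_MAX - 1) together with
 * pagecnt_bias = USHRT_MAX front-loads the page's reference count once at
 * allocation time. Handing a fragment up the stack then only decrements the
 * local pagecnt_bias instead of touching the atomic page refcount; the two
 * are reconciled in i40e_can_reuse_rx_page() and when the page is drained.
 */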
/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}
/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
				    struct sk_buff *skb,
				    union i40e_rx_desc *rx_desc)
{
	struct i40e_rx_ptype_decoded decoded;
	u32 rx_error, rx_status;
	bool ipv4, ipv6;
	u8 ptype;
	u64 qword;

	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
	rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
		   I40E_RXD_QW1_ERROR_SHIFT;
	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
		    I40E_RXD_QW1_STATUS_SHIFT;
	decoded = decode_rx_desc_ptype(ptype);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	/* Rx csum enabled and ip headers found? */
	if (!(vsi->netdev->features & NETIF_F_RXCSUM))
		return;

	/* did the hardware decode the packet and checksum? */
	if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
		return;

	/* both known and outer_ip must be set for the below code to work */
	if (!(decoded.known && decoded.outer_ip))
		return;

	ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
	ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
	       (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

	if (ipv4 &&
	    (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
			 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
		goto checksum_fail;

	/* likely incorrect csum if alternate IP extension headers found */
	if (ipv6 &&
	    rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
		/* don't increment checksum err here, non-fatal err */
		return;

	/* there was some L4 error, count error and punt packet to the stack */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
		goto checksum_fail;

	/* handle packets that were not able to be checksummed due
	 * to arrival speed, in this case the stack can compute
	 * the csum.
	 */
	if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
		return;

	/* If there is an outer header present that might contain a checksum
	 * we need to bump the checksum level by 1 to reflect the fact that
	 * we are indicating we validated the inner checksum.
	 */
	if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
		skb->csum_level = 1;

	/* Only report checksum unnecessary for TCP, UDP, or SCTP */
	switch (decoded.inner_prot) {
	case I40E_RX_PTYPE_INNER_PROT_TCP:
	case I40E_RX_PTYPE_INNER_PROT_UDP:
	case I40E_RX_PTYPE_INNER_PROT_SCTP:
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		fallthrough;
	default:
		break;
	}

	return;

checksum_fail:
	vsi->back->hw_csum_rx_error++;
}
/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}
/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}
/**
 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;
	u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
	u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
	u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
		      I40E_RXD_QW1_PTYPE_SHIFT;

	if (unlikely(tsynvalid))
		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);

	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
		__le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(vlan_tag));
	}

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
 * i40e_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 * @rx_desc: pointer to the EOP Rx descriptor
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
				 union i40e_rx_desc *rx_desc)
{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	/* ERR_MASK will only have valid bits if EOP set, and
	 * what we are doing here is actually checking
	 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
	 * the error field
	 */
	if (unlikely(i40e_test_staterr(rx_desc,
				       BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
/**
 * i40e_page_is_reusable - check if any reuse is possible
 * @page: page struct to check
 *
 * A page is not reusable if it was allocated under low memory
 * conditions, or it's not in the same NUMA node as this CPU.
 **/
static inline bool i40e_page_is_reusable(struct page *page)
{
	return (page_to_nid(page) == numa_mem_id()) &&
		!page_is_pfmemalloc(page);
}
/**
 * i40e_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 *
 * @rx_buffer: buffer containing the page
 *
 * If page is reusable, rx_buffer->page_offset is adjusted to point to
 * an unused region in the page.
 *
 * For small pages, @truesize will be a constant value, half the size
 * of the memory at page. We'll attempt to alternate between high and
 * low halves of the page, with one half ready for use by the hardware
 * and the other half being consumed by the stack. We use the page
 * ref count to determine whether the stack has finished consuming the
 * portion of this page that was passed up with a previous packet. If
 * the page ref count is >1, we'll assume the "other" half page is
 * still busy, and this page cannot be reused.
 *
 * For larger pages, @truesize will be the actual space used by the
 * received packet (adjusted upward to an even multiple of the cache
 * line size). This will advance through the page by the amount
 * actually consumed by the received packets while there is still
 * space for a buffer. Each region of larger pages will be used at
 * most once, after which the page will not be reused.
 *
 * In either case, if the page is reusable its refcount is increased.
 **/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* Is any reuse possible? */
	if (unlikely(!i40e_page_is_reusable(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define I40E_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
/**
 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * It will just attach the page as a frag to the skb.
 *
 * The function will then update the page offset.
 **/
static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
			     struct i40e_rx_buffer *rx_buffer,
			     struct sk_buff *skb,
			     unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
#endif

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}
/**
 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
 * @rx_ring: rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 **/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
						 const unsigned int size)
{
	struct i40e_rx_buffer *rx_buffer;

	rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
	prefetch_page_address(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}
/**
 * i40e_construct_skb - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 **/
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
					  struct i40e_rx_buffer *rx_buffer,
					  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* Note, we get here by enabling legacy-rx via:
	 *
	 *    ethtool --set-priv-flags <dev> legacy-rx on
	 *
	 * In this mode, we currently get 0 extra XDP headroom as
	 * opposed to having legacy-rx off, where we process XDP
	 * packets going to stack via i40e_build_skb(). The latter
	 * provides us currently with 192 bytes of headroom.
	 *
	 * For i40e_construct_skb() mode it means that the
	 * xdp->data_meta will always point to xdp->data, since
	 * the helper cannot expand the head. Should this ever
	 * change in future for legacy-rx mode on, then lets also
	 * add xdp->data_meta handling here.
	 */

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       I40E_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > I40E_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data,
					  I40E_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data,
	       ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				rx_buffer->page_offset + headlen,
				size, truesize);

		/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		/* buffer is unused, reset bias back to rx_buffer */
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}
/**
 * i40e_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buffer: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 **/
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *rx_buffer,
				      struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);

	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

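/* Worked example (illustrative note, not upstream text): with 4K pages
 * the ring splits each page in half, so truesize is simply
 * i40e_rx_pg_size() / 2 = 2048 and the two halves are toggled between
 * with "page_offset ^= truesize".  On large-page systems the buffer is
 * sized to the frame instead; e.g. for a 1514-byte frame landing behind
 * the 192 bytes of headroom mentioned above:
 *
 *	truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
 *		   SKB_DATA_ALIGN(192 + 1514)
 *
 * and page_offset simply advances by that amount.
 */
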
/**
 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buffer.  It will
 * either recycle the buffer or unmap it and free the associated resources.
 **/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *rx_buffer)
{
	if (i40e_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		i40e_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     i40e_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
		/* clear contents of buffer_info */
		rx_buffer->page = NULL;
	}
}

/**
 * i40e_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
			    union i40e_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(I40E_RX_DESC(rx_ring, ntc));

	/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
	if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
			      struct i40e_ring *xdp_ring);

int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

	if (unlikely(!xdpf))
		return I40E_XDP_CONSUMED;

	return i40e_xmit_xdp_ring(xdpf, xdp_ring);
}

/**
 * i40e_run_xdp - run an XDP program
 * @rx_ring: Rx ring being processed
 * @xdp: XDP buffer containing the frame
 **/
static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
				    struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	if (!xdp_prog)
		goto xdp_out;

	prefetchw(xdp->data_hard_start); /* xdp_frame write */

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
xdp_out:
	rcu_read_unlock();
	return ERR_PTR(-result);
}

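/* Verdict-encoding note (illustrative, not upstream text): the XDP
 * result is smuggled through error-pointer space.  I40E_XDP_PASS is 0,
 * so ERR_PTR(-0) == NULL sends the frame down the normal skb build
 * path, while any other verdict decodes in the caller as:
 *
 *	skb = i40e_run_xdp(rx_ring, &xdp);
 *	if (IS_ERR(skb)) {
 *		unsigned int xdp_res = -PTR_ERR(skb);
 *		// I40E_XDP_CONSUMED, I40E_XDP_TX or I40E_XDP_REDIR
 *	}
 *
 * avoiding an extra out-parameter on the hot path.
 */
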
/**
 * i40e_rx_buffer_flip - adjusted rx_buffer to point to an unused region
 * @rx_ring: Rx ring
 * @rx_buffer: Rx buffer to adjust
 * @size: Size of adjustment
 **/
static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
				struct i40e_rx_buffer *rx_buffer,
				unsigned int size)
{
	unsigned int truesize = i40e_rx_frame_truesize(rx_ring, size);

#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

/**
 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
 * @xdp_ring: XDP Tx ring
 *
 * This function updates the XDP Tx ring tail register.
 **/
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
{
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();
	writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}

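/* Barrier note (illustrative, not upstream text): the wmb() above
 * already orders the descriptor writes ahead of the doorbell, which is
 * why the cheaper writel_relaxed() can be used here; a plain writel()
 * would imply an equivalent barrier a second time.
 */
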
/**
 * i40e_update_rx_stats - Update Rx ring statistics
 * @rx_ring: rx descriptor ring
 * @total_rx_bytes: number of bytes received
 * @total_rx_packets: number of packets received
 *
 * This function updates the Rx ring statistics.
 **/
void i40e_update_rx_stats(struct i40e_ring *rx_ring,
			  unsigned int total_rx_bytes,
			  unsigned int total_rx_packets)
{
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	rx_ring->q_vector->rx.total_packets += total_rx_packets;
	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
}

/**
 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
 * @rx_ring: Rx ring
 * @xdp_res: Result of the receive batch
 *
 * This function bumps XDP Tx tail and/or flush redirect map, and
 * should be called when a batch of packets has been processed in the
 * napi loop.
 **/
void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
{
	if (xdp_res & I40E_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_res & I40E_XDP_TX) {
		struct i40e_ring *xdp_ring =
			rx_ring->vsi->xdp_rings[rx_ring->queue_index];

		i40e_xdp_ring_update_tail(xdp_ring);
	}
}

/**
 * i40e_inc_ntc: Advance the next_to_clean index
 * @rx_ring: Rx ring
 **/
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(I40E_RX_DESC(rx_ring, ntc));
}

/**
 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_xmit = 0;
	bool failure = false;
	struct xdp_buff xdp;

#if (PAGE_SIZE < 8192)
	xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
#endif
	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct i40e_rx_buffer *rx_buffer;
		union i40e_rx_desc *rx_desc;
		unsigned int size;
		u64 qword;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then the length will be non-zero
		 */
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
			i40e_inc_ntc(rx_ring);
			i40e_reuse_rx_page(rx_ring, rx_buffer);
			cleaned_count++;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
		rx_buffer = i40e_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (!skb) {
			xdp.data = page_address(rx_buffer->page) +
				   rx_buffer->page_offset;
			xdp.data_meta = xdp.data;
			xdp.data_hard_start = xdp.data -
					      i40e_rx_offset(rx_ring);
			xdp.data_end = xdp.data + size;
#if (PAGE_SIZE > 4096)
			/* At larger PAGE_SIZE, frame_sz depends on frame size */
			xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
#endif
			skb = i40e_run_xdp(rx_ring, &xdp);
		}

		if (IS_ERR(skb)) {
			unsigned int xdp_res = -PTR_ERR(skb);

			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
			} else {
				rx_buffer->pagecnt_bias++;
			}
			total_rx_bytes += size;
			total_rx_packets++;
		} else if (skb) {
			i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
		} else if (ring_uses_build_skb(rx_ring)) {
			skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
		} else {
			skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
		}

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		i40e_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		if (i40e_is_non_eop(rx_ring, rx_desc, skb))
			continue;

		if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		i40e_process_skb_fields(rx_ring, rx_desc, skb);

		i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
		skb = NULL;

		/* update budget accounting */
		total_rx_packets++;
	}

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	rx_ring->skb = skb;

	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_packets;
}

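/* Return-value note (illustrative, not upstream text): handing the full
 * budget back tells NAPI we are not done, so a refill failure forces
 * another poll pass instead of silently stalling the ring.  E.g. with
 * budget = 64 and 10 packets cleaned:
 *
 *	no failure: returns 10, NAPI may complete and re-arm the IRQ
 *	failure:    returns 64, NAPI polls again and the buffer refill
 *		    at the top of the loop gets another chance
 */
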
static inline u32 i40e_buildreg_itr(const int type, u16 itr)
{
	u32 val;

	/* We don't bother with setting the CLEARPBA bit as the data sheet
	 * points out doing so is "meaningless since it was already
	 * auto-cleared". The auto-clearing happens when the interrupt is
	 * asserted.
	 *
	 * Hardware errata 28 also indicates that writing to a
	 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
	 * an event in the PBA anyway so we need to rely on the automask
	 * to hold pending events for us until the interrupt is re-enabled.
	 *
	 * The itr value is reported in microseconds, and the register
	 * value is recorded in 2 microsecond units. For this reason we
	 * only need to shift by the interval shift - 1 instead of the
	 * full value.
	 */
	itr &= I40E_ITR_MASK;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
	      (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));

	return val;
}

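/* Worked example (illustrative, not upstream text): the INTERVAL field
 * counts in 2 usec units, so an ITR of 50 usecs must land in the
 * register as 25.  Because the field starts at bit
 * I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT, shifting the usec value by
 * (INTERVAL_SHIFT - 1) both positions the field and divides by two in
 * a single step:
 *
 *	(50 << (shift - 1)) == (50 / 2) << shift == 25 << shift
 */
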
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN

/* The act of updating the ITR will cause it to immediately trigger. In order
 * to prevent this from throwing off adaptive update statistics we defer the
 * update so that it can only happen so often. So after either Tx or Rx are
 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least
 * 3 interrupts.
 */
#define ITR_COUNTDOWN_START 3

/**
 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 **/
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
					  struct i40e_q_vector *q_vector)
{
	struct i40e_hw *hw = &vsi->back->hw;
	u32 intval;

	/* If we don't have MSIX, then we only need to re-enable icr0 */
	if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
		i40e_irq_dynamic_enable_icr0(vsi->back);
		return;
	}

	/* These will do nothing if dynamic updates are not enabled */
	i40e_update_itr(q_vector, &q_vector->tx);
	i40e_update_itr(q_vector, &q_vector->rx);

	/* This block of logic allows us to get away with only updating
	 * one ITR value with each interrupt. The idea is to perform a
	 * pseudo-lazy update with the following criteria.
	 *
	 * 1. Rx is given higher priority than Tx if both are in same state
	 * 2. If we must reduce an ITR that is given highest priority.
	 * 3. We then give priority to increasing ITR based on amount.
	 */
	if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
		/* Rx ITR needs to be reduced, this is highest priority */
		intval = i40e_buildreg_itr(I40E_RX_ITR,
					   q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
		   ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
		    (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
		/* Tx ITR needs to be reduced, this is second priority
		 * Tx ITR needs to be increased more than Rx, fourth priority
		 */
		intval = i40e_buildreg_itr(I40E_TX_ITR,
					   q_vector->tx.target_itr);
		q_vector->tx.current_itr = q_vector->tx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
		/* Rx ITR needs to be increased, third priority */
		intval = i40e_buildreg_itr(I40E_RX_ITR,
					   q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
	} else {
		/* No ITR update, lowest priority */
		intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
		if (q_vector->itr_countdown)
			q_vector->itr_countdown--;
	}

	if (!test_bit(__I40E_VSI_DOWN, vsi->state))
		wr32(hw, INTREG(q_vector->reg_idx), intval);
}

/**
 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40e_napi_poll(struct napi_struct *napi, int budget)
{
	struct i40e_q_vector *q_vector =
			       container_of(napi, struct i40e_q_vector, napi);
	struct i40e_vsi *vsi = q_vector->vsi;
	struct i40e_ring *ring;
	bool clean_complete = true;
	bool arm_wb = false;
	int budget_per_ring;
	int work_done = 0;

	if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
		napi_complete(napi);
		return 0;
	}

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	i40e_for_each_ring(ring, q_vector->tx) {
		bool wd = ring->xsk_pool ?
			  i40e_clean_xdp_tx_irq(vsi, ring) :
			  i40e_clean_tx_irq(vsi, ring, budget);

		if (!wd) {
			clean_complete = false;
			continue;
		}
		arm_wb |= ring->arm_wb;
		ring->arm_wb = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (budget <= 0)
		goto tx_only;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ringpairs > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	i40e_for_each_ring(ring, q_vector->rx) {
		int cleaned = ring->xsk_pool ?
			      i40e_clean_rx_irq_zc(ring, budget_per_ring) :
			      i40e_clean_rx_irq(ring, budget_per_ring);

		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		int cpu_id = smp_processor_id();

		/* It is possible that the interrupt affinity has changed but,
		 * if the cpu is pegged at 100%, polling will never exit while
		 * traffic continues and the interrupt will be stuck on this
		 * cpu.  We check to make sure affinity is correct before we
		 * continue to poll, otherwise we must stop polling so the
		 * interrupt can move to the correct cpu.
		 */
		if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
			/* Tell napi that we are done polling */
			napi_complete_done(napi, work_done);

			/* Force an interrupt */
			i40e_force_wb(vsi, q_vector);

			/* Return budget-1 so that polling stops */
			return budget - 1;
		}
tx_only:
		if (arm_wb) {
			q_vector->tx.ring[0].tx_stats.tx_force_wb++;
			i40e_enable_wb_on_itr(vsi, q_vector);
		}
		return budget;
	}

	if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
		q_vector->arm_wb_state = false;

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		i40e_update_enable_itr(vsi, q_vector);

	return min(work_done, budget - 1);
}

/**
 * i40e_atr - Add a Flow Director ATR filter
 * @tx_ring:  ring to add programming descriptor to
 * @skb:      send buffer
 * @tx_flags: send tx flags
 **/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	union {
		unsigned char *network;
		struct iphdr *ipv4;
		struct ipv6hdr *ipv6;
	} hdr;
	struct tcphdr *th;
	unsigned int hlen;
	u32 flex_ptype, dtype_cmd;
	int l4_proto;
	u16 i;

	/* make sure ATR is enabled */
	if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
		return;

	if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		return;

	/* if sampling is disabled do nothing */
	if (!tx_ring->atr_sample_rate)
		return;

	/* Currently only IPv4/IPv6 with TCP is supported */
	if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
		return;

	/* snag network header to get L4 type and address */
	hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
		      skb_inner_network_header(skb) : skb_network_header(skb);

	/* Note: tx_flags gets modified to reflect inner protocols in
	 * tx_enable_csum function if encap is enabled.
	 */
	if (tx_flags & I40E_TX_FLAGS_IPV4) {
		/* access ihl as u8 to avoid unaligned access on ia64 */
		hlen = (hdr.network[0] & 0x0F) << 2;
		l4_proto = hdr.ipv4->protocol;
	} else {
		/* find the start of the innermost ipv6 header */
		unsigned int inner_hlen = hdr.network - skb->data;
		unsigned int h_offset = inner_hlen;

		/* this function updates h_offset to the end of the header */
		l4_proto =
		  ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
		/* hlen will contain our best estimate of the tcp header */
		hlen = h_offset - inner_hlen;
	}

	if (l4_proto != IPPROTO_TCP)
		return;

	th = (struct tcphdr *)(hdr.network + hlen);

	/* Due to lack of space, no more new filters can be programmed */
	if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		return;
	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
		/* HW ATR eviction will take care of removing filters on FIN
		 * and RST packets.
		 */
		if (th->fin || th->rst)
			return;
	}

	tx_ring->atr_count++;

	/* sample on all syn/fin/rst packets or once every atr sample rate */
	if (!th->fin &&
	    !th->syn &&
	    !th->rst &&
	    (tx_ring->atr_count < tx_ring->atr_sample_rate))
		return;

	tx_ring->atr_count = 0;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
		     I40E_TXD_FLTR_QW0_QINDEX_MASK;
	flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
		      (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
		      (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
		       I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= (th->fin || th->rst) ?
		     (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
		     (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		      I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
		     I40E_TXD_FLTR_QW1_DEST_SHIFT;

	dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
		     I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
	if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
		dtype_cmd |=
			((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
	else
		dtype_cmd |=
			((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK;

	if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
		dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(0);
}

/**
 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 * @flags:   the tx flags to be set
 *
 * Checks the skb and set up correspondingly several generic transmit flags
 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error,
 * otherwise returns 0 to indicate the flags have been set properly.
 **/
static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
					     struct i40e_ring *tx_ring,
					     u32 *flags)
{
	__be16 protocol = skb->protocol;
	u32 tx_flags = 0;

	if (protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off by the user the
		 * stack sets the protocol to 8021q so that the driver
		 * can take any steps required to support the SW only
		 * VLAN handling.  In our case the driver doesn't need
		 * to take any further steps so just set the protocol
		 * to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		goto out;
	}

	/* if we have a HW VLAN tag being added, default to the HW one */
	if (skb_vlan_tag_present(skb)) {
		tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_HW_VLAN;
	/* else if it is a SW VLAN, check the next protocol and store the tag */
	} else if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, _vhdr;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
		if (!vhdr)
			return -EINVAL;

		protocol = vhdr->h_vlan_encapsulated_proto;
		tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= I40E_TX_FLAGS_SW_VLAN;
	}

	if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
		goto out;

	/* Insert 802.1p priority into VLAN header */
	if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
	    (skb->priority != TC_PRIO_CONTROL)) {
		tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
		tx_flags |= (skb->priority & 0x7) <<
				I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
		if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(tx_flags >>
						 I40E_TX_FLAGS_VLAN_SHIFT);
		} else {
			tx_flags |= I40E_TX_FLAGS_HW_VLAN;
		}
	}

out:
	*flags = tx_flags;
	return 0;
}

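/* tx_flags layout (illustrative note, not upstream text): the 16-bit
 * VLAN TCI is parked in the high bits of tx_flags via
 * I40E_TX_FLAGS_VLAN_SHIFT, so for VID 100 with priority 5 the stored
 * value is roughly:
 *
 *	tci = (5 << VLAN_PRIO_SHIFT) | 100;	// PCP lives in TCI bits 15:13
 *	tx_flags |= tci << I40E_TX_FLAGS_VLAN_SHIFT;
 *
 * which is why the DCB rewrite above can patch just the PCP bits with
 * I40E_TX_FLAGS_VLAN_PRIO_MASK without disturbing the VID.
 */
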
/**
 * i40e_tso - set up the tso context descriptor
 * @first:    pointer to first Tx buffer for xmit
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
		    u64 *cd_type_cmd_tso_mss)
{
	struct sk_buff *skb = first->skb;
	u64 cd_cmd, cd_tso_len, cd_mss;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	u16 gso_segs, gso_size;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_offset = l4.hdr - skb->data;

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_offset;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
	}

	/* pull values out of skb_shinfo */
	gso_size = skb_shinfo(skb)->gso_size;
	gso_segs = skb_shinfo(skb)->gso_segs;

	/* update GSO size and bytecount with header size */
	first->gso_segs = gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* find the field values */
	cd_cmd = I40E_TX_CTX_DESC_TSO;
	cd_tso_len = skb->len - *hdr_len;
	cd_mss = gso_size;
	*cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
				(cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
				(cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);

	return 1;
}

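/* Worked example (illustrative, not upstream text): for a TSO skb with
 * gso_segs = 4 and a 66-byte header (*hdr_len), the wire will carry the
 * header four times but the skb only counts it once, hence:
 *
 *	first->bytecount += (4 - 1) * 66;
 *
 * so Tx byte statistics reflect what actually leaves the port.
 */
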
/**
 * i40e_tsyn - set up the tsyn context descriptor
 * @tx_ring:  ptr to the ring to send
 * @skb:      ptr to the skb we're sending
 * @tx_flags: the collected send information
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
 **/
static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
		     u32 tx_flags, u64 *cd_type_cmd_tso_mss)
{
	struct i40e_pf *pf;

	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return 0;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (tx_flags & I40E_TX_FLAGS_TSO)
		return 0;

	/* only timestamp the outbound packet if the user has requested it and
	 * we are not already transmitting a packet to be timestamped
	 */
	pf = i40e_netdev_to_pf(tx_ring->netdev);
	if (!(pf->flags & I40E_FLAG_PTP))
		return 0;

	if (pf->ptp_tx &&
	    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		pf->ptp_tx_start = jiffies;
		pf->ptp_tx_skb = skb_get(skb);
	} else {
		pf->tx_hwtstamp_skipped++;
		return 0;
	}

	*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
				I40E_TXD_CTX_QW1_CMD_SHIFT;

	return 1;
}

/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send packet
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
			       u32 *td_cmd, u32 *td_offset,
			       struct i40e_ring *tx_ring,
			       u32 *cd_tunneling)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	__be16 frag_off;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

	if (skb->encapsulation) {
		u32 tunnel = 0;
		/* define outer network header type */
		if (*tx_flags & I40E_TX_FLAGS_IPV4) {
			tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
				  I40E_TX_CTX_EXT_IP_IPV4 :
				  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;

			l4_proto = ip.v4->protocol;
		} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
			tunnel |= I40E_TX_CTX_EXT_IP_IPV6;

			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			if (l4.hdr != exthdr)
				ipv6_skip_exthdr(skb, exthdr - skb->data,
						 &l4_proto, &frag_off);
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (*tx_flags & I40E_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			  I40E_TXD_CTX_QW0_NATLEN_SHIFT;

		/* indicate if we need to offload outer UDP header */
		if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;

		/* record tunnel offload values */
		*cd_tunneling |= tunnel;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		*tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			*tx_flags |= I40E_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			*tx_flags |= I40E_TX_FLAGS_IPV6;
	}

	/* Enable IP checksum offloads */
	if (*tx_flags & I40E_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
		       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
		       I40E_TX_DESC_CMD_IIPT_IPV4;
	} else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
		cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;

		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto, &frag_off);
	}

	/* compute inner L3 header size */
	offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
		offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
		offset |= (sizeof(struct sctphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
		offset |= (sizeof(struct udphdr) >> 2) <<
			  I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
		break;
	default:
		if (*tx_flags & I40E_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	*td_cmd |= cmd;
	*td_offset |= offset;

	return 1;
}

/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring: ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
			       const u64 cd_type_cmd_tso_mss,
			       const u32 cd_tunneling, const u32 cd_l2tag2)
{
	struct i40e_tx_context_desc *context_desc;
	int i = tx_ring->next_to_use;

	if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
	    !cd_tunneling && !cd_l2tag2)
		return;

	/* grab the next descriptor */
	context_desc = I40E_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* cpu_to_le32 and assign to struct fields */
	context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
	context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
	context_desc->rsvd = cpu_to_le16(0);
	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}

/**
 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;
	return 0;
}

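/* Pairing note (illustrative, not upstream text): the smp_mb() above
 * pairs with the ordering in the Tx cleanup path, closing the classic
 * stop/wake race:
 *
 *	xmit:  stop queue -> mb -> re-check space -> maybe restart
 *	clean: free descriptors -> mb -> see stopped queue -> wake
 *
 * Without the barrier the re-check could read a stale descriptor count
 * and leave the queue stopped forever.
 */
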
/**
 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb:      send buffer
 *
 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 **/
bool __i40e_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= I40E_MAX_BUFFER_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize size to the negative value of gso_size minus 1.  We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > I40E_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(I40E_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > I40E_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}

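/* Worked example (illustrative, not upstream text): with gso_size 2000
 * and seven 300-byte frags, sum starts at 1 - 2000 = -1999, the first
 * five frags bring it to -499 and the sixth to -199, which is still
 * negative, so the function returns true and the skb is linearized
 * before it can exceed 8 descriptors for a packet on the wire.
 */
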
/**
 * i40e_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 *
 * Returns 0 on success, -1 on failure to DMA
 **/
static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
			      struct i40e_tx_buffer *first, u32 tx_flags,
			      const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
	unsigned int data_len = skb->data_len;
	unsigned int size = skb_headlen(skb);
	skb_frag_t *frag;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	u16 i = tx_ring->next_to_use;
	u32 td_tag = 0;
	dma_addr_t dma;
	u16 desc_count = 1;

	if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
		td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
		td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
			 I40E_TX_FLAGS_VLAN_SHIFT;
	}

	first->tx_flags = tx_flags;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_bi = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_bi, len, size);
		dma_unmap_addr_set(tx_bi, dma, dma);

		/* align size to end of page */
		max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
		tx_desc->buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				build_ctob(td_cmd, td_offset,
					   max_data, td_tag);

			tx_desc++;
			i++;
			desc_count++;

			if (i == tx_ring->count) {
				tx_desc = I40E_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
							  size, td_tag);

		tx_desc++;
		i++;
		desc_count++;

		if (i == tx_ring->count) {
			tx_desc = I40E_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_bi = &tx_ring->tx_bi[i];
	}

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* write last descriptor with EOP bit */
	td_cmd |= I40E_TX_DESC_CMD_EOP;

	/* We OR these values together to check both against 4 (WB_STRIDE)
	 * below. This is safe since we don't re-use desc_count afterwards.
	 */
	desc_count |= ++tx_ring->packet_stride;

	if (desc_count >= WB_STRIDE) {
		/* write last descriptor with RS bit set */
		td_cmd |= I40E_TX_DESC_CMD_RS;
		tx_ring->packet_stride = 0;
	}

	tx_desc->cmd_type_offset_bsz =
			build_ctob(td_cmd, td_offset, size, td_tag);

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return 0;

dma_error:
	dev_info(tx_ring->dev, "TX DMA map failed\n");

	/* clear dma mappings for failed tx_bi map */
	for (;;) {
		tx_bi = &tx_ring->tx_bi[i];
		i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
		if (tx_bi == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;

	return -1;
}

/**
 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
 * @xdpf: frame data to transmit
 * @xdp_ring: XDP Tx ring
 **/
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
			      struct i40e_ring *xdp_ring)
{
	u16 i = xdp_ring->next_to_use;
	struct i40e_tx_buffer *tx_bi;
	struct i40e_tx_desc *tx_desc;
	void *data = xdpf->data;
	u32 size = xdpf->len;
	dma_addr_t dma;

	if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
		xdp_ring->tx_stats.tx_busy++;
		return I40E_XDP_CONSUMED;
	}
	dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(xdp_ring->dev, dma))
		return I40E_XDP_CONSUMED;

	tx_bi = &xdp_ring->tx_bi[i];
	tx_bi->bytecount = size;
	tx_bi->gso_segs = 1;
	tx_bi->xdpf = xdpf;

	/* record length, and DMA address */
	dma_unmap_len_set(tx_bi, len, size);
	dma_unmap_addr_set(tx_bi, dma, dma);

	tx_desc = I40E_TX_DESC(xdp_ring, i);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
						  | I40E_TXD_CMD,
						  size, 0);

	/* Make certain all of the status bits have been updated
	 * before next_to_watch is written.
	 */
	smp_wmb();

	xdp_ring->xdp_tx_active++;
	i++;
	if (i == xdp_ring->count)
		i = 0;

	tx_bi->next_to_watch = tx_desc;
	xdp_ring->next_to_use = i;

	return I40E_XDP_TX;
}

/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
					struct i40e_ring *tx_ring)
{
	u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
	u32 cd_tunneling = 0, cd_l2tag2 = 0;
	struct i40e_tx_buffer *first;
	u32 td_offset = 0;
	int tso, count;
	int tsyn;
	u32 td_cmd = 0;
	u32 tx_flags = 0;
	__be16 protocol;
	u8 hdr_len = 0;

	/* prefetch the data, we'll need it later */
	prefetch(skb->data);

	i40e_trace(xmit_frame_ring, skb, tx_ring);

	count = i40e_xmit_descriptor_count(skb);
	if (i40e_chk_linearize(skb, count)) {
		if (__skb_linearize(skb)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		count = i40e_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_bi[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	/* prepare the xmit flags */
	if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
		goto out_drop;

	/* obtain protocol of skb */
	protocol = vlan_get_protocol(skb);

	/* setup IPv4/IPv6 offloads */
	if (protocol == htons(ETH_P_IP))
		tx_flags |= I40E_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		tx_flags |= I40E_TX_FLAGS_IPV6;

	tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);

	if (tso < 0)
		goto out_drop;
	else if (tso)
		tx_flags |= I40E_TX_FLAGS_TSO;

	/* Always offload the checksum, since it's in the data descriptor */
	tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
				  tx_ring, &cd_tunneling);
	if (tso < 0)
		goto out_drop;

	tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);

	if (tsyn)
		tx_flags |= I40E_TX_FLAGS_TSYN;

	/* always enable CRC insertion offload */
	td_cmd |= I40E_TX_DESC_CMD_ICRC;

	i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
			   cd_tunneling, cd_l2tag2);

	/* Add Flow Director ATR if it's enabled.
	 *
	 * NOTE: this must always be directly before the data descriptor.
	 */
	i40e_atr(tx_ring, skb, tx_flags);

	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
			td_cmd, td_offset))
		goto cleanup_tx_tstamp;

	return NETDEV_TX_OK;

out_drop:
	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
cleanup_tx_tstamp:
	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);

		dev_kfree_skb_any(pf->ptp_tx_skb);
		pf->ptp_tx_skb = NULL;
		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
	}

	return NETDEV_TX_OK;
}

/**
 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, I40E_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return i40e_xmit_frame_ring(skb, tx_ring);
}

/**
 * i40e_xdp_xmit - Implements ndo_xdp_xmit
 * @dev: netdev
 * @n: number of frames
 * @frames: array of XDP buffer pointers
 * @flags: XDP extra info
 *
 * Returns number of frames successfully sent. Frames that fail are
 * freed via the XDP return API.
 *
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 **/
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *xdp_ring;
	int drops = 0;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
		if (err != I40E_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		i40e_xdp_ring_update_tail(xdp_ring);

	return n - drops;
}

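/* Contract note (illustrative sketch, not upstream text): on a negative
 * errno nothing was consumed and the caller must free all frames
 * itself; on success the return value counts frames queued, and any
 * frame that could not be queued was already released above via
 * xdp_return_frame_rx_napi().  A typical caller:
 *
 *	int sent = i40e_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
 *
 *	if (sent < 0)
 *		handle_error(sent);	// e.g. -ENETDOWN, -ENXIO, -EINVAL
 */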