// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/prefetch.h>
#include <net/busy_poll.h>
#include <linux/bpf_trace.h>
#include <net/xdp.h>
#include "i40e.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"

static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
				u32 td_tag)
{
	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
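
/* Illustrative sketch, not part of the driver: a transmit path typically
 * ORs the final-descriptor command bits into td_cmd and stores the result
 * of build_ctob() in the descriptor, roughly as follows (the field values
 * shown are hypothetical):
 *
 *	td_cmd |= I40E_TXD_CMD;
 *	tx_desc->cmd_type_offset_bsz =
 *			build_ctob(td_cmd, td_offset, size, td_tag);
 */
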
/**
 * i40e_fdir - Generate a Flow Director descriptor based on fdata
 * @tx_ring: Tx ring to send buffer on
 * @fdata: Flow director filter data
 * @add: Indicate if we are adding a rule or deleting one
 **/
static void i40e_fdir(struct i40e_ring *tx_ring,
		      struct i40e_fdir_filter *fdata, bool add)
{
	struct i40e_filter_program_desc *fdir_desc;
	struct i40e_pf *pf = tx_ring->vsi->back;
	u32 flex_ptype, dtype_cmd;
	u16 i;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
		     (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
		      (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);

	flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
		      (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);

	/* Use LAN VSI Id if not programmed by user */
	flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
		      ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
		       I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);

	dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;

	dtype_cmd |= add ?
		     I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT :
		     I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
		     I40E_TXD_FLTR_QW1_PCMD_SHIFT;

	dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
		     (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);

	dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
		     (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);

	if (fdata->cnt_index) {
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
		dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
			     ((u32)fdata->cnt_index <<
			      I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
	}

	fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
	fdir_desc->rsvd = cpu_to_le32(0);
	fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
	fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
}
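
/* Programming note (summary of the flow below): installing or removing a
 * sideband rule consumes two descriptors on the FDIR Tx ring - the filter
 * program descriptor written above, immediately followed by a "dummy"
 * data descriptor carrying a raw packet that the hardware parses to
 * extract the filter fields (see i40e_program_fdir_filter()).
 */
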
#define I40E_FD_CLEAN_DELAY 10
/**
 * i40e_program_fdir_filter - Program a Flow Director filter
 * @fdir_data: Packet data that will be filter parameters
 * @raw_packet: the pre-allocated packet buffer for FDir
 * @pf: The PF pointer
 * @add: True for add/update, False for remove
 **/
static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
				    u8 *raw_packet, struct i40e_pf *pf,
				    bool add)
{
	struct i40e_tx_buffer *tx_buf, *first;
	struct i40e_tx_desc *tx_desc;
	struct i40e_ring *tx_ring;
	struct i40e_vsi *vsi;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd = 0;
	u16 i;

	/* find existing FDIR VSI */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (!vsi)
		return -ENOENT;

	tx_ring = vsi->tx_rings[0];
	dev = tx_ring->dev;

	/* we need two descriptors to add/del a filter and we can wait */
	for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet,
			     I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_fail;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_bi[i];
	i40e_fdir(tx_ring, fdir_data, add);

	/* Now program a dummy descriptor */
	i = tx_ring->next_to_use;
	tx_desc = I40E_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_bi[i];

	tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;

	memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));

	/* record length, and DMA address */
	dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buffer_addr = cpu_to_le64(dma);
	td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;

	tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
	tx_buf->raw_buf = (void *)raw_packet;

	tx_desc->cmd_type_offset_bsz =
		build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.
	 */
	wmb();

	/* Mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);
	return 0;

dma_fail:
	return -1;
}

#define IP_HEADER_OFFSET 14
#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct udphdr *udp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
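
	/* Layout of the 42-byte dummy frame above: 14 bytes of Ethernet
	 * header (ethertype 0x0800), a 20-byte IPv4 header (0x45 = version
	 * 4 with IHL 5, total length 0x1c = 28, protocol 0x11 = UDP) and
	 * an 8-byte UDP header; addresses and ports are zeroed here and
	 * filled in below.
	 */
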
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	udp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	udp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_udp4_filter_cnt++;
	else
		pf->fd_udp4_filter_cnt--;

	return 0;
}

#define I40E_TCPIP_DUMMY_PACKET_LEN 54
/**
 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
				   struct i40e_fdir_filter *fd_data,
				   bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct tcphdr *tcp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
		0x0, 0x72, 0, 0, 0, 0};
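
	/* Layout of the 54-byte dummy frame above: 14 bytes of Ethernet
	 * header, a 20-byte IPv4 header (total length 0x28 = 40, protocol
	 * 0x6 = TCP) and a 20-byte TCP header; addresses and ports are
	 * zeroed here and filled in below.
	 */
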
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
	      + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	tcp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	tcp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add) {
		pf->fd_tcp4_filter_cnt++;
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
		set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
	} else {
		pf->fd_tcp4_filter_cnt--;
	}

	return 0;
}

#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
/**
 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
				    struct i40e_fdir_filter *fd_data,
				    bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct sctphdr *sctp;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	/* Dummy packet */
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
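
	/* Layout of the 46-byte dummy frame above: 14 bytes of Ethernet
	 * header, a 20-byte IPv4 header (total length 0x20 = 32, protocol
	 * 0x84 = SCTP) and a 12-byte SCTP common header; addresses and
	 * ports are zeroed here and filled in below.
	 */
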
	raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
	if (!raw_packet)
		return -ENOMEM;
	memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);

	ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
	sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
	       + sizeof(struct iphdr));

	ip->daddr = fd_data->dst_ip;
	sctp->dest = fd_data->dst_port;
	ip->saddr = fd_data->src_ip;
	sctp->source = fd_data->src_port;

	if (fd_data->flex_filter) {
		u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
		__be16 pattern = fd_data->flex_word;
		u16 off = fd_data->flex_offset;

		*((__force __be16 *)(payload + off)) = pattern;
	}

	fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
	ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
			 fd_data->pctype, fd_data->fd_id, ret);
		/* Free the packet buffer since it wasn't added to the ring */
		kfree(raw_packet);
		return -EOPNOTSUPP;
	} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
		if (add)
			dev_info(&pf->pdev->dev,
				 "Filter OK for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
		else
			dev_info(&pf->pdev->dev,
				 "Filter deleted for PCTYPE %d loc = %d\n",
				 fd_data->pctype, fd_data->fd_id);
	}

	if (add)
		pf->fd_sctp4_filter_cnt++;
	else
		pf->fd_sctp4_filter_cnt--;

	return 0;
}

#define I40E_IP_DUMMY_PACKET_LEN 34
/**
 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
 * a specific flow spec
 * @vsi: pointer to the targeted VSI
 * @fd_data: the flow director data required for the FDir descriptor
 * @add: true adds a filter, false removes it
 *
 * Returns 0 if the filters were successfully added or removed
 **/
static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
				  struct i40e_fdir_filter *fd_data,
				  bool add)
{
	struct i40e_pf *pf = vsi->back;
	struct iphdr *ip;
	u8 *raw_packet;
	int ret;
	int i;
	static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
		0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0};

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
	     i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
		raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
		if (!raw_packet)
			return -ENOMEM;
		memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
		ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);

		ip->saddr = fd_data->src_ip;
		ip->daddr = fd_data->dst_ip;

		if (fd_data->flex_filter) {
			u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
			__be16 pattern = fd_data->flex_word;
			u16 off = fd_data->flex_offset;

			*((__force __be16 *)(payload + off)) = pattern;
		}

		fd_data->pctype = i;
		ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
				 fd_data->pctype, fd_data->fd_id, ret);
			/* The packet buffer wasn't added to the ring so we
			 * need to free it now.
			 */
			kfree(raw_packet);
			return -EOPNOTSUPP;
		} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
			if (add)
				dev_info(&pf->pdev->dev,
					 "Filter OK for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
			else
				dev_info(&pf->pdev->dev,
					 "Filter deleted for PCTYPE %d loc = %d\n",
					 fd_data->pctype, fd_data->fd_id);
		}
	}

	if (add)
		pf->fd_ip4_filter_cnt++;
	else
		pf->fd_ip4_filter_cnt--;

	return 0;
}

/**
 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
 * @vsi: pointer to the targeted VSI
 * @input: filter to add or delete
 * @add: true adds a filter, false removes it
 *
 **/
int i40e_add_del_fdir(struct i40e_vsi *vsi,
		      struct i40e_fdir_filter *input, bool add)
{
	struct i40e_pf *pf = vsi->back;
	int ret;

	switch (input->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
		break;
	case UDP_V4_FLOW:
		ret = i40e_add_del_fdir_udpv4(vsi, input, add);
		break;
	case SCTP_V4_FLOW:
		ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
		break;
	case IP_USER_FLOW:
		switch (input->ip4_proto) {
		case IPPROTO_TCP:
			ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
			break;
		case IPPROTO_UDP:
			ret = i40e_add_del_fdir_udpv4(vsi, input, add);
			break;
		case IPPROTO_SCTP:
			ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
			break;
		case IPPROTO_IP:
			ret = i40e_add_del_fdir_ipv4(vsi, input, add);
			break;
		default:
			/* We cannot support masking based on protocol */
			dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
				 input->ip4_proto);
			return -EINVAL;
		}
		break;
	default:
		dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
			 input->flow_type);
		return -EINVAL;
	}

	/* The buffer allocated here will normally be freed by
	 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
	 * completion. In the event of an error adding the buffer to the FDIR
	 * ring, it will immediately be freed. It may also be freed by
	 * i40e_clean_tx_ring() when closing the VSI.
	 */
	return ret;
}

/**
 * i40e_fd_handle_status - check the Programming Status for FD
 * @rx_ring: the Rx ring for this descriptor
 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
 * @prog_id: the id originally used for programming
 *
 * This is used to verify if the FD programming or invalidation
 * requested by SW to the HW is successful or not and take actions accordingly.
 **/
static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				  union i40e_rx_desc *rx_desc, u8 prog_id)
{
	struct i40e_pf *pf = rx_ring->vsi->back;
	struct pci_dev *pdev = pf->pdev;
	u32 fcnt_prog, fcnt_avail;
	u32 error;
	u64 qw;

	qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
		I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;

	if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
		pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
		if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
				 pf->fd_inv);

		/* Check if the programming error is for ATR.
		 * If so, auto disable ATR and set a state for
		 * flush in progress. Next time we come here if flush is in
		 * progress do nothing, once flush is complete the state will
		 * be cleared.
		 */
		if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
			return;

		pf->fd_add_err++;
		/* store the current atr filter count */
		pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);

		if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
		    test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) {
			/* These set_bit() calls aren't atomic with the
			 * test_bit() here, but worst case we potentially
			 * disable ATR and queue a flush right after SB
			 * support is re-enabled. That shouldn't cause an
			 * issue in practice.
			 */
			set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
			set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		}

		/* filter programming failed most likely due to table full */
		fcnt_prog = i40e_get_global_fd_count(pf);
		fcnt_avail = pf->fdir_pf_filter_count;
		/* If ATR is running fcnt_prog can quickly change,
		 * if we are very close to full, it makes sense to disable
		 * FD ATR/SB and then re-enable it when there is room.
		 */
		if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    !test_and_set_bit(__I40E_FD_SB_AUTO_DISABLED,
					      pf->state))
				if (I40E_DEBUG_FD & pf->hw.debug_mask)
					dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
		}
	} else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
				 rx_desc->wb.qword0.hi_dword.fd_id);
	}
}

/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
					    struct i40e_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buffer->raw_buf);
		else if (ring_is_xdp(ring))
			xdp_return_frame(tx_buffer->xdpf);
		else
			dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}

	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * i40e_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_bi)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * i40e_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40e_free_tx_resources(struct i40e_ring *tx_ring)
{
	i40e_clean_tx_ring(tx_ring);
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;

	if (tx_ring->desc) {
		dma_free_coherent(tx_ring->dev, tx_ring->size,
				  tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * i40e_get_tx_pending - how many tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: use SW variables
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
	u32 head, tail;

	if (!in_sw) {
		head = i40e_get_head(ring);
		tail = readl(ring->tail);
	} else {
		head = ring->next_to_clean;
		tail = ring->next_to_use;
	}

	if (head != tail)
		return (head < tail) ?
			tail - head : (tail + ring->count - head);

	return 0;
}
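
/* Worked example (illustrative, values hypothetical): on a 512-entry
 * ring with head = 508 and tail = 4, head > tail, so the pending count
 * is tail + ring->count - head = 4 + 512 - 508 = 8 descriptors.
 */
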
/**
 * i40e_detect_recover_hung - Function to detect and recover hung_queues
 * @vsi: pointer to vsi struct with tx queues
 *
 * VSI has netdev and netdev has TX queues. This function is to check each of
 * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
 **/
void i40e_detect_recover_hung(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct net_device *netdev;
	unsigned int i;
	int packets;

	if (!vsi)
		return;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	if (!netif_carrier_ok(netdev))
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		tx_ring = vsi->tx_rings[i];
		if (tx_ring && tx_ring->desc) {
			/* If packet counter has not changed the queue is
			 * likely stalled, so force an interrupt for this
			 * queue.
			 *
			 * prev_pkt_ctr would be negative if there was no
			 * pending work.
			 */
			packets = tx_ring->stats.packets & INT_MAX;
			if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
				i40e_force_wb(vsi, tx_ring->q_vector);
				continue;
			}

			/* Memory barrier between read of packet count and call
			 * to i40e_get_tx_pending()
			 */
			smp_rmb();
			tx_ring->tx_stats.prev_pkt_ctr =
			  i40e_get_tx_pending(tx_ring, true) ? packets : -1;
		}
	}
}

/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
			      struct i40e_ring *tx_ring, int napi_budget)
{
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_head;
	struct i40e_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = vsi->work_limit;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* we have caught up to head, no work left to do */
		if (tx_head == tx_desc)
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_packets += tx_buf->gso_segs;

		/* free the skb/XDP data */
		if (ring_is_xdp(tx_ring))
			xdp_return_frame(tx_buf->xdpf);
		else
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			i40e_trace(clean_tx_irq_unmap,
				   tx_ring, tx_desc, tx_buf);

			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_bi;
				tx_desc = I40E_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	tx_ring->q_vector->tx.total_bytes += total_bytes;
	tx_ring->q_vector->tx.total_packets += total_packets;

	if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
		/* check to see if there are < 4 descriptors
		 * waiting to be written back, then kick the hardware to force
		 * them to be written back in case we stay in NAPI.
		 * In this mode on X722 we do not enable Interrupt.
		 */
		unsigned int j = i40e_get_tx_pending(tx_ring, false);

		if (budget &&
		    ((j / WB_STRIDE) == 0) && (j > 0) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
		    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
			tx_ring->arm_wb = true;
	}

	if (ring_is_xdp(tx_ring))
		return !!budget;

	/* notify netdev of completed buffers */
	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
		     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__I40E_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++tx_ring->tx_stats.restart_queue;
		}
	}

	return !!budget;
}

/**
 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 *
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
				  struct i40e_q_vector *q_vector)
{
	u16 flags = q_vector->tx.ring[0].flags;
	u32 val;

	if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
		return;

	if (q_vector->arm_wb_state)
		return;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx),
		     val);
	} else {
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}

/**
 * i40e_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 *
 **/
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
			  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val);
	} else {
		u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
			  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
			  I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
			  I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
			  /* allow 00 to be written to the index */

		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
}

static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
					struct i40e_ring_container *rc)
{
	return &q_vector->rx == rc;
}

static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
{
	unsigned int divisor;

	switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
		break;
	case I40E_LINK_SPEED_25GB:
	case I40E_LINK_SPEED_20GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
		break;
	default:
	case I40E_LINK_SPEED_10GB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
		break;
	case I40E_LINK_SPEED_1GB:
	case I40E_LINK_SPEED_100MB:
		divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
		break;
	}

	return divisor;
}
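
/* Illustrative sketch: assuming I40E_ITR_ADAPTIVE_MIN_INC is 2 (its value
 * in i40e_txrx.h at the time of writing), a 10GB link yields a divisor of
 * 2 * 256 = 512, so an avg_wire_size-derived value of e.g. 6816 in
 * i40e_update_itr() below contributes DIV_ROUND_UP(6816, 512) * 2 = 28
 * to the ITR value.
 */
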
/**
 * i40e_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @rc: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void i40e_update_itr(struct i40e_q_vector *q_vector,
			    struct i40e_ring_container *rc)
{
	unsigned int avg_wire_size, packets, bytes, itr;
	unsigned long next_update = jiffies;

	/* If we don't have any rings just leave ourselves set for maximum
	 * possible latency so we take ourselves out of the equation.
	 */
	if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
		return;

	/* For Rx we want to push the delay up and default to low latency.
	 * For Tx we want to pull the delay down and default to high latency.
	 */
	itr = i40e_container_is_rx(q_vector, rc) ?
	      I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
	      I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;

	/* If we didn't update within up to 1 - 2 jiffies we can assume
	 * that either packets are coming in so slow there hasn't been
	 * any work, or that there is so much work that NAPI is dealing
	 * with interrupt moderation and we don't need to do anything.
	 */
	if (time_after(next_update, rc->next_update))
		goto clear_counts;

	/* If itr_countdown is set it means we programmed an ITR within
	 * the last 4 interrupt cycles. This has a side effect of us
	 * potentially firing an early interrupt. In order to work around
	 * this we need to throw out any data received for a few
	 * interrupts following the update.
	 */
	if (q_vector->itr_countdown) {
		itr = rc->target_itr;
		goto clear_counts;
	}

	packets = rc->total_packets;
	bytes = rc->total_bytes;

	if (i40e_container_is_rx(q_vector, rc)) {
		/* If Rx there are 1 to 4 packets and bytes are less than
		 * 9000 assume insufficient data to use bulk rate limiting
		 * approach unless Tx is already in bulk rate limiting. We
		 * are likely latency driven.
		 */
		if (packets && packets < 4 && bytes < 9000 &&
		    (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
			itr = I40E_ITR_ADAPTIVE_LATENCY;
			goto adjust_by_size;
		}
	} else if (packets < 4) {
		/* If we have Tx and Rx ITR maxed and Tx ITR is running in
		 * bulk mode and we are receiving 4 or fewer packets just
		 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
		 * that the Rx can relax.
		 */
		if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
		    (q_vector->rx.target_itr & I40E_ITR_MASK) ==
		     I40E_ITR_ADAPTIVE_MAX_USECS)
			goto clear_counts;
	} else if (packets > 32) {
		/* If we have processed over 32 packets in a single interrupt
		 * for Tx assume we need to switch over to "bulk" mode.
		 */
		rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
	}

	/* We have no packets to actually measure against. This means
	 * either one of the other queues on this vector is active or
	 * we are a Tx queue doing TSO with too high of an interrupt rate.
	 *
	 * Between 4 and 56 we can assume that our current interrupt delay
	 * is only slightly too low. As such we should increase it by a small
	 * fixed amount.
	 */
	if (packets < 56) {
		itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
		if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
			itr &= I40E_ITR_ADAPTIVE_LATENCY;
			itr += I40E_ITR_ADAPTIVE_MAX_USECS;
		}
		goto clear_counts;
	}

	if (packets <= 256) {
		itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
		itr &= I40E_ITR_MASK;

		/* Between 56 and 112 is our "goldilocks" zone where we are
		 * working out "just right". Just report that our current
		 * ITR is good for us.
		 */
		if (packets <= 112)
			goto clear_counts;

		/* If packet count is 128 or greater we are likely looking
		 * at a slight overrun of the delay we want. Try halving
		 * our delay to see if that will cut the number of packets
		 * in half per interrupt.
		 */
		itr /= 2;
		itr &= I40E_ITR_MASK;
		if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
			itr = I40E_ITR_ADAPTIVE_MIN_USECS;

		goto clear_counts;
	}

	/* The paths below assume we are dealing with a bulk ITR since
	 * number of packets is greater than 256. We are just going to have
	 * to compute a value and try to bring the count under control,
	 * though for smaller packet sizes there isn't much we can do as
	 * NAPI polling will likely be kicking in sooner rather than later.
	 */
	itr = I40E_ITR_ADAPTIVE_BULK;

adjust_by_size:
	/* If packet counts are 256 or greater we can assume we have a gross
	 * overestimation of what the rate should be. Instead of trying to fine
	 * tune it just use the formula below to try and dial in an exact value
	 * given the current packet size of the frame.
	 */
	avg_wire_size = bytes / packets;

	/* The following is a crude approximation of:
	 *  wmem_default / (size + overhead) = desired_pkts_per_int
	 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
	 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
	 *
	 * Assuming wmem_default is 212992 and overhead is 640 bytes per
	 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
	 * formula down to
	 *
	 *  (170 * (size + 24)) / (size + 640) = ITR
	 *
	 * We first do some math on the packet size and then finally bitshift
	 * by 8 after rounding up. We also have to account for PCIe link speed
	 * difference as ITR scales based on this.
	 */
	if (avg_wire_size <= 60) {
		/* Start at 250k ints/sec */
		avg_wire_size = 4096;
	} else if (avg_wire_size <= 380) {
		/* 250K ints/sec to 60K ints/sec */
		avg_wire_size *= 40;
		avg_wire_size += 1696;
	} else if (avg_wire_size <= 1084) {
		/* 60K ints/sec to 36K ints/sec */
		avg_wire_size *= 15;
		avg_wire_size += 11452;
	} else if (avg_wire_size <= 1980) {
		/* 36K ints/sec to 30K ints/sec */
		avg_wire_size *= 5;
		avg_wire_size += 22420;
	} else {
		/* plateau at a limit of 30K ints/sec */
		avg_wire_size = 32256;
	}
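
	/* Worked example (illustrative): for an average wire size of 128
	 * bytes we take the 60-380 branch above, giving
	 * 128 * 40 + 1696 = 6816, i.e. roughly 256 times the target ITR
	 * before the latency and link-speed adjustments below are applied.
	 */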
	/* If we are in low latency mode halve our delay which doubles the
	 * rate to somewhere between 100K to 16K ints/sec
	 */
	if (itr & I40E_ITR_ADAPTIVE_LATENCY)
		avg_wire_size >>= 1;

	/* Resultant value is 256 times larger than it needs to be. This
	 * gives us room to adjust the value as needed to either increase
	 * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
	 *
	 * Use addition as we have already recorded the new latency flag
	 * for the ITR value.
	 */
	itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
	       I40E_ITR_ADAPTIVE_MIN_INC;

	if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
		itr &= I40E_ITR_ADAPTIVE_LATENCY;
		itr += I40E_ITR_ADAPTIVE_MAX_USECS;
	}

clear_counts:
	/* write back value */
	rc->target_itr = itr;

	/* next update should occur within next jiffy */
	rc->next_update = next_update + 1;

	rc->total_bytes = 0;
	rc->total_packets = 0;
}

/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
			       struct i40e_rx_buffer *old_buff)
{
	struct i40e_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_bi[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_buff->dma		= old_buff->dma;
	new_buff->page		= old_buff->page;
	new_buff->page_offset	= old_buff->page_offset;
	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
}

/**
 * i40e_rx_is_programming_status - check for programming status descriptor
 * @qw: qword representing status_error_len in CPU ordering
 *
 * The value in the descriptor length field indicates whether this is a
 * programming status descriptor for flow director or FCoE (length equals
 * I40E_RX_PROG_STATUS_DESC_LENGTH); otherwise it is a packet descriptor.
 **/
static inline bool i40e_rx_is_programming_status(u64 qw)
{
	/* The Rx filter programming status and SPH bit occupy the same
	 * spot in the descriptor. Since we don't support packet split we
	 * can just reuse the bit as an indication that this is a
	 * programming status descriptor.
	 */
	return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
}

/**
 * i40e_clean_programming_status - clean the programming status descriptor
 * @rx_ring: the rx ring that has this descriptor
 * @rx_desc: the rx descriptor written back by HW
 * @qw: qword representing status_error_len in CPU ordering
 *
 * Flow director should handle FD_FILTER_STATUS to check its filter programming
 * status being successful or not and take actions accordingly. FCoE should
 * handle its context/filter programming/invalidation status and take actions.
 *
 **/
static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
					  union i40e_rx_desc *rx_desc,
					  u64 qw)
{
	struct i40e_rx_buffer *rx_buffer;
	u32 ntc = rx_ring->next_to_clean;
	u8 id;

	/* fetch, update, and store next to clean */
	rx_buffer = &rx_ring->rx_bi[ntc++];
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(I40E_RX_DESC(rx_ring, ntc));

	/* place unused page back on the ring */
	i40e_reuse_rx_page(rx_ring, rx_buffer);
	rx_ring->rx_stats.page_reuse_count++;

	/* clear contents of buffer_info */
	rx_buffer->page = NULL;

	id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
		  I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

	if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
		i40e_fd_handle_status(rx_ring, rx_desc, id);
}

/**
 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int bi_size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_bi);
	bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
	tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!tx_ring->tx_bi)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
	/* add u32 for head writeback, align after this takes care of
	 * guaranteeing this is at least one cache line in size
	 */
	tx_ring->size += sizeof(u32);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			 tx_ring->size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt_ctr = -1;
	return 0;

err:
	kfree(tx_ring->tx_bi);
	tx_ring->tx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
	unsigned long bi_size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_bi)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      rx_bi->dma,
					      rx_bi->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
				     i40e_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     I40E_RX_DMA_ATTR);

		__page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

		rx_bi->page = NULL;
		rx_bi->page_offset = 0;
	}

	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_bi, 0, bi_size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * i40e_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40e_free_rx_resources(struct i40e_ring *rx_ring)
{
	i40e_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == I40E_VSI_MAIN)
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;

	if (rx_ring->desc) {
		dma_free_coherent(rx_ring->dev, rx_ring->size,
				  rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * i40e_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int bi_size;
	int err;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_bi);
	bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
	rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
	if (!rx_ring->rx_bi)
		return -ENOMEM;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			 rx_ring->size);
		goto err;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* XDP RX-queue info only needed for RX rings exposed to XDP */
	if (rx_ring->vsi->type == I40E_VSI_MAIN) {
		err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				       rx_ring->queue_index);
		if (err < 0)
			goto err;
	}

	rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;

	return 0;
err:
	kfree(rx_ring->rx_bi);
	rx_ring->rx_bi = NULL;
	return -ENOMEM;
}

/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* update next to alloc since we have filled the ring */
	rx_ring->next_to_alloc = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}

/**
 * i40e_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 **/
static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
}

/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
				   struct i40e_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page)) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	/* alloc new page for storage */
	page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 i40e_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 I40E_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, i40e_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = i40e_rx_offset(rx_ring);
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}
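
/* Note on the pagecnt_bias scheme above (explanatory summary): the page is
 * allocated with refcount 1 and immediately bumped by USHRT_MAX - 1, while
 * pagecnt_bias starts at USHRT_MAX. The driver "spends" bias (one unit per
 * buffer handed to the stack) instead of touching the atomic page refcount
 * on every receive, and only resynchronizes the two in
 * i40e_can_reuse_rx_page() once the bias is nearly exhausted.
 */
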
/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
			     struct sk_buff *skb, u16 vlan_tag)
{
	struct i40e_q_vector *q_vector = rx_ring->q_vector;

	if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (vlan_tag & VLAN_VID_MASK))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * i40e_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;

	/* do nothing if no valid netdev defined */
	if (!rx_ring->netdev || !cleaned_count)
		return false;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];

	do {
		if (!i40e_alloc_mapped_page(rx_ring, bi))
			goto no_buffers;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.qword1.status_error_len = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return false;

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	/* make sure to come back via polling to try again after
	 * allocation failure
	 */
	return true;
}

1650 /* make sure to come back via polling to try again after
1651 * allocation failure
1657 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1658 * @vsi: the VSI we care about
1659 * @skb: skb currently being received and modified
1660 * @rx_desc: the receive descriptor
1662 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1663 struct sk_buff *skb,
1664 union i40e_rx_desc *rx_desc)
1666 struct i40e_rx_ptype_decoded decoded;
1667 u32 rx_error, rx_status;
1672 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1673 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1674 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1675 I40E_RXD_QW1_ERROR_SHIFT;
1676 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1677 I40E_RXD_QW1_STATUS_SHIFT;
1678 decoded = decode_rx_desc_ptype(ptype);
1680 skb->ip_summed = CHECKSUM_NONE;
1682 skb_checksum_none_assert(skb);
1684 /* Rx csum enabled and ip headers found? */
1685 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1688 /* did the hardware decode the packet and checksum? */
1689 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1692 /* both known and outer_ip must be set for the below code to work */
1693 if (!(decoded.known && decoded.outer_ip))
1696 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1697 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1698 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1699 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1702 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1703 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1706 /* likely incorrect csum if alternate IP extension headers found */
1708 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1709 /* don't increment checksum err here, non-fatal err */
1712 /* there was some L4 error, count error and punt packet to the stack */
1713 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1716 /* handle packets that were not able to be checksummed due
1717 * to arrival speed, in this case the stack can compute
1720 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1723 /* If there is an outer header present that might contain a checksum
1724 * we need to bump the checksum level by 1 to reflect the fact that
1725 * we are indicating we validated the inner checksum.
1727 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1728 skb->csum_level = 1;
1730 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1731 switch (decoded.inner_prot) {
1732 case I40E_RX_PTYPE_INNER_PROT_TCP:
1733 case I40E_RX_PTYPE_INNER_PROT_UDP:
1734 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1735 skb->ip_summed = CHECKSUM_UNNECESSARY;
1744 vsi->back->hw_csum_rx_error++;
/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

	if (!decoded.known)
		return PKT_HASH_TYPE_NONE;

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
	    decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
		return PKT_HASH_TYPE_L4;
	else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
		 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
		return PKT_HASH_TYPE_L3;
	else
		return PKT_HASH_TYPE_L2;
}

/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
				union i40e_rx_desc *rx_desc,
				struct sk_buff *skb,
				u8 rx_ptype)
{
	u32 hash;
	const __le64 rss_mask =
		cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
			    I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

	if (!(ring->netdev->features & NETIF_F_RXHASH))
		return;

	if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
		hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
		skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
	}
}

/**
 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
			     union i40e_rx_desc *rx_desc, struct sk_buff *skb,
			     u8 rx_ptype)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
			I40E_RXD_QW1_STATUS_SHIFT;
	u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
	u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
		   I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;

	if (unlikely(tsynvalid))
		i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);

	i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

	i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	/* modifies the skb - consumes the enet header */
	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

/**
 * i40e_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 * @rx_desc: pointer to the EOP Rx descriptor
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
				 union i40e_rx_desc *rx_desc)

{
	/* XDP packets use error pointer so abort at this point */
	if (IS_ERR(skb))
		return true;

	/* ERR_MASK will only have valid bits if EOP set, and
	 * what we are doing here is actually checking
	 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
	 * the error field
	 */
	if (unlikely(i40e_test_staterr(rx_desc,
				       BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
		dev_kfree_skb_any(skb);
		return true;
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * i40e_page_is_reusable - check if any reuse is possible
 * @page: page struct to check
 *
 * A page is not reusable if it was allocated under low memory
 * conditions, or it's not in the same NUMA node as this CPU.
 **/
static inline bool i40e_page_is_reusable(struct page *page)
{
	return (page_to_nid(page) == numa_mem_id()) &&
		!page_is_pfmemalloc(page);
}

/**
 * i40e_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 *
 * @rx_buffer: buffer containing the page
 *
 * If page is reusable, rx_buffer->page_offset is adjusted to point to
 * an unused region in the page.
 *
 * For small pages, @truesize will be a constant value, half the size
 * of the memory at page. We'll attempt to alternate between high and
 * low halves of the page, with one half ready for use by the hardware
 * and the other half being consumed by the stack. We use the page
 * ref count to determine whether the stack has finished consuming the
 * portion of this page that was passed up with a previous packet. If
 * the page ref count is >1, we'll assume the "other" half page is
 * still busy, and this page cannot be reused.
 *
 * For larger pages, @truesize will be the actual space used by the
 * received packet (adjusted upward to an even multiple of the cache
 * line size). This will advance through the page by the amount
 * actually consumed by the received packets while there is still
 * space for a buffer. Each region of larger pages will be used at
 * most once, after which the page will not be reused.
 *
 * In either case, if the page is reusable its refcount is increased.
 **/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* Is any reuse possible? */
	if (unlikely(!i40e_page_is_reusable(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define I40E_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
	if (rx_buffer->page_offset > I40E_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
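
/* Worked example (illustrative): with 4K pages and 2K buffers, after one
 * half has been handed to the stack the driver sees page_count(page) ==
 * USHRT_MAX and pagecnt_bias == USHRT_MAX - 1, so the difference is 1
 * and the buffer may be flipped to the other half; a difference greater
 * than 1 would mean both halves are still in flight and reuse must wait.
 */
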
/**
 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * It will just attach the page as a frag to the skb.
 *
 * The function will then update the page offset.
 **/
static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
			     struct i40e_rx_buffer *rx_buffer,
			     struct sk_buff *skb,
			     unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
#endif

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}
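
/* The XOR flip above alternates between the two half-page buffers: with a
 * 4K page, truesize is 2048, so an offset of 0 becomes 2048 and 2048
 * becomes 0 on each pass (illustrative values).
 */
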
/**
 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
 * @rx_ring: rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 **/
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
						 const unsigned int size)
{
	struct i40e_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

2008 * i40e_construct_skb - Allocate skb and populate it
2009 * @rx_ring: rx descriptor ring to transact packets on
2010 * @rx_buffer: rx buffer to pull data from
2011 * @xdp: xdp_buff pointing to the data
2013 * This function allocates an skb. It then populates it with the page
2014 * data from the current receive descriptor, taking care to set up the
2017 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
2018 struct i40e_rx_buffer *rx_buffer,
2019 struct xdp_buff *xdp)
2021 unsigned int size = xdp->data_end - xdp->data;
2022 #if (PAGE_SIZE < 8192)
2023 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2025 unsigned int truesize = SKB_DATA_ALIGN(size);
2027 unsigned int headlen;
2028 struct sk_buff *skb;
2030 /* prefetch first cache line of first page */
2031 prefetch(xdp->data);
2032 #if L1_CACHE_BYTES < 128
2033 prefetch(xdp->data + L1_CACHE_BYTES);
2035 /* Note, we get here by enabling legacy-rx via:
2037 * ethtool --set-priv-flags <dev> legacy-rx on
2039 * In this mode, we currently get 0 extra XDP headroom as
2040 * opposed to having legacy-rx off, where we process XDP
2041 * packets going to stack via i40e_build_skb(). The latter
2042 * provides us currently with 192 bytes of headroom.
2044 * For i40e_construct_skb() mode it means that the
2045 * xdp->data_meta will always point to xdp->data, since
2046 * the helper cannot expand the head. Should this ever
2047 * change in future for legacy-rx mode on, then lets also
2048 * add xdp->data_meta handling here.
2051 /* allocate a skb to store the frags */
2052 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
2053 I40E_RX_HDR_SIZE,
2054 GFP_ATOMIC | __GFP_NOWARN);
2055 if (unlikely(!skb))
2056 return NULL;
2058 /* Determine available headroom for copy */
2059 headlen = size;
2060 if (headlen > I40E_RX_HDR_SIZE)
2061 headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
2063 /* align pull length to size of long to optimize memcpy performance */
2064 memcpy(__skb_put(skb, headlen), xdp->data,
2065 ALIGN(headlen, sizeof(long)));
2067 /* update all of the pointers */
2068 size -= headlen;
2069 if (size) {
2070 skb_add_rx_frag(skb, 0, rx_buffer->page,
2071 rx_buffer->page_offset + headlen,
2072 size, truesize);
2074 /* buffer is used by skb, update page_offset */
2075 #if (PAGE_SIZE < 8192)
2076 rx_buffer->page_offset ^= truesize;
2077 #else
2078 rx_buffer->page_offset += truesize;
2079 #endif
2080 } else {
2081 /* buffer is unused, reset bias back to rx_buffer */
2082 rx_buffer->pagecnt_bias++;
2083 }
2085 return skb;
2086 }
2088 /**
2089 * i40e_build_skb - Build skb around an existing buffer
2090 * @rx_ring: Rx descriptor ring to transact packets on
2091 * @rx_buffer: Rx buffer to pull data from
2092 * @xdp: xdp_buff pointing to the data
2094 * This function builds an skb around an existing Rx buffer, taking care
2095 * to set up the skb correctly and avoid any memcpy overhead.
2096 **/
2097 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
2098 struct i40e_rx_buffer *rx_buffer,
2099 struct xdp_buff *xdp)
2100 {
2101 unsigned int metasize = xdp->data - xdp->data_meta;
2102 #if (PAGE_SIZE < 8192)
2103 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2104 #else
2105 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
2106 SKB_DATA_ALIGN(xdp->data_end -
2107 xdp->data_hard_start);
2108 #endif
2109 struct sk_buff *skb;
2111 /* Prefetch first cache line of first page. If xdp->data_meta
2112 * is unused, this points exactly as xdp->data, otherwise we
2113 * likely have a consumer accessing first few bytes of meta
2114 * data, and then actual data.
2115 */
2116 prefetch(xdp->data_meta);
2117 #if L1_CACHE_BYTES < 128
2118 prefetch(xdp->data_meta + L1_CACHE_BYTES);
2119 #endif
2120 /* build an skb around the page buffer */
2121 skb = build_skb(xdp->data_hard_start, truesize);
2122 if (unlikely(!skb))
2123 return NULL;
2125 /* update pointers within the skb to store the data */
2126 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2127 __skb_put(skb, xdp->data_end - xdp->data);
2128 if (metasize)
2129 skb_metadata_set(skb, metasize);
2131 /* buffer is used by skb, update page_offset */
2132 #if (PAGE_SIZE < 8192)
2133 rx_buffer->page_offset ^= truesize;
2134 #else
2135 rx_buffer->page_offset += truesize;
2136 #endif
2138 return skb;
2139 }
2141 /**
2142 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2143 * @rx_ring: rx descriptor ring to transact packets on
2144 * @rx_buffer: rx buffer to pull data from
2146 * This function will clean up the contents of the rx_buffer. It will
2147 * either recycle the buffer or unmap it and free the associated resources.
2148 **/
2149 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
2150 struct i40e_rx_buffer *rx_buffer)
2151 {
2152 if (i40e_can_reuse_rx_page(rx_buffer)) {
2153 /* hand second half of page back to the ring */
2154 i40e_reuse_rx_page(rx_ring, rx_buffer);
2155 rx_ring->rx_stats.page_reuse_count++;
2156 } else {
2157 /* we are not reusing the buffer so unmap it */
2158 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2159 i40e_rx_pg_size(rx_ring),
2160 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
2161 __page_frag_cache_drain(rx_buffer->page,
2162 rx_buffer->pagecnt_bias);
2163 }
2165 /* clear contents of buffer_info */
2166 rx_buffer->page = NULL;
2167 }
2169 /**
2170 * i40e_is_non_eop - process handling of non-EOP buffers
2171 * @rx_ring: Rx ring being processed
2172 * @rx_desc: Rx descriptor for current buffer
2173 * @skb: Current socket buffer containing buffer in progress
2175 * This function updates next to clean. If the buffer is an EOP buffer
2176 * this function exits returning false, otherwise it will place the
2177 * sk_buff in the next buffer to be chained and return true indicating
2178 * that this is in fact a non-EOP buffer.
2179 **/
2180 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2181 union i40e_rx_desc *rx_desc,
2182 struct sk_buff *skb)
2183 {
2184 u32 ntc = rx_ring->next_to_clean + 1;
2186 /* fetch, update, and store next to clean */
2187 ntc = (ntc < rx_ring->count) ? ntc : 0;
2188 rx_ring->next_to_clean = ntc;
2190 prefetch(I40E_RX_DESC(rx_ring, ntc));
2192 /* if we are the last buffer then there is nothing else to do */
2193 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
2194 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
2195 return false;
2197 rx_ring->rx_stats.non_eop_descs++;
2199 return true;
2200 }
2202 #define I40E_XDP_PASS 0
2203 #define I40E_XDP_CONSUMED 1
2204 #define I40E_XDP_TX 2
2206 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2207 struct i40e_ring *xdp_ring);
2209 static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp,
2210 struct i40e_ring *xdp_ring)
2211 {
2212 struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
2214 if (unlikely(!xdpf))
2215 return I40E_XDP_CONSUMED;
2217 return i40e_xmit_xdp_ring(xdpf, xdp_ring);
2218 }
2220 /**
2221 * i40e_run_xdp - run an XDP program
2222 * @rx_ring: Rx ring being processed
2223 * @xdp: XDP buffer containing the frame
2224 **/
2225 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2226 struct xdp_buff *xdp)
2227 {
2228 int err, result = I40E_XDP_PASS;
2229 struct i40e_ring *xdp_ring;
2230 struct bpf_prog *xdp_prog;
2231 u32 act;
2233 rcu_read_lock();
2234 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2236 if (!xdp_prog)
2237 goto xdp_out;
2239 prefetchw(xdp->data_hard_start); /* xdp_frame write */
2241 act = bpf_prog_run_xdp(xdp_prog, xdp);
2242 switch (act) {
2243 case XDP_PASS:
2244 break;
2245 case XDP_TX:
2246 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2247 result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
2248 break;
2249 case XDP_REDIRECT:
2250 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2251 result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
2252 break;
2253 default:
2254 bpf_warn_invalid_xdp_action(act);
2255 case XDP_ABORTED:
2256 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2257 /* fallthrough -- handle aborts by dropping packet */
2258 case XDP_DROP:
2259 result = I40E_XDP_CONSUMED;
2260 break;
2261 }
2262 xdp_out:
2263 rcu_read_unlock();
2264 return ERR_PTR(-result);
2265 }
2267 /**
2268 * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
2269 * @rx_ring: Rx ring being processed
2270 * @rx_buffer: Rx buffer to adjust
2271 * @size: Size of adjustment
2272 **/
2273 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2274 struct i40e_rx_buffer *rx_buffer,
2275 unsigned int size)
2276 {
2277 #if (PAGE_SIZE < 8192)
2278 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2280 rx_buffer->page_offset ^= truesize;
2281 #else
2282 unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
2284 rx_buffer->page_offset += truesize;
2285 #endif
2286 }
2288 static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring)
2289 {
2290 /* Force memory writes to complete before letting h/w
2291 * know there are new descriptors to fetch.
2292 */
2293 wmb();
2294 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
2295 }
2297 /**
2298 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2299 * @rx_ring: rx descriptor ring to transact packets on
2300 * @budget: Total limit on number of packets to process
2302 * This function provides a "bounce buffer" approach to Rx interrupt
2303 * processing. The advantage to this is that on systems that have
2304 * expensive overhead for IOMMU access this provides a means of avoiding
2305 * it by maintaining the mapping of the page to the system.
2307 * Returns amount of work completed
2308 **/
2309 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2310 {
2311 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2312 struct sk_buff *skb = rx_ring->skb;
2313 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2314 bool failure = false, xdp_xmit = false;
2315 struct xdp_buff xdp;
2317 xdp.rxq = &rx_ring->xdp_rxq;
2319 while (likely(total_rx_packets < (unsigned int)budget)) {
2320 struct i40e_rx_buffer *rx_buffer;
2321 union i40e_rx_desc *rx_desc;
2322 unsigned int size;
2323 u16 vlan_tag;
2324 u8 rx_ptype;
2325 u64 qword;
2327 /* return some buffers to hardware, one at a time is too slow */
2328 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2329 failure = failure ||
2330 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2331 cleaned_count = 0;
2332 }
2334 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2336 /* status_error_len will always be zero for unused descriptors
2337 * because it's cleared in cleanup, and overlaps with hdr_addr
2338 * which is always zero because packet split isn't used, if the
2339 * hardware wrote DD then the length will be non-zero
2340 */
2341 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2343 /* This memory barrier is needed to keep us from reading
2344 * any other fields out of the rx_desc until we have
2345 * verified the descriptor has been written back.
2346 */
2347 dma_rmb();
2349 if (unlikely(i40e_rx_is_programming_status(qword))) {
2350 i40e_clean_programming_status(rx_ring, rx_desc, qword);
2351 cleaned_count++;
2352 continue;
2353 }
2354 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2355 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2356 if (!size)
2357 break;
2359 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2360 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2362 /* retrieve a buffer from the ring */
2363 if (!skb) {
2364 xdp.data = page_address(rx_buffer->page) +
2365 rx_buffer->page_offset;
2366 xdp.data_meta = xdp.data;
2367 xdp.data_hard_start = xdp.data -
2368 i40e_rx_offset(rx_ring);
2369 xdp.data_end = xdp.data + size;
2371 skb = i40e_run_xdp(rx_ring, &xdp);
2372 }
2374 if (IS_ERR(skb)) {
2375 if (PTR_ERR(skb) == -I40E_XDP_TX) {
2376 xdp_xmit = true;
2377 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2378 } else {
2379 rx_buffer->pagecnt_bias++;
2380 }
2381 total_rx_bytes += size;
2382 total_rx_packets++;
2383 } else if (skb) {
2384 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2385 } else if (ring_uses_build_skb(rx_ring)) {
2386 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2387 } else {
2388 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2389 }
2391 /* exit if we failed to retrieve a buffer */
2392 if (!skb) {
2393 rx_ring->rx_stats.alloc_buff_failed++;
2394 rx_buffer->pagecnt_bias++;
2395 break;
2396 }
2398 i40e_put_rx_buffer(rx_ring, rx_buffer);
2399 cleaned_count++;
2401 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2402 continue;
2404 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2405 skb = NULL;
2406 continue;
2407 }
2409 /* probably a little skewed due to removing CRC */
2410 total_rx_bytes += skb->len;
2412 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2413 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
2414 I40E_RXD_QW1_PTYPE_SHIFT;
2416 /* populate checksum, VLAN, and protocol */
2417 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
2419 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
2420 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
2422 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2423 i40e_receive_skb(rx_ring, skb, vlan_tag);
2424 skb = NULL;
2426 /* update budget accounting */
2427 total_rx_packets++;
2428 }
2430 if (xdp_xmit) {
2431 struct i40e_ring *xdp_ring =
2432 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2434 i40e_xdp_ring_update_tail(xdp_ring);
2435 xdp_do_flush_map();
2436 }
2438 rx_ring->skb = skb;
2440 u64_stats_update_begin(&rx_ring->syncp);
2441 rx_ring->stats.packets += total_rx_packets;
2442 rx_ring->stats.bytes += total_rx_bytes;
2443 u64_stats_update_end(&rx_ring->syncp);
2444 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2445 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2447 /* guarantee a trip back through this routine if there was a failure */
2448 return failure ? budget : (int)total_rx_packets;
2449 }
2451 static inline u32 i40e_buildreg_itr(const int type, u16 itr)
2452 {
2453 u32 val;
2455 /* We don't bother with setting the CLEARPBA bit as the data sheet
2456 * points out doing so is "meaningless since it was already
2457 * auto-cleared". The auto-clearing happens when the interrupt is
2458 * asserted.
2460 * Hardware errata 28 also indicates that writing to a
2461 * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
2462 * an event in the PBA anyway so we need to rely on the automask
2463 * to hold pending events for us until the interrupt is re-enabled
2465 * The itr value is reported in microseconds, and the register
2466 * value is recorded in 2 microsecond units. For this reason we
2467 * only need to shift by the interval shift - 1 instead of the
2468 * full value.
2469 */
2470 itr &= I40E_ITR_MASK;
2472 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2473 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2474 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
2476 return val;
2477 }
2479 /* a small macro to shorten up some long lines */
2480 #define INTREG I40E_PFINT_DYN_CTLN
2482 /* The act of updating the ITR will cause it to immediately trigger. In order
2483 * to prevent this from throwing off adaptive update statistics we defer the
2484 * update so that it can only happen so often. So after either Tx or Rx are
2485 * updated we make the adaptive scheme wait until either the ITR completely
2486 * expires via the next_update expiration or we have been through at least
2487 * 3 interrupts.
2488 */
2489 #define ITR_COUNTDOWN_START 3
2492 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2493 * @vsi: the VSI we care about
2494 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2495 **/
2497 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2498 struct i40e_q_vector *q_vector)
2499 {
2500 struct i40e_hw *hw = &vsi->back->hw;
2501 u32 intval;
2503 /* If we don't have MSIX, then we only need to re-enable icr0 */
2504 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2505 i40e_irq_dynamic_enable_icr0(vsi->back);
2506 return;
2507 }
2509 /* These will do nothing if dynamic updates are not enabled */
2510 i40e_update_itr(q_vector, &q_vector->tx);
2511 i40e_update_itr(q_vector, &q_vector->rx);
2513 /* This block of logic allows us to get away with only updating
2514 * one ITR value with each interrupt. The idea is to perform a
2515 * pseudo-lazy update with the following criteria.
2517 * 1. Rx is given higher priority than Tx if both are in same state
2518 * 2. If we must reduce an ITR, that reduction is given highest priority.
2519 * 3. We then give priority to increasing ITR based on amount.
2520 */
2521 if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
2522 /* Rx ITR needs to be reduced, this is highest priority */
2523 intval = i40e_buildreg_itr(I40E_RX_ITR,
2524 q_vector->rx.target_itr);
2525 q_vector->rx.current_itr = q_vector->rx.target_itr;
2526 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2527 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
2528 ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
2529 (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
2530 /* Tx ITR needs to be reduced, this is second priority
2531 * Tx ITR needs to be increased more than Rx, fourth priority
2532 */
2533 intval = i40e_buildreg_itr(I40E_TX_ITR,
2534 q_vector->tx.target_itr);
2535 q_vector->tx.current_itr = q_vector->tx.target_itr;
2536 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2537 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
2538 /* Rx ITR needs to be increased, third priority */
2539 intval = i40e_buildreg_itr(I40E_RX_ITR,
2540 q_vector->rx.target_itr);
2541 q_vector->rx.current_itr = q_vector->rx.target_itr;
2542 q_vector->itr_countdown = ITR_COUNTDOWN_START;
2543 } else {
2544 /* No ITR update, lowest priority */
2545 intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2546 if (q_vector->itr_countdown)
2547 q_vector->itr_countdown--;
2548 }
2550 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2551 wr32(hw, INTREG(q_vector->reg_idx), intval);
2552 }
2555 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2556 * @napi: napi struct with our devices info in it
2557 * @budget: amount of work driver is allowed to do this pass, in packets
2559 * This function will clean all queues associated with a q_vector.
2561 * Returns the amount of work done
2562 **/
2563 int i40e_napi_poll(struct napi_struct *napi, int budget)
2564 {
2565 struct i40e_q_vector *q_vector =
2566 container_of(napi, struct i40e_q_vector, napi);
2567 struct i40e_vsi *vsi = q_vector->vsi;
2568 struct i40e_ring *ring;
2569 bool clean_complete = true;
2570 bool arm_wb = false;
2571 int budget_per_ring;
2572 int work_done = 0;
2574 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2575 napi_complete(napi);
2576 return 0;
2577 }
2579 /* Since the actual Tx work is minimal, we can give the Tx a larger
2580 * budget and be more aggressive about cleaning up the Tx descriptors.
2581 */
2582 i40e_for_each_ring(ring, q_vector->tx) {
2583 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
2584 clean_complete = false;
2585 continue;
2586 }
2587 arm_wb |= ring->arm_wb;
2588 ring->arm_wb = false;
2589 }
2591 /* Handle case where we are called by netpoll with a budget of 0 */
2592 if (budget <= 0)
2593 goto tx_only;
2595 /* We attempt to distribute budget to each Rx queue fairly, but don't
2596 * allow the budget to go below 1 because that would exit polling early.
2597 */
2598 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2600 i40e_for_each_ring(ring, q_vector->rx) {
2601 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
2603 work_done += cleaned;
2604 /* if we clean as many as budgeted, we must not be done */
2605 if (cleaned >= budget_per_ring)
2606 clean_complete = false;
2607 }
2609 /* If work not completed, return budget and polling will return */
2610 if (!clean_complete) {
2611 int cpu_id = smp_processor_id();
2613 /* It is possible that the interrupt affinity has changed but,
2614 * if the cpu is pegged at 100%, polling will never exit while
2615 * traffic continues and the interrupt will be stuck on this
2616 * cpu. We check to make sure affinity is correct before we
2617 * continue to poll, otherwise we must stop polling so the
2618 * interrupt can move to the correct cpu.
2619 */
2620 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2621 /* Tell napi that we are done polling */
2622 napi_complete_done(napi, work_done);
2624 /* Force an interrupt */
2625 i40e_force_wb(vsi, q_vector);
2627 /* Return budget-1 so that polling stops */
2628 return budget - 1;
2629 }
2630 tx_only:
2631 if (arm_wb) {
2632 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2633 i40e_enable_wb_on_itr(vsi, q_vector);
2634 }
2635 return budget;
2636 }
2638 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2639 q_vector->arm_wb_state = false;
2641 /* Work is done so exit the polling mode and re-enable the interrupt */
2642 napi_complete_done(napi, work_done);
2644 i40e_update_enable_itr(vsi, q_vector);
2646 return min(work_done, budget - 1);
2647 }
2649 /**
2650 * i40e_atr - Add a Flow Director ATR filter
2651 * @tx_ring: ring to add programming descriptor to
2652 * @skb: send buffer
2653 * @tx_flags: send tx flags
2654 **/
2655 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2656 u32 tx_flags)
2657 {
2658 struct i40e_filter_program_desc *fdir_desc;
2659 struct i40e_pf *pf = tx_ring->vsi->back;
2660 union {
2661 unsigned char *network;
2662 struct iphdr *ipv4;
2663 struct ipv6hdr *ipv6;
2664 } hdr;
2665 struct tcphdr *th;
2666 unsigned int hlen;
2667 u32 flex_ptype, dtype_cmd;
2668 int l4_proto;
2669 u16 i;
2671 /* make sure ATR is enabled */
2672 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2673 return;
2675 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2676 return;
2678 /* if sampling is disabled do nothing */
2679 if (!tx_ring->atr_sample_rate)
2680 return;
2682 /* Currently only IPv4/IPv6 with TCP is supported */
2683 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2684 return;
2686 /* snag network header to get L4 type and address */
2687 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2688 skb_inner_network_header(skb) : skb_network_header(skb);
2690 /* Note: tx_flags gets modified to reflect inner protocols in
2691 * tx_enable_csum function if encap is enabled.
2692 */
2693 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2694 /* access ihl as u8 to avoid unaligned access on ia64 */
2695 hlen = (hdr.network[0] & 0x0F) << 2;
2696 l4_proto = hdr.ipv4->protocol;
2697 } else {
2698 /* find the start of the innermost ipv6 header */
2699 unsigned int inner_hlen = hdr.network - skb->data;
2700 unsigned int h_offset = inner_hlen;
2702 /* this function updates h_offset to the end of the header */
2703 l4_proto =
2704 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2705 /* hlen will contain our best estimate of the tcp header */
2706 hlen = h_offset - inner_hlen;
2707 }
2709 if (l4_proto != IPPROTO_TCP)
2710 return;
2712 th = (struct tcphdr *)(hdr.network + hlen);
2714 /* Due to lack of space, no more new filters can be programmed */
2715 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
2716 return;
2717 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2718 /* HW ATR eviction will take care of removing filters on FIN
2719 * and RST packets.
2720 */
2721 if (th->fin || th->rst)
2722 return;
2723 }
2725 tx_ring->atr_count++;
2727 /* sample on all syn/fin/rst packets or once every atr sample rate */
2728 if (!th->fin &&
2729 !th->syn &&
2730 !th->rst &&
2731 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2732 return;
2734 tx_ring->atr_count = 0;
2736 /* grab the next descriptor */
2737 i = tx_ring->next_to_use;
2738 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2739 i++;
2741 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2743 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2744 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2745 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2746 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2747 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2748 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2749 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2751 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2753 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2755 dtype_cmd |= (th->fin || th->rst) ?
2756 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2757 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2758 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2759 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2761 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2762 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2764 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2765 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2767 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2768 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2769 dtype_cmd |=
2770 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2771 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2772 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2773 else
2774 dtype_cmd |=
2775 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2776 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2777 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2779 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2780 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2782 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2783 fdir_desc->rsvd = cpu_to_le32(0);
2784 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2785 fdir_desc->fd_id = cpu_to_le32(0);
2786 }
2788 /**
2789 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2790 * @skb: send buffer
2791 * @tx_ring: ring to send buffer on
2792 * @flags: the tx flags to be set
2794 * Checks the skb and set up correspondingly several generic transmit flags
2795 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2797 * Returns an error code if the frame should be dropped, and otherwise
2798 * returns 0 to indicate the flags have been set properly.
2799 **/
2800 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2801 struct i40e_ring *tx_ring,
2802 u32 *flags)
2803 {
2804 __be16 protocol = skb->protocol;
2805 u32 tx_flags = 0;
2807 if (protocol == htons(ETH_P_8021Q) &&
2808 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2809 /* When HW VLAN acceleration is turned off by the user the
2810 * stack sets the protocol to 8021q so that the driver
2811 * can take any steps required to support the SW only
2812 * VLAN handling. In our case the driver doesn't need
2813 * to take any further steps so just set the protocol
2814 * to the encapsulated ethertype.
2815 */
2816 skb->protocol = vlan_get_protocol(skb);
2817 goto out;
2818 }
2820 /* if we have a HW VLAN tag being added, default to the HW one */
2821 if (skb_vlan_tag_present(skb)) {
2822 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2823 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2824 /* else if it is a SW VLAN, check the next protocol and store the tag */
2825 } else if (protocol == htons(ETH_P_8021Q)) {
2826 struct vlan_hdr *vhdr, _vhdr;
2828 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2829 if (!vhdr)
2830 return -EINVAL;
2832 protocol = vhdr->h_vlan_encapsulated_proto;
2833 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2834 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2835 }
2837 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2838 goto out;
2840 /* Insert 802.1p priority into VLAN header */
2841 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2842 (skb->priority != TC_PRIO_CONTROL)) {
2843 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2844 tx_flags |= (skb->priority & 0x7) <<
2845 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2846 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2847 struct vlan_ethhdr *vhdr;
2848 int rc;
2850 rc = skb_cow_head(skb, 0);
2851 if (rc < 0)
2852 return rc;
2853 vhdr = (struct vlan_ethhdr *)skb->data;
2854 vhdr->h_vlan_TCI = htons(tx_flags >>
2855 I40E_TX_FLAGS_VLAN_SHIFT);
2856 } else {
2857 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2858 }
2859 }
2861 out:
2862 *flags = tx_flags;
2863 return 0;
2864 }
2866 /**
2867 * i40e_tso - set up the tso context descriptor
2868 * @first: pointer to first Tx buffer for xmit
2869 * @hdr_len: ptr to the size of the packet header
2870 * @cd_type_cmd_tso_mss: Quad Word 1
2872 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2873 **/
2874 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2875 u64 *cd_type_cmd_tso_mss)
2876 {
2877 struct sk_buff *skb = first->skb;
2878 u64 cd_cmd, cd_tso_len, cd_mss;
2879 union {
2880 struct iphdr *v4;
2881 struct ipv6hdr *v6;
2882 unsigned char *hdr;
2883 } ip;
2884 union {
2885 struct tcphdr *tcp;
2886 struct udphdr *udp;
2887 unsigned char *hdr;
2888 } l4;
2889 u32 paylen, l4_offset;
2890 u16 gso_segs, gso_size;
2891 int err;
2893 if (skb->ip_summed != CHECKSUM_PARTIAL)
2894 return 0;
2896 if (!skb_is_gso(skb))
2897 return 0;
2899 err = skb_cow_head(skb, 0);
2900 if (err < 0)
2901 return err;
2903 ip.hdr = skb_network_header(skb);
2904 l4.hdr = skb_transport_header(skb);
2906 /* initialize outer IP header fields */
2907 if (ip.v4->version == 4) {
2908 ip.v4->tot_len = 0;
2909 ip.v4->check = 0;
2910 } else {
2911 ip.v6->payload_len = 0;
2912 }
2914 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2915 SKB_GSO_GRE_CSUM |
2916 SKB_GSO_IPXIP4 |
2917 SKB_GSO_IPXIP6 |
2918 SKB_GSO_UDP_TUNNEL |
2919 SKB_GSO_UDP_TUNNEL_CSUM)) {
2920 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2921 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2922 l4.udp->len = 0;
2924 /* determine offset of outer transport header */
2925 l4_offset = l4.hdr - skb->data;
2927 /* remove payload length from outer checksum */
2928 paylen = skb->len - l4_offset;
2929 csum_replace_by_diff(&l4.udp->check,
2930 (__force __wsum)htonl(paylen));
2931 }
2933 /* reset pointers to inner headers */
2934 ip.hdr = skb_inner_network_header(skb);
2935 l4.hdr = skb_inner_transport_header(skb);
2937 /* initialize inner IP header fields */
2938 if (ip.v4->version == 4) {
2939 ip.v4->tot_len = 0;
2940 ip.v4->check = 0;
2941 } else {
2942 ip.v6->payload_len = 0;
2943 }
2944 }
2946 /* determine offset of inner transport header */
2947 l4_offset = l4.hdr - skb->data;
2949 /* remove payload length from inner checksum */
2950 paylen = skb->len - l4_offset;
2951 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2953 /* compute length of segmentation header */
2954 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
2956 /* pull values out of skb_shinfo */
2957 gso_size = skb_shinfo(skb)->gso_size;
2958 gso_segs = skb_shinfo(skb)->gso_segs;
2960 /* update GSO size and bytecount with header size */
2961 first->gso_segs = gso_segs;
2962 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2964 /* find the field values */
2965 cd_cmd = I40E_TX_CTX_DESC_TSO;
2966 cd_tso_len = skb->len - *hdr_len;
2967 cd_mss = gso_size;
2968 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2969 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2970 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
2972 return 1;
2973 }
2974 /**
2975 * i40e_tsyn - set up the tsyn context descriptor
2976 * @tx_ring: ptr to the ring to send
2977 * @skb: ptr to the skb we're sending
2978 * @tx_flags: the collected send information
2979 * @cd_type_cmd_tso_mss: Quad Word 1
2981 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2982 **/
2983 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2984 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2985 {
2986 struct i40e_pf *pf;
2988 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2989 return 0;
2991 /* Tx timestamps cannot be sampled when doing TSO */
2992 if (tx_flags & I40E_TX_FLAGS_TSO)
2993 return 0;
2995 /* only timestamp the outbound packet if the user has requested it and
2996 * we are not already transmitting a packet to be timestamped
2997 */
2998 pf = i40e_netdev_to_pf(tx_ring->netdev);
2999 if (!(pf->flags & I40E_FLAG_PTP))
3000 return 0;
3002 if (pf->ptp_tx &&
3003 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
3004 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3005 pf->ptp_tx_start = jiffies;
3006 pf->ptp_tx_skb = skb_get(skb);
3007 } else {
3008 pf->tx_hwtstamp_skipped++;
3009 return 0;
3010 }
3012 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
3013 I40E_TXD_CTX_QW1_CMD_SHIFT;
3015 return 1;
3016 }
3018 /**
3019 * i40e_tx_enable_csum - Enable Tx checksum offloads
3020 * @skb: send buffer
3021 * @tx_flags: pointer to Tx flags currently set
3022 * @td_cmd: Tx descriptor command bits to set
3023 * @td_offset: Tx descriptor header offsets to set
3024 * @tx_ring: Tx descriptor ring
3025 * @cd_tunneling: ptr to context desc bits
3026 **/
3027 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
3028 u32 *td_cmd, u32 *td_offset,
3029 struct i40e_ring *tx_ring,
3030 u32 *cd_tunneling)
3031 {
3032 union {
3033 struct iphdr *v4;
3034 struct ipv6hdr *v6;
3035 unsigned char *hdr;
3036 } ip;
3037 union {
3038 struct tcphdr *tcp;
3039 struct udphdr *udp;
3040 unsigned char *hdr;
3041 } l4;
3042 unsigned char *exthdr;
3043 u32 offset, cmd = 0;
3044 __be16 frag_off;
3045 u8 l4_proto = 0;
3047 if (skb->ip_summed != CHECKSUM_PARTIAL)
3048 return 0;
3050 ip.hdr = skb_network_header(skb);
3051 l4.hdr = skb_transport_header(skb);
3053 /* compute outer L2 header size */
3054 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
3056 if (skb->encapsulation) {
3057 u32 tunnel = 0;
3058 /* define outer network header type */
3059 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3060 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3061 I40E_TX_CTX_EXT_IP_IPV4 :
3062 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
3064 l4_proto = ip.v4->protocol;
3065 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3066 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
3068 exthdr = ip.hdr + sizeof(*ip.v6);
3069 l4_proto = ip.v6->nexthdr;
3070 if (l4.hdr != exthdr)
3071 ipv6_skip_exthdr(skb, exthdr - skb->data,
3072 &l4_proto, &frag_off);
3073 }
3075 /* define outer transport */
3076 switch (l4_proto) {
3077 case IPPROTO_UDP:
3078 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
3079 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3080 break;
3081 case IPPROTO_GRE:
3082 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
3083 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3084 break;
3085 case IPPROTO_IPIP:
3086 case IPPROTO_IPV6:
3087 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
3088 l4.hdr = skb_inner_network_header(skb);
3089 break;
3090 default:
3091 if (*tx_flags & I40E_TX_FLAGS_TSO)
3092 return -1;
3094 skb_checksum_help(skb);
3095 return 0;
3096 }
3098 /* compute outer L3 header size */
3099 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
3100 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
3102 /* switch IP header pointer from outer to inner header */
3103 ip.hdr = skb_inner_network_header(skb);
3105 /* compute tunnel header size */
3106 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
3107 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
3109 /* indicate if we need to offload outer UDP header */
3110 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
3111 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
3112 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
3113 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
3115 /* record tunnel offload values */
3116 *cd_tunneling |= tunnel;
3118 /* switch L4 header pointer from outer to inner */
3119 l4.hdr = skb_inner_transport_header(skb);
3122 /* reset type as we transition from outer to inner headers */
3123 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
3124 if (ip.v4->version == 4)
3125 *tx_flags |= I40E_TX_FLAGS_IPV4;
3126 if (ip.v6->version == 6)
3127 *tx_flags |= I40E_TX_FLAGS_IPV6;
3128 }
3130 /* Enable IP checksum offloads */
3131 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
3132 l4_proto = ip.v4->protocol;
3133 /* the stack computes the IP header already, the only time we
3134 * need the hardware to recompute it is in the case of TSO.
3136 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
3137 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
3138 I40E_TX_DESC_CMD_IIPT_IPV4;
3139 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
3140 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
3142 exthdr = ip.hdr + sizeof(*ip.v6);
3143 l4_proto = ip.v6->nexthdr;
3144 if (l4.hdr != exthdr)
3145 ipv6_skip_exthdr(skb, exthdr - skb->data,
3146 &l4_proto, &frag_off);
3147 }
3149 /* compute inner L3 header size */
3150 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
3152 /* Enable L4 checksum offloads */
3153 switch (l4_proto) {
3154 case IPPROTO_TCP:
3155 /* enable checksum offloads */
3156 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
3157 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3158 break;
3159 case IPPROTO_SCTP:
3160 /* enable SCTP checksum offload */
3161 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
3162 offset |= (sizeof(struct sctphdr) >> 2) <<
3163 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3164 break;
3165 case IPPROTO_UDP:
3166 /* enable UDP checksum offload */
3167 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
3168 offset |= (sizeof(struct udphdr) >> 2) <<
3169 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
3170 break;
3171 default:
3172 if (*tx_flags & I40E_TX_FLAGS_TSO)
3173 return -1;
3174 skb_checksum_help(skb);
3175 return 0;
3176 }
3178 *td_cmd |= cmd;
3179 *td_offset |= offset;
3181 return 1;
3182 }
3184 /**
3185 * i40e_create_tx_ctx - Build the Tx context descriptor
3186 * @tx_ring: ring to create the descriptor on
3187 * @cd_type_cmd_tso_mss: Quad Word 1
3188 * @cd_tunneling: Quad Word 0 - bits 0-31
3189 * @cd_l2tag2: Quad Word 0 - bits 32-63
3190 **/
3191 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
3192 const u64 cd_type_cmd_tso_mss,
3193 const u32 cd_tunneling, const u32 cd_l2tag2)
3194 {
3195 struct i40e_tx_context_desc *context_desc;
3196 int i = tx_ring->next_to_use;
3198 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
3199 !cd_tunneling && !cd_l2tag2)
3200 return;
3202 /* grab the next descriptor */
3203 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
3204 i++;
3206 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
3208 /* cpu_to_le32 and assign to struct fields */
3209 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
3210 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
3211 context_desc->rsvd = cpu_to_le16(0);
3212 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
3213 }
3215 /**
3216 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3217 * @tx_ring: the ring to be checked
3218 * @size: the size buffer we want to assure is available
3220 * Returns -EBUSY if a stop is needed, else 0
3221 **/
3222 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
3223 {
3224 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
3225 /* Memory barrier before checking head and tail */
3226 smp_mb();
3228 /* Check again in a case another CPU has just made room available. */
3229 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
3230 return -EBUSY;
3232 /* A reprieve! - use start_queue because it doesn't call schedule */
3233 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
3234 ++tx_ring->tx_stats.restart_queue;
3235 return 0;
3236 }
3238 /**
3239 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3240 * @skb: send buffer
3242 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
3243 * and so we need to figure out the cases where we need to linearize the skb.
3245 * For TSO we need to count the TSO header and segment payload separately.
3246 * As such we need to check cases where we have 7 fragments or more as we
3247 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3248 * the segment payload in the first descriptor, and another 7 for the
3249 * fragments.
3250 **/
3251 bool __i40e_chk_linearize(struct sk_buff *skb)
3252 {
3253 const struct skb_frag_struct *frag, *stale;
3254 int nr_frags, sum;
3256 /* no need to check if number of frags is less than 7 */
3257 nr_frags = skb_shinfo(skb)->nr_frags;
3258 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3259 return false;
3261 /* We need to walk through the list and validate that each group
3262 * of 6 fragments totals at least gso_size.
3263 */
3264 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3265 frag = &skb_shinfo(skb)->frags[0];
3267 /* Initialize size to the negative value of gso_size minus 1. We
3268 * use this as the worst case scenerio in which the frag ahead
3269 * of us only provides one byte which is why we are limited to 6
3270 * descriptors for a single transmit as the header and previous
3271 * fragment are already consuming 2 descriptors.
3272 */
3273 sum = 1 - skb_shinfo(skb)->gso_size;
3275 /* Add size of frags 0 through 4 to create our initial sum */
3276 sum += skb_frag_size(frag++);
3277 sum += skb_frag_size(frag++);
3278 sum += skb_frag_size(frag++);
3279 sum += skb_frag_size(frag++);
3280 sum += skb_frag_size(frag++);
3282 /* Walk through fragments adding latest fragment, testing it, and
3283 * then removing stale fragments from the sum.
3285 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
3286 int stale_size = skb_frag_size(stale);
3288 sum += skb_frag_size(frag++);
3290 /* The stale fragment may present us with a smaller
3291 * descriptor than the actual fragment size. To account
3292 * for that we need to remove all the data on the front and
3293 * figure out what the remainder would be in the last
3294 * descriptor associated with the fragment.
3296 if (stale_size > I40E_MAX_DATA_PER_TXD) {
3297 int align_pad = -(stale->page_offset) &
3298 (I40E_MAX_READ_REQ_SIZE - 1);
3300 sum -= align_pad;
3301 stale_size -= align_pad;
3303 do {
3304 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3305 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
3306 } while (stale_size > I40E_MAX_DATA_PER_TXD);
3307 }
3309 /* if sum is negative we failed to make sufficient progress */
3310 if (sum < 0)
3311 return true;
3313 if (!nr_frags--)
3314 break;
3316 sum -= stale_size;
3317 }
3319 return false;
3320 }
3322 /**
3323 * i40e_tx_map - Build the Tx descriptor
3324 * @tx_ring: ring to send buffer on
3325 * @skb: send buffer
3326 * @first: first buffer info buffer to use
3327 * @tx_flags: collected send information
3328 * @hdr_len: size of the packet header
3329 * @td_cmd: the command field in the descriptor
3330 * @td_offset: offset for checksum or crc
3332 * Returns 0 on success, -1 on failure to DMA
3333 **/
3334 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3335 struct i40e_tx_buffer *first, u32 tx_flags,
3336 const u8 hdr_len, u32 td_cmd, u32 td_offset)
3337 {
3338 unsigned int data_len = skb->data_len;
3339 unsigned int size = skb_headlen(skb);
3340 struct skb_frag_struct *frag;
3341 struct i40e_tx_buffer *tx_bi;
3342 struct i40e_tx_desc *tx_desc;
3343 u16 i = tx_ring->next_to_use;
3344 u32 td_tag = 0;
3345 dma_addr_t dma;
3346 u16 desc_count = 1;
3348 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3349 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3350 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3351 I40E_TX_FLAGS_VLAN_SHIFT;
3352 }
3354 first->tx_flags = tx_flags;
3356 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3358 tx_desc = I40E_TX_DESC(tx_ring, i);
3359 tx_bi = first;
3361 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3362 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3364 if (dma_mapping_error(tx_ring->dev, dma))
3365 goto dma_error;
3367 /* record length, and DMA address */
3368 dma_unmap_len_set(tx_bi, len, size);
3369 dma_unmap_addr_set(tx_bi, dma, dma);
3371 /* align size to end of page */
3372 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3373 tx_desc->buffer_addr = cpu_to_le64(dma);
3375 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3376 tx_desc->cmd_type_offset_bsz =
3377 build_ctob(td_cmd, td_offset,
3378 max_data, td_tag);
3380 tx_desc++;
3381 i++;
3382 desc_count++;
3384 if (i == tx_ring->count) {
3385 tx_desc = I40E_TX_DESC(tx_ring, 0);
3386 i = 0;
3387 }
3389 dma += max_data;
3390 size -= max_data;
3392 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3393 tx_desc->buffer_addr = cpu_to_le64(dma);
3394 }
3396 if (likely(!data_len))
3397 break;
3399 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3400 size, td_tag);
3402 tx_desc++;
3403 i++;
3404 desc_count++;
3406 if (i == tx_ring->count) {
3407 tx_desc = I40E_TX_DESC(tx_ring, 0);
3408 i = 0;
3409 }
3411 size = skb_frag_size(frag);
3412 data_len -= size;
3414 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3415 DMA_TO_DEVICE);
3417 tx_bi = &tx_ring->tx_bi[i];
3418 }
3420 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3422 i++;
3423 if (i == tx_ring->count)
3424 i = 0;
3426 tx_ring->next_to_use = i;
3428 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3430 /* write last descriptor with EOP bit */
3431 td_cmd |= I40E_TX_DESC_CMD_EOP;
3433 /* We OR these values together to check both against 4 (WB_STRIDE)
3434 * below. This is safe since we don't re-use desc_count afterwards.
3436 desc_count |= ++tx_ring->packet_stride;
3438 if (desc_count >= WB_STRIDE) {
3439 /* write last descriptor with RS bit set */
3440 td_cmd |= I40E_TX_DESC_CMD_RS;
3441 tx_ring->packet_stride = 0;
3442 }
3444 tx_desc->cmd_type_offset_bsz =
3445 build_ctob(td_cmd, td_offset, size, td_tag);
3447 /* Force memory writes to complete before letting h/w know there
3448 * are new descriptors to fetch.
3450 * We also use this memory barrier to make certain all of the
3451 * status bits have been updated before next_to_watch is written.
3452 */
3453 wmb();
3455 /* set next_to_watch value indicating a packet is present */
3456 first->next_to_watch = tx_desc;
3458 /* notify HW of packet */
3459 if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
3460 writel(i, tx_ring->tail);
3462 /* we need this if more than one processor can write to our tail
3463 * at a time, it synchronizes IO on IA64/Altix systems
3464 */
3465 mmiowb();
3466 }
3468 return 0;
3470 dma_error:
3471 dev_info(tx_ring->dev, "TX DMA map failed\n");
3473 /* clear dma mappings for failed tx_bi map */
3474 for (;;) {
3475 tx_bi = &tx_ring->tx_bi[i];
3476 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3477 if (tx_bi == first)
3478 break;
3479 if (i == 0)
3480 i = tx_ring->count;
3481 i--;
3482 }
3484 tx_ring->next_to_use = i;
3486 return -1;
3487 }
3489 /**
3490 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3491 * @xdpf: data to transmit
3492 * @xdp_ring: XDP Tx ring
3493 **/
3494 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
3495 struct i40e_ring *xdp_ring)
3496 {
3497 u16 i = xdp_ring->next_to_use;
3498 struct i40e_tx_buffer *tx_bi;
3499 struct i40e_tx_desc *tx_desc;
3500 u32 size = xdpf->len;
3501 dma_addr_t dma;
3503 if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
3504 xdp_ring->tx_stats.tx_busy++;
3505 return I40E_XDP_CONSUMED;
3506 }
3508 dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
3509 if (dma_mapping_error(xdp_ring->dev, dma))
3510 return I40E_XDP_CONSUMED;
3512 tx_bi = &xdp_ring->tx_bi[i];
3513 tx_bi->bytecount = size;
3514 tx_bi->gso_segs = 1;
3515 tx_bi->xdpf = xdpf;
3517 /* record length, and DMA address */
3518 dma_unmap_len_set(tx_bi, len, size);
3519 dma_unmap_addr_set(tx_bi, dma, dma);
3521 tx_desc = I40E_TX_DESC(xdp_ring, i);
3522 tx_desc->buffer_addr = cpu_to_le64(dma);
3523 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3524 | I40E_TX_DESC_CMD_EOP,
3525 0, size, 0);
3527 /* Make certain all of the status bits have been updated
3528 * before next_to_watch is written.
3529 */
3530 smp_wmb();
3532 i++;
3533 if (i == xdp_ring->count)
3534 i = 0;
3536 tx_bi->next_to_watch = tx_desc;
3537 xdp_ring->next_to_use = i;
3539 return I40E_XDP_TX;
3540 }
3542 /**
3543 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3544 * @skb: send buffer
3545 * @tx_ring: ring to send buffer on
3547 * Returns NETDEV_TX_OK if sent, else an error code
3548 **/
3549 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3550 struct i40e_ring *tx_ring)
3551 {
3552 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3553 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3554 struct i40e_tx_buffer *first;
3555 u32 td_offset = 0;
3556 u32 tx_flags = 0;
3557 __be16 protocol;
3558 u32 td_cmd = 0;
3559 u8 hdr_len = 0;
3560 int tso, count;
3561 int tsyn;
3563 /* prefetch the data, we'll need it later */
3564 prefetch(skb->data);
3566 i40e_trace(xmit_frame_ring, skb, tx_ring);
3568 count = i40e_xmit_descriptor_count(skb);
3569 if (i40e_chk_linearize(skb, count)) {
3570 if (__skb_linearize(skb)) {
3571 dev_kfree_skb_any(skb);
3572 return NETDEV_TX_OK;
3573 }
3574 count = i40e_txd_use_count(skb->len);
3575 tx_ring->tx_stats.tx_linearize++;
3576 }
3578 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3579 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3580 * + 4 desc gap to avoid the cache line where head is,
3581 * + 1 desc for context descriptor,
3582 * otherwise try next time
3583 */
3584 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3585 tx_ring->tx_stats.tx_busy++;
3586 return NETDEV_TX_BUSY;
3587 }
3589 /* record the location of the first descriptor for this packet */
3590 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3591 first->skb = skb;
3592 first->bytecount = skb->len;
3593 first->gso_segs = 1;
3595 /* prepare the xmit flags */
3596 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3597 goto out_drop;
3599 /* obtain protocol of skb */
3600 protocol = vlan_get_protocol(skb);
3602 /* setup IPv4/IPv6 offloads */
3603 if (protocol == htons(ETH_P_IP))
3604 tx_flags |= I40E_TX_FLAGS_IPV4;
3605 else if (protocol == htons(ETH_P_IPV6))
3606 tx_flags |= I40E_TX_FLAGS_IPV6;
3608 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3610 if (tso < 0)
3611 goto out_drop;
3612 else if (tso)
3613 tx_flags |= I40E_TX_FLAGS_TSO;
3615 /* Always offload the checksum, since it's in the data descriptor */
3616 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3617 tx_ring, &cd_tunneling);
3618 if (tso < 0)
3619 goto out_drop;
3621 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3623 if (tsyn)
3624 tx_flags |= I40E_TX_FLAGS_TSYN;
3626 skb_tx_timestamp(skb);
3628 /* always enable CRC insertion offload */
3629 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3631 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3632 cd_tunneling, cd_l2tag2);
3634 /* Add Flow Director ATR if it's enabled.
3636 * NOTE: this must always be directly before the data descriptor.
3637 */
3638 i40e_atr(tx_ring, skb, tx_flags);
3640 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3641 td_cmd, td_offset))
3642 goto cleanup_tx_tstamp;
3644 return NETDEV_TX_OK;
3646 out_drop:
3647 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3648 dev_kfree_skb_any(first->skb);
3649 first->skb = NULL;
3650 cleanup_tx_tstamp:
3651 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3652 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3654 dev_kfree_skb_any(pf->ptp_tx_skb);
3655 pf->ptp_tx_skb = NULL;
3656 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3657 }
3659 return NETDEV_TX_OK;
3660 }
3662 /**
3663 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3664 * @skb: send buffer
3665 * @netdev: network interface device structure
3667 * Returns NETDEV_TX_OK if sent, else an error code
3668 **/
3669 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3670 {
3671 struct i40e_netdev_priv *np = netdev_priv(netdev);
3672 struct i40e_vsi *vsi = np->vsi;
3673 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3675 /* hardware can't handle really short frames, hardware padding works
3676 * beyond this point
3677 */
3678 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3679 return NETDEV_TX_OK;
3681 return i40e_xmit_frame_ring(skb, tx_ring);
3682 }
3684 /**
3685 * i40e_xdp_xmit - Implements ndo_xdp_xmit
3686 * @dev: netdev
3687 * @n: number of frames
3688 * @frames: array of XDP frames to transmit
3689 * Returns number of frames successfully sent. Frames that fail are
3690 * freed via the XDP return API.
3692 * For error cases, a negative errno code is returned and no-frames
3693 * are transmitted (caller must handle freeing frames).
3694 **/
3695 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
3696 u32 flags)
3697 {
3698 struct i40e_netdev_priv *np = netdev_priv(dev);
3699 unsigned int queue_index = smp_processor_id();
3700 struct i40e_vsi *vsi = np->vsi;
3701 struct i40e_ring *xdp_ring;
3702 int drops = 0;
3703 int i;
3705 if (test_bit(__I40E_VSI_DOWN, vsi->state))
3706 return -ENETDOWN;
3708 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
3709 return -ENXIO;
3711 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3712 return -EINVAL;
3714 xdp_ring = vsi->xdp_rings[queue_index];
3716 for (i = 0; i < n; i++) {
3717 struct xdp_frame *xdpf = frames[i];
3718 int err;
3720 err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
3721 if (err != I40E_XDP_TX) {
3722 xdp_return_frame_rx_napi(xdpf);
3723 drops++;
3724 }
3725 }
3727 if (unlikely(flags & XDP_XMIT_FLUSH))
3728 i40e_xdp_ring_update_tail(xdp_ring);
3730 return n - drops;
3731 }