1 /*******************************************************************************
3 * Intel Ethernet Controller XL710 Family Linux Driver
4 * Copyright(c) 2013 - 2016 Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program. If not, see <http://www.gnu.org/licenses/>.
18 * The full GNU General Public License is included in this distribution in
19 * the file called "COPYING".
21 * Contact Information:
22 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 ******************************************************************************/
27 #include <linux/prefetch.h>
28 #include <net/busy_poll.h>
29 #include <linux/bpf_trace.h>
31 #include "i40e_trace.h"
32 #include "i40e_prototype.h"
34 static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
37 return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
38 ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
39 ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
40 ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
41 ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
44 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
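/* Convenience bundle of the End Of Packet and Report Status bits that the
 * driver ORs into the command field of a frame's last descriptor.
 */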
46 * i40e_fdir - Generate a Flow Director descriptor based on fdata
47 * @tx_ring: Tx ring to send buffer on
48 * @fdata: Flow director filter data
49 * @add: Indicate if we are adding a rule or deleting one
52 static void i40e_fdir(struct i40e_ring *tx_ring,
53 struct i40e_fdir_filter *fdata, bool add)
55 struct i40e_filter_program_desc *fdir_desc;
56 struct i40e_pf *pf = tx_ring->vsi->back;
57 u32 flex_ptype, dtype_cmd;
60 /* grab the next descriptor */
61 i = tx_ring->next_to_use;
62 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
65 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
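/* Pack quadword 0 of the programming descriptor: queue index, flexible
 * payload offset, packet classification type (PCTYPE) and destination VSI
 * each sit behind their own mask/shift pair.
 */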
67 flex_ptype = I40E_TXD_FLTR_QW0_QINDEX_MASK &
68 (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT);
70 flex_ptype |= I40E_TXD_FLTR_QW0_FLEXOFF_MASK &
71 (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
73 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
74 (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
76 flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
77 (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
79 /* Use LAN VSI Id if not programmed by user */
80 flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
81 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
82 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
84 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
87 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
88 I40E_TXD_FLTR_QW1_PCMD_SHIFT :
89 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
90 I40E_TXD_FLTR_QW1_PCMD_SHIFT;
92 dtype_cmd |= I40E_TXD_FLTR_QW1_DEST_MASK &
93 (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT);
95 dtype_cmd |= I40E_TXD_FLTR_QW1_FD_STATUS_MASK &
96 (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT);
98 if (fdata->cnt_index) {
99 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
100 dtype_cmd |= I40E_TXD_FLTR_QW1_CNTINDEX_MASK &
101 ((u32)fdata->cnt_index <<
102 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT);
105 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
106 fdir_desc->rsvd = cpu_to_le32(0);
107 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
108 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id);
111 #define I40E_FD_CLEAN_DELAY 10
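/* Upper bound on the number of 1 ms waits i40e_program_fdir_filter() will
 * make while waiting for two free descriptors on the FDIR ring.
 */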
113 * i40e_program_fdir_filter - Program a Flow Director filter
114 * @fdir_data: Packet data that will be filter parameters
115 * @raw_packet: the pre-allocated packet buffer for FDir
116 * @pf: The PF pointer
117 * @add: True for add/update, False for remove
119 static int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data,
120 u8 *raw_packet, struct i40e_pf *pf,
123 struct i40e_tx_buffer *tx_buf, *first;
124 struct i40e_tx_desc *tx_desc;
125 struct i40e_ring *tx_ring;
126 struct i40e_vsi *vsi;
132 /* find existing FDIR VSI */
133 vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
137 tx_ring = vsi->tx_rings[0];
140 /* we need two descriptors to add/del a filter and we can wait */
141 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) {
144 msleep_interruptible(1);
147 dma = dma_map_single(dev, raw_packet,
148 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
149 if (dma_mapping_error(dev, dma))
152 /* grab the next descriptor */
153 i = tx_ring->next_to_use;
154 first = &tx_ring->tx_bi[i];
155 i40e_fdir(tx_ring, fdir_data, add);
157 /* Now program a dummy descriptor */
158 i = tx_ring->next_to_use;
159 tx_desc = I40E_TX_DESC(tx_ring, i);
160 tx_buf = &tx_ring->tx_bi[i];
162 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
164 memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
166 /* record length, and DMA address */
167 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
168 dma_unmap_addr_set(tx_buf, dma, dma);
170 tx_desc->buffer_addr = cpu_to_le64(dma);
171 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
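/* The DUMMY bit keeps this frame off the wire; the raw packet exists only
 * so the hardware can parse it and extract the header fields that define
 * the filter being programmed.
 */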
173 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
174 tx_buf->raw_buf = (void *)raw_packet;
176 tx_desc->cmd_type_offset_bsz =
177 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
179 /* Force memory writes to complete before letting h/w
180 * know there are new descriptors to fetch.
184 /* Mark the data descriptor to be watched */
185 first->next_to_watch = tx_desc;
187 writel(tx_ring->next_to_use, tx_ring->tail);
194 #define IP_HEADER_OFFSET 14
195 #define I40E_UDPIP_DUMMY_PACKET_LEN 42
197 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
198 * @vsi: pointer to the targeted VSI
199 * @fd_data: the flow director data required for the FDir descriptor
200 * @add: true adds a filter, false removes it
202 * Returns 0 if the filters were successfully added or removed
204 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
205 struct i40e_fdir_filter *fd_data,
208 struct i40e_pf *pf = vsi->back;
213 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
214 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
215 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
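/* Minimal 42-byte UDP/IPv4 frame: 14-byte Ethernet header (EtherType
 * 0x0800), 20-byte IPv4 header (protocol 0x11 = UDP) and 8-byte UDP header.
 * Addresses and ports are patched in below before programming the filter.
 */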
217 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
220 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
222 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
223 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
224 + sizeof(struct iphdr));
226 ip->daddr = fd_data->dst_ip;
227 udp->dest = fd_data->dst_port;
228 ip->saddr = fd_data->src_ip;
229 udp->source = fd_data->src_port;
231 if (fd_data->flex_filter) {
232 u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
233 __be16 pattern = fd_data->flex_word;
234 u16 off = fd_data->flex_offset;
236 *((__force __be16 *)(payload + off)) = pattern;
239 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
240 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
242 dev_info(&pf->pdev->dev,
243 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
244 fd_data->pctype, fd_data->fd_id, ret);
245 /* Free the packet buffer since it wasn't added to the ring */
248 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
250 dev_info(&pf->pdev->dev,
251 "Filter OK for PCTYPE %d loc = %d\n",
252 fd_data->pctype, fd_data->fd_id);
254 dev_info(&pf->pdev->dev,
255 "Filter deleted for PCTYPE %d loc = %d\n",
256 fd_data->pctype, fd_data->fd_id);
260 pf->fd_udp4_filter_cnt++;
262 pf->fd_udp4_filter_cnt--;
267 #define I40E_TCPIP_DUMMY_PACKET_LEN 54
269 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
270 * @vsi: pointer to the targeted VSI
271 * @fd_data: the flow director data required for the FDir descriptor
272 * @add: true adds a filter, false removes it
274 * Returns 0 if the filters were successfully added or removed
276 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
277 struct i40e_fdir_filter *fd_data,
280 struct i40e_pf *pf = vsi->back;
286 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
287 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
288 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
289 0x0, 0x72, 0, 0, 0, 0};
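/* 54-byte TCP/IPv4 frame: Ethernet + IPv4 (protocol 0x06 = TCP) + 20-byte
 * TCP header. Addresses and ports are patched in below.
 */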
291 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
294 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
296 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
297 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
298 + sizeof(struct iphdr));
300 ip->daddr = fd_data->dst_ip;
301 tcp->dest = fd_data->dst_port;
302 ip->saddr = fd_data->src_ip;
303 tcp->source = fd_data->src_port;
305 if (fd_data->flex_filter) {
306 u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
307 __be16 pattern = fd_data->flex_word;
308 u16 off = fd_data->flex_offset;
310 *((__force __be16 *)(payload + off)) = pattern;
313 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
314 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
316 dev_info(&pf->pdev->dev,
317 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
318 fd_data->pctype, fd_data->fd_id, ret);
319 /* Free the packet buffer since it wasn't added to the ring */
322 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
324 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d\n",
325 fd_data->pctype, fd_data->fd_id);
327 dev_info(&pf->pdev->dev,
328 "Filter deleted for PCTYPE %d loc = %d\n",
329 fd_data->pctype, fd_data->fd_id);
333 pf->fd_tcp4_filter_cnt++;
334 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
335 I40E_DEBUG_FD & pf->hw.debug_mask)
336 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
337 pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
339 pf->fd_tcp4_filter_cnt--;
345 #define I40E_SCTPIP_DUMMY_PACKET_LEN 46
347 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
348 * a specific flow spec
349 * @vsi: pointer to the targeted VSI
350 * @fd_data: the flow director data required for the FDir descriptor
351 * @add: true adds a filter, false removes it
353 * Returns 0 if the filters were successfully added or removed
355 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
356 struct i40e_fdir_filter *fd_data,
359 struct i40e_pf *pf = vsi->back;
360 struct sctphdr *sctp;
365 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
366 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
367 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
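/* 46-byte SCTP/IPv4 frame: Ethernet + IPv4 (protocol 0x84 = SCTP) + the
 * 12-byte common SCTP header. Addresses and ports are patched in below.
 */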
369 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
372 memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
374 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
375 sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
376 + sizeof(struct iphdr));
378 ip->daddr = fd_data->dst_ip;
379 sctp->dest = fd_data->dst_port;
380 ip->saddr = fd_data->src_ip;
381 sctp->source = fd_data->src_port;
383 if (fd_data->flex_filter) {
384 u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
385 __be16 pattern = fd_data->flex_word;
386 u16 off = fd_data->flex_offset;
388 *((__force __be16 *)(payload + off)) = pattern;
391 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
392 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
394 dev_info(&pf->pdev->dev,
395 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
396 fd_data->pctype, fd_data->fd_id, ret);
397 /* Free the packet buffer since it wasn't added to the ring */
400 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
402 dev_info(&pf->pdev->dev,
403 "Filter OK for PCTYPE %d loc = %d\n",
404 fd_data->pctype, fd_data->fd_id);
406 dev_info(&pf->pdev->dev,
407 "Filter deleted for PCTYPE %d loc = %d\n",
408 fd_data->pctype, fd_data->fd_id);
412 pf->fd_sctp4_filter_cnt++;
414 pf->fd_sctp4_filter_cnt--;
419 #define I40E_IP_DUMMY_PACKET_LEN 34
421 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
422 * a specific flow spec
423 * @vsi: pointer to the targeted VSI
424 * @fd_data: the flow director data required for the FDir descriptor
425 * @add: true adds a filter, false removes it
427 * Returns 0 if the filters were successfully added or removed
429 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
430 struct i40e_fdir_filter *fd_data,
433 struct i40e_pf *pf = vsi->back;
438 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
439 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
442 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
443 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
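/* Plain IPv4 rules are programmed once per PCTYPE from NONF_IPV4_OTHER
 * through FRAG_IPV4 so that both non-fragmented and fragmented traffic
 * match the filter.
 */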
444 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
447 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
448 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
450 ip->saddr = fd_data->src_ip;
451 ip->daddr = fd_data->dst_ip;
454 if (fd_data->flex_filter) {
455 u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
456 __be16 pattern = fd_data->flex_word;
457 u16 off = fd_data->flex_offset;
459 *((__force __be16 *)(payload + off)) = pattern;
463 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
465 dev_info(&pf->pdev->dev,
466 "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
467 fd_data->pctype, fd_data->fd_id, ret);
468 /* The packet buffer wasn't added to the ring so we
469 * need to free it now.
473 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
475 dev_info(&pf->pdev->dev,
476 "Filter OK for PCTYPE %d loc = %d\n",
477 fd_data->pctype, fd_data->fd_id);
479 dev_info(&pf->pdev->dev,
480 "Filter deleted for PCTYPE %d loc = %d\n",
481 fd_data->pctype, fd_data->fd_id);
486 pf->fd_ip4_filter_cnt++;
488 pf->fd_ip4_filter_cnt--;
494 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
495 * @vsi: pointer to the targeted VSI
496 * @input: flow director filter data with the rule to add or delete
497 * @add: true adds a filter, false removes it
500 int i40e_add_del_fdir(struct i40e_vsi *vsi,
501 struct i40e_fdir_filter *input, bool add)
503 struct i40e_pf *pf = vsi->back;
506 switch (input->flow_type & ~FLOW_EXT) {
508 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
511 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
514 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
517 switch (input->ip4_proto) {
519 ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
522 ret = i40e_add_del_fdir_udpv4(vsi, input, add);
525 ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
528 ret = i40e_add_del_fdir_ipv4(vsi, input, add);
531 /* We cannot support masking based on protocol */
532 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
538 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n",
543 /* The buffer allocated here will normally be freed by
544 * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
545 * completion. In the event of an error adding the buffer to the FDIR
546 * ring, it will immediately be freed. It may also be freed by
547 * i40e_clean_tx_ring() when closing the VSI.
553 * i40e_fd_handle_status - check the Programming Status for FD
554 * @rx_ring: the Rx ring for this descriptor
555 * @rx_desc: the Rx descriptor for programming Status, not a packet descriptor.
556 * @prog_id: the id originally used for programming
558 * This is used to verify whether the FD programming or invalidation
559 * requested by SW to the HW succeeded, and to take actions accordingly.
561 static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
562 union i40e_rx_desc *rx_desc, u8 prog_id)
564 struct i40e_pf *pf = rx_ring->vsi->back;
565 struct pci_dev *pdev = pf->pdev;
566 u32 fcnt_prog, fcnt_avail;
570 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
571 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
572 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
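/* Two programming errors are handled here: the filter table being full
 * (FD_TBL_FULL) and an attempt to remove an entry that does not exist
 * (NO_FD_ENTRY).
 */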
574 if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
575 pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
576 if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
577 (I40E_DEBUG_FD & pf->hw.debug_mask))
578 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
581 /* Check if the programming error is for ATR.
582 * If so, auto disable ATR and set a state for
583 * flush in progress. Next time we come here if flush is in
584 * progress do nothing, once flush is complete the state will
587 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
591 /* store the current atr filter count */
592 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf);
594 if ((rx_desc->wb.qword0.hi_dword.fd_id == 0) &&
595 pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
596 pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
597 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
600 /* filter programming failed most likely due to table full */
601 fcnt_prog = i40e_get_global_fd_count(pf);
602 fcnt_avail = pf->fdir_pf_filter_count;
603 /* If ATR is running fcnt_prog can quickly change,
604 * if we are very close to full, it makes sense to disable
605 * FD ATR/SB and then re-enable it when there is room.
607 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
608 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
609 !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED)) {
610 pf->flags |= I40E_FLAG_FD_SB_AUTO_DISABLED;
611 if (I40E_DEBUG_FD & pf->hw.debug_mask)
612 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
615 } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
616 if (I40E_DEBUG_FD & pf->hw.debug_mask)
617 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
618 rx_desc->wb.qword0.hi_dword.fd_id);
623 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
624 * @ring: the ring that owns the buffer
625 * @tx_buffer: the buffer to free
627 static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
628 struct i40e_tx_buffer *tx_buffer)
630 if (tx_buffer->skb) {
631 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
632 kfree(tx_buffer->raw_buf);
633 else if (ring_is_xdp(ring))
634 page_frag_free(tx_buffer->raw_buf);
636 dev_kfree_skb_any(tx_buffer->skb);
637 if (dma_unmap_len(tx_buffer, len))
638 dma_unmap_single(ring->dev,
639 dma_unmap_addr(tx_buffer, dma),
640 dma_unmap_len(tx_buffer, len),
642 } else if (dma_unmap_len(tx_buffer, len)) {
643 dma_unmap_page(ring->dev,
644 dma_unmap_addr(tx_buffer, dma),
645 dma_unmap_len(tx_buffer, len),
649 tx_buffer->next_to_watch = NULL;
650 tx_buffer->skb = NULL;
651 dma_unmap_len_set(tx_buffer, len, 0);
652 /* tx_buffer must be completely set up in the transmit path */
656 * i40e_clean_tx_ring - Free any pending Tx buffers
657 * @tx_ring: ring to be cleaned
659 void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
661 unsigned long bi_size;
664 /* ring already cleared, nothing to do */
668 /* Free all the Tx ring sk_buffs */
669 for (i = 0; i < tx_ring->count; i++)
670 i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);
672 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
673 memset(tx_ring->tx_bi, 0, bi_size);
675 /* Zero out the descriptor ring */
676 memset(tx_ring->desc, 0, tx_ring->size);
678 tx_ring->next_to_use = 0;
679 tx_ring->next_to_clean = 0;
681 if (!tx_ring->netdev)
684 /* cleanup Tx queue statistics */
685 netdev_tx_reset_queue(txring_txq(tx_ring));
689 * i40e_free_tx_resources - Free Tx resources per queue
690 * @tx_ring: Tx descriptor ring for a specific queue
692 * Free all transmit software resources
694 void i40e_free_tx_resources(struct i40e_ring *tx_ring)
696 i40e_clean_tx_ring(tx_ring);
697 kfree(tx_ring->tx_bi);
698 tx_ring->tx_bi = NULL;
701 dma_free_coherent(tx_ring->dev, tx_ring->size,
702 tx_ring->desc, tx_ring->dma);
703 tx_ring->desc = NULL;
708 * i40e_get_tx_pending - how many tx descriptors not processed
709 * @ring: the ring of descriptors
711 * Since there is no access to the ring head register
712 * in XL710, we need to use our local copies
714 u32 i40e_get_tx_pending(struct i40e_ring *ring)
718 head = i40e_get_head(ring);
719 tail = readl(ring->tail);
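/* Pending work is the distance from the last descriptor the hardware
 * completed (head) to the last one software queued (tail), with wrap-around.
 * For example, with count = 512, head = 500 and tail = 10 there are
 * 10 + 512 - 500 = 22 descriptors still outstanding.
 */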
722 return (head < tail) ?
723 tail - head : (tail + ring->count - head);
731 * i40e_clean_tx_irq - Reclaim resources after transmit completes
732 * @vsi: the VSI we care about
733 * @tx_ring: Tx ring to clean
734 * @napi_budget: Used to determine if we are in netpoll
736 * Returns true if there's any budget left (i.e. the clean is finished)
738 static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
739 struct i40e_ring *tx_ring, int napi_budget)
741 u16 i = tx_ring->next_to_clean;
742 struct i40e_tx_buffer *tx_buf;
743 struct i40e_tx_desc *tx_head;
744 struct i40e_tx_desc *tx_desc;
745 unsigned int total_bytes = 0, total_packets = 0;
746 unsigned int budget = vsi->work_limit;
748 tx_buf = &tx_ring->tx_bi[i];
749 tx_desc = I40E_TX_DESC(tx_ring, i);
752 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
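/* i40e_get_head() reads the head index the hardware writes back into the
 * extra word reserved after the descriptors (see i40e_setup_tx_descriptors()),
 * so completions are detected without reading a head register.
 */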
755 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
757 /* if next_to_watch is not set then there is no work pending */
761 /* prevent any other reads prior to eop_desc */
762 read_barrier_depends();
764 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
765 /* we have caught up to head, no work left to do */
766 if (tx_head == tx_desc)
769 /* clear next_to_watch to prevent false hangs */
770 tx_buf->next_to_watch = NULL;
772 /* update the statistics for this packet */
773 total_bytes += tx_buf->bytecount;
774 total_packets += tx_buf->gso_segs;
776 /* free the skb/XDP data */
777 if (ring_is_xdp(tx_ring))
778 page_frag_free(tx_buf->raw_buf);
780 napi_consume_skb(tx_buf->skb, napi_budget);
782 /* unmap skb header data */
783 dma_unmap_single(tx_ring->dev,
784 dma_unmap_addr(tx_buf, dma),
785 dma_unmap_len(tx_buf, len),
788 /* clear tx_buffer data */
790 dma_unmap_len_set(tx_buf, len, 0);
792 /* unmap remaining buffers */
793 while (tx_desc != eop_desc) {
794 i40e_trace(clean_tx_irq_unmap,
795 tx_ring, tx_desc, tx_buf);
802 tx_buf = tx_ring->tx_bi;
803 tx_desc = I40E_TX_DESC(tx_ring, 0);
806 /* unmap any remaining paged data */
807 if (dma_unmap_len(tx_buf, len)) {
808 dma_unmap_page(tx_ring->dev,
809 dma_unmap_addr(tx_buf, dma),
810 dma_unmap_len(tx_buf, len),
812 dma_unmap_len_set(tx_buf, len, 0);
816 /* move us one more past the eop_desc for start of next pkt */
822 tx_buf = tx_ring->tx_bi;
823 tx_desc = I40E_TX_DESC(tx_ring, 0);
828 /* update budget accounting */
830 } while (likely(budget));
833 tx_ring->next_to_clean = i;
834 u64_stats_update_begin(&tx_ring->syncp);
835 tx_ring->stats.bytes += total_bytes;
836 tx_ring->stats.packets += total_packets;
837 u64_stats_update_end(&tx_ring->syncp);
838 tx_ring->q_vector->tx.total_bytes += total_bytes;
839 tx_ring->q_vector->tx.total_packets += total_packets;
841 if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
842 /* check to see if there are < 4 descriptors
843 * waiting to be written back, then kick the hardware to force
844 * them to be written back in case we stay in NAPI.
845 * In this mode on X722 we do not enable Interrupt.
847 unsigned int j = i40e_get_tx_pending(tx_ring);
850 ((j / WB_STRIDE) == 0) && (j > 0) &&
851 !test_bit(__I40E_VSI_DOWN, vsi->state) &&
852 (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
853 tx_ring->arm_wb = true;
856 if (ring_is_xdp(tx_ring))
859 /* notify netdev of completed buffers */
860 netdev_tx_completed_queue(txring_txq(tx_ring),
861 total_packets, total_bytes);
863 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
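/* Restart a stopped queue once at least twice the worst-case descriptor
 * count for a single frame (DESC_NEEDED) is free again.
 */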
864 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
865 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
866 /* Make sure that anybody stopping the queue after this
867 * sees the new next_to_clean.
870 if (__netif_subqueue_stopped(tx_ring->netdev,
871 tx_ring->queue_index) &&
872 !test_bit(__I40E_VSI_DOWN, vsi->state)) {
873 netif_wake_subqueue(tx_ring->netdev,
874 tx_ring->queue_index);
875 ++tx_ring->tx_stats.restart_queue;
883 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
884 * @vsi: the VSI we care about
885 * @q_vector: the vector on which to enable writeback
888 static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
889 struct i40e_q_vector *q_vector)
891 u16 flags = q_vector->tx.ring[0].flags;
894 if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
897 if (q_vector->arm_wb_state)
900 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
901 val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
902 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
905 I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
908 val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
909 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
911 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
913 q_vector->arm_wb_state = true;
917 * i40e_force_wb - Issue SW Interrupt so HW does a wb
918 * @vsi: the VSI we care about
919 * @q_vector: the vector on which to force writeback
922 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
924 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
925 u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
926 I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
927 I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
928 I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;
929 /* allow 00 to be written to the index */
932 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
933 vsi->base_vector - 1), val);
935 u32 val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
936 I40E_PFINT_DYN_CTL0_ITR_INDX_MASK | /* set noitr */
937 I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
938 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK;
939 /* allow 00 to be written to the index */
941 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
946 * i40e_set_new_dynamic_itr - Find new ITR level
947 * @rc: structure containing ring performance data
949 * Returns true if ITR changed, false if not
951 * Stores a new ITR value based on packets and byte counts during
952 * the last interrupt. The advantage of per interrupt computation
953 * is faster updates and more accurate ITR for the current traffic
954 * pattern. Constants in this function were computed based on
955 * theoretical maximum wire speed and thresholds were set based on
956 * testing data as well as attempting to minimize response time
957 * while increasing bulk throughput.
959 static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
961 enum i40e_latency_range new_latency_range = rc->latency_range;
962 u32 new_itr = rc->itr;
964 unsigned int usecs, estimated_usecs;
966 if (rc->total_packets == 0 || !rc->itr)
969 usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
970 bytes_per_int = rc->total_bytes / usecs;
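/* bytes_per_int is throughput in bytes per microsecond, i.e. roughly MB/s.
 * For example, ~10,000 frames/s of 1500 bytes is 15,000,000 B/s = 15 bytes/us,
 * which lands in the 10-20 MB/s "low latency" bucket below.
 */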
972 /* The calculations in this algorithm depend on interrupts actually
973 * firing at the ITR rate. This may not happen if the packet rate is
974 * really low, or if we've been napi polling. Check to make sure
975 * that's not the case before we continue.
977 estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
978 if (estimated_usecs > usecs) {
979 new_latency_range = I40E_LOW_LATENCY;
983 /* simple throttlerate management
984 * 0-10MB/s lowest (50000 ints/s)
985 * 10-20MB/s low (20000 ints/s)
986 * 20-1249MB/s bulk (18000 ints/s)
988 * The math works out because the divisor is in 10^(-6) which
989 * turns the bytes/us input value into MB/s values, but
990 * make sure to use usecs, as the register values written
991 * are in 2 usec increments in the ITR registers, and make sure
992 * to use the smoothed values that the countdown timer gives us.
994 switch (new_latency_range) {
995 case I40E_LOWEST_LATENCY:
996 if (bytes_per_int > 10)
997 new_latency_range = I40E_LOW_LATENCY;
999 case I40E_LOW_LATENCY:
1000 if (bytes_per_int > 20)
1001 new_latency_range = I40E_BULK_LATENCY;
1002 else if (bytes_per_int <= 10)
1003 new_latency_range = I40E_LOWEST_LATENCY;
1005 case I40E_BULK_LATENCY:
1007 if (bytes_per_int <= 20)
1008 new_latency_range = I40E_LOW_LATENCY;
1013 rc->latency_range = new_latency_range;
1015 switch (new_latency_range) {
1016 case I40E_LOWEST_LATENCY:
1017 new_itr = I40E_ITR_50K;
1019 case I40E_LOW_LATENCY:
1020 new_itr = I40E_ITR_20K;
1022 case I40E_BULK_LATENCY:
1023 new_itr = I40E_ITR_18K;
1029 rc->total_bytes = 0;
1030 rc->total_packets = 0;
1031 rc->last_itr_update = jiffies;
1033 if (new_itr != rc->itr) {
1041 * i40e_rx_is_programming_status - check for programming status descriptor
1042 * @qw: qword representing status_error_len in CPU ordering
1044 * The value in the descriptor length field indicates whether this
1045 * is a programming status descriptor for flow director or FCoE,
1046 * indicated by the value I40E_RX_PROG_STATUS_DESC_LENGTH; otherwise
1047 * it is a packet descriptor.
1049 static inline bool i40e_rx_is_programming_status(u64 qw)
1051 /* The Rx filter programming status and SPH bit occupy the same
1052 * spot in the descriptor. Since we don't support packet split we
1053 * can just reuse the bit as an indication that this is a
1054 * programming status descriptor.
1056 return qw & I40E_RXD_QW1_LENGTH_SPH_MASK;
1060 * i40e_clean_programming_status - clean the programming status descriptor
1061 * @rx_ring: the rx ring that has this descriptor
1062 * @rx_desc: the rx descriptor written back by HW
1063 * @qw: qword representing status_error_len in CPU ordering
1065 * Flow director should handle FD_FILTER_STATUS to check its filter programming
1066 * status being successful or not and take actions accordingly. FCoE should
1067 * handle its context/filter programming/invalidation status and take actions.
1070 static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
1071 union i40e_rx_desc *rx_desc,
1074 u32 ntc = rx_ring->next_to_clean + 1;
1077 /* fetch, update, and store next to clean */
1078 ntc = (ntc < rx_ring->count) ? ntc : 0;
1079 rx_ring->next_to_clean = ntc;
1081 prefetch(I40E_RX_DESC(rx_ring, ntc));
1083 id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1084 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1086 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
1087 i40e_fd_handle_status(rx_ring, rx_desc, id);
1091 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1092 * @tx_ring: the tx ring to set up
1094 * Return 0 on success, negative on error
1096 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
1098 struct device *dev = tx_ring->dev;
1104 /* warn if we are about to overwrite the pointer */
1105 WARN_ON(tx_ring->tx_bi);
1106 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
1107 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
1108 if (!tx_ring->tx_bi)
1111 u64_stats_init(&tx_ring->syncp);
1113 /* round up to nearest 4K */
1114 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
1115 /* add u32 for head writeback, align after this takes care of
1116 * guaranteeing this is at least one cache line in size
1118 tx_ring->size += sizeof(u32);
1119 tx_ring->size = ALIGN(tx_ring->size, 4096);
1120 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
1121 &tx_ring->dma, GFP_KERNEL);
1122 if (!tx_ring->desc) {
1123 dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
1128 tx_ring->next_to_use = 0;
1129 tx_ring->next_to_clean = 0;
1133 kfree(tx_ring->tx_bi);
1134 tx_ring->tx_bi = NULL;
1139 * i40e_clean_rx_ring - Free Rx buffers
1140 * @rx_ring: ring to be cleaned
1142 void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
1144 unsigned long bi_size;
1147 /* ring already cleared, nothing to do */
1148 if (!rx_ring->rx_bi)
1152 dev_kfree_skb(rx_ring->skb);
1153 rx_ring->skb = NULL;
1156 /* Free all the Rx ring sk_buffs */
1157 for (i = 0; i < rx_ring->count; i++) {
1158 struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
1163 /* Invalidate cache lines that may have been written to by
1164 * device so that we avoid corrupting memory.
1166 dma_sync_single_range_for_cpu(rx_ring->dev,
1169 rx_ring->rx_buf_len,
1172 /* free resources associated with mapping */
1173 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
1174 i40e_rx_pg_size(rx_ring),
1178 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
1181 rx_bi->page_offset = 0;
1184 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1185 memset(rx_ring->rx_bi, 0, bi_size);
1187 /* Zero out the descriptor ring */
1188 memset(rx_ring->desc, 0, rx_ring->size);
1190 rx_ring->next_to_alloc = 0;
1191 rx_ring->next_to_clean = 0;
1192 rx_ring->next_to_use = 0;
1196 * i40e_free_rx_resources - Free Rx resources
1197 * @rx_ring: ring to clean the resources from
1199 * Free all receive software resources
1201 void i40e_free_rx_resources(struct i40e_ring *rx_ring)
1203 i40e_clean_rx_ring(rx_ring);
1204 rx_ring->xdp_prog = NULL;
1205 kfree(rx_ring->rx_bi);
1206 rx_ring->rx_bi = NULL;
1208 if (rx_ring->desc) {
1209 dma_free_coherent(rx_ring->dev, rx_ring->size,
1210 rx_ring->desc, rx_ring->dma);
1211 rx_ring->desc = NULL;
1216 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1217 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
1219 * Returns 0 on success, negative on failure
1221 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
1223 struct device *dev = rx_ring->dev;
1226 /* warn if we are about to overwrite the pointer */
1227 WARN_ON(rx_ring->rx_bi);
1228 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
1229 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
1230 if (!rx_ring->rx_bi)
1233 u64_stats_init(&rx_ring->syncp);
1235 /* Round up to nearest 4K */
1236 rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
1237 rx_ring->size = ALIGN(rx_ring->size, 4096);
1238 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
1239 &rx_ring->dma, GFP_KERNEL);
1241 if (!rx_ring->desc) {
1242 dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
1247 rx_ring->next_to_alloc = 0;
1248 rx_ring->next_to_clean = 0;
1249 rx_ring->next_to_use = 0;
1251 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
1255 kfree(rx_ring->rx_bi);
1256 rx_ring->rx_bi = NULL;
1261 * i40e_release_rx_desc - Store the new tail and head values
1262 * @rx_ring: ring to bump
1263 * @val: new head index
1265 static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
1267 rx_ring->next_to_use = val;
1269 /* update next to alloc since we have filled the ring */
1270 rx_ring->next_to_alloc = val;
1272 /* Force memory writes to complete before letting h/w
1273 * know there are new descriptors to fetch. (Only
1274 * applicable for weak-ordered memory model archs,
1278 writel(val, rx_ring->tail);
1282 * i40e_rx_offset - Return expected offset into page to access data
1283 * @rx_ring: Ring we are requesting offset of
1285 * Returns the offset value for ring into the data buffer.
1287 static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
1289 return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
1293 * i40e_alloc_mapped_page - recycle or make a new page
1294 * @rx_ring: ring to use
1295 * @bi: rx_buffer struct to modify
1297 * Returns true if the page was successfully allocated or
1300 static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
1301 struct i40e_rx_buffer *bi)
1303 struct page *page = bi->page;
1306 /* since we are recycling buffers we should seldom need to alloc */
1308 rx_ring->rx_stats.page_reuse_count++;
1312 /* alloc new page for storage */
1313 page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
1314 if (unlikely(!page)) {
1315 rx_ring->rx_stats.alloc_page_failed++;
1319 /* map page for use */
1320 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1321 i40e_rx_pg_size(rx_ring),
1325 /* if mapping failed free memory back to system since
1326 * there isn't much point in holding memory we can't use
1328 if (dma_mapping_error(rx_ring->dev, dma)) {
1329 __free_pages(page, i40e_rx_pg_order(rx_ring));
1330 rx_ring->rx_stats.alloc_page_failed++;
1336 bi->page_offset = i40e_rx_offset(rx_ring);
1338 /* initialize pagecnt_bias to 1 representing we fully own page */
1339 bi->pagecnt_bias = 1;
1345 * i40e_receive_skb - Send a completed packet up the stack
1346 * @rx_ring: rx ring in play
1347 * @skb: packet to send up
1348 * @vlan_tag: vlan tag for packet
1350 static void i40e_receive_skb(struct i40e_ring *rx_ring,
1351 struct sk_buff *skb, u16 vlan_tag)
1353 struct i40e_q_vector *q_vector = rx_ring->q_vector;
1355 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1356 (vlan_tag & VLAN_VID_MASK))
1357 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
1359 napi_gro_receive(&q_vector->napi, skb);
1363 * i40e_alloc_rx_buffers - Replace used receive buffers
1364 * @rx_ring: ring to place buffers on
1365 * @cleaned_count: number of buffers to replace
1367 * Returns false if all allocations were successful, true if any fail
1369 bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
1371 u16 ntu = rx_ring->next_to_use;
1372 union i40e_rx_desc *rx_desc;
1373 struct i40e_rx_buffer *bi;
1375 /* do nothing if no valid netdev defined */
1376 if (!rx_ring->netdev || !cleaned_count)
1379 rx_desc = I40E_RX_DESC(rx_ring, ntu);
1380 bi = &rx_ring->rx_bi[ntu];
1383 if (!i40e_alloc_mapped_page(rx_ring, bi))
1386 /* sync the buffer for use by the device */
1387 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
1389 rx_ring->rx_buf_len,
1392 /* Refresh the desc even if buffer_addrs didn't change
1393 * because each write-back erases this info.
1395 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
1400 if (unlikely(ntu == rx_ring->count)) {
1401 rx_desc = I40E_RX_DESC(rx_ring, 0);
1402 bi = rx_ring->rx_bi;
1406 /* clear the status bits for the next_to_use descriptor */
1407 rx_desc->wb.qword1.status_error_len = 0;
1410 } while (cleaned_count);
1412 if (rx_ring->next_to_use != ntu)
1413 i40e_release_rx_desc(rx_ring, ntu);
1418 if (rx_ring->next_to_use != ntu)
1419 i40e_release_rx_desc(rx_ring, ntu);
1421 /* make sure to come back via polling to try again after
1422 * allocation failure
1428 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1429 * @vsi: the VSI we care about
1430 * @skb: skb currently being received and modified
1431 * @rx_desc: the receive descriptor
1433 static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
1434 struct sk_buff *skb,
1435 union i40e_rx_desc *rx_desc)
1437 struct i40e_rx_ptype_decoded decoded;
1438 u32 rx_error, rx_status;
1443 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1444 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
1445 rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
1446 I40E_RXD_QW1_ERROR_SHIFT;
1447 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1448 I40E_RXD_QW1_STATUS_SHIFT;
1449 decoded = decode_rx_desc_ptype(ptype);
1451 skb->ip_summed = CHECKSUM_NONE;
1453 skb_checksum_none_assert(skb);
1455 /* Rx csum enabled and ip headers found? */
1456 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
1459 /* did the hardware decode the packet and checksum? */
1460 if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
1463 /* both known and outer_ip must be set for the below code to work */
1464 if (!(decoded.known && decoded.outer_ip))
1467 ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1468 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
1469 ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
1470 (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
1473 (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
1474 BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
1477 /* likely incorrect csum if alternate IP extension headers found */
1479 rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
1480 /* don't increment checksum err here, non-fatal err */
1483 /* there was some L4 error, count error and punt packet to the stack */
1484 if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
1487 /* handle packets that were not able to be checksummed due
1488 * to arrival speed, in this case the stack can compute
1491 if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
1494 /* If there is an outer header present that might contain a checksum
1495 * we need to bump the checksum level by 1 to reflect the fact that
1496 * we are indicating we validated the inner checksum.
1498 if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
1499 skb->csum_level = 1;
1501 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
1502 switch (decoded.inner_prot) {
1503 case I40E_RX_PTYPE_INNER_PROT_TCP:
1504 case I40E_RX_PTYPE_INNER_PROT_UDP:
1505 case I40E_RX_PTYPE_INNER_PROT_SCTP:
1506 skb->ip_summed = CHECKSUM_UNNECESSARY;
1515 vsi->back->hw_csum_rx_error++;
1519 * i40e_ptype_to_htype - get a hash type
1520 * @ptype: the ptype value from the descriptor
1522 * Returns a hash type to be used by skb_set_hash
1524 static inline int i40e_ptype_to_htype(u8 ptype)
1526 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1529 return PKT_HASH_TYPE_NONE;
1531 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1532 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1533 return PKT_HASH_TYPE_L4;
1534 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1535 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1536 return PKT_HASH_TYPE_L3;
1538 return PKT_HASH_TYPE_L2;
1542 * i40e_rx_hash - set the hash value in the skb
1543 * @ring: descriptor ring
1544 * @rx_desc: specific descriptor
1546 static inline void i40e_rx_hash(struct i40e_ring *ring,
1547 union i40e_rx_desc *rx_desc,
1548 struct sk_buff *skb,
1552 const __le64 rss_mask =
1553 cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
1554 I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
1556 if (!(ring->netdev->features & NETIF_F_RXHASH))
1559 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
1560 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
1561 skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
1566 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1567 * @rx_ring: rx descriptor ring packet is being transacted on
1568 * @rx_desc: pointer to the EOP Rx descriptor
1569 * @skb: pointer to current skb being populated
1570 * @rx_ptype: the packet type decoded by hardware
1572 * This function checks the ring, descriptor, and packet information in
1573 * order to populate the hash, checksum, VLAN, protocol, and
1574 * other fields within the skb.
1577 void i40e_process_skb_fields(struct i40e_ring *rx_ring,
1578 union i40e_rx_desc *rx_desc, struct sk_buff *skb,
1581 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
1582 u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
1583 I40E_RXD_QW1_STATUS_SHIFT;
1584 u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
1585 u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
1586 I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
1588 if (unlikely(tsynvalid))
1589 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
1591 i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
1593 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
1595 skb_record_rx_queue(skb, rx_ring->queue_index);
1597 /* modifies the skb - consumes the enet header */
1598 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1602 * i40e_cleanup_headers - Correct empty headers
1603 * @rx_ring: rx descriptor ring packet is being transacted on
1604 * @skb: pointer to current skb being fixed
1605 * @rx_desc: pointer to the EOP Rx descriptor
1607 * Also address the case where we are pulling data in on pages only
1608 * and as such no data is present in the skb header.
1610 * In addition if skb is not at least 60 bytes we need to pad it so that
1611 * it is large enough to qualify as a valid Ethernet frame.
1613 * Returns true if an error was encountered and skb was freed.
1615 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
1616 union i40e_rx_desc *rx_desc)
1619 /* XDP packets use error pointer so abort at this point */
1623 /* ERR_MASK will only have valid bits if EOP set, and
1624 * what we are doing here is actually checking
1625 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
1628 if (unlikely(i40e_test_staterr(rx_desc,
1629 BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
1630 dev_kfree_skb_any(skb);
1634 /* if eth_skb_pad returns an error the skb was freed */
1635 if (eth_skb_pad(skb))
1642 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1643 * @rx_ring: rx descriptor ring to store buffers on
1644 * @old_buff: donor buffer to have page reused
1646 * Synchronizes page for reuse by the adapter
1648 static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
1649 struct i40e_rx_buffer *old_buff)
1651 struct i40e_rx_buffer *new_buff;
1652 u16 nta = rx_ring->next_to_alloc;
1654 new_buff = &rx_ring->rx_bi[nta];
1656 /* update, and store next to alloc */
1658 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1660 /* transfer page from old buffer to new buffer */
1661 new_buff->dma = old_buff->dma;
1662 new_buff->page = old_buff->page;
1663 new_buff->page_offset = old_buff->page_offset;
1664 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1668 * i40e_page_is_reusable - check if any reuse is possible
1669 * @page: page struct to check
1671 * A page is not reusable if it was allocated under low memory
1672 * conditions, or it's not in the same NUMA node as this CPU.
1674 static inline bool i40e_page_is_reusable(struct page *page)
1676 return (page_to_nid(page) == numa_mem_id()) &&
1677 !page_is_pfmemalloc(page);
1681 * i40e_can_reuse_rx_page - Determine if this page can be reused by
1682 * the adapter for another receive
1684 * @rx_buffer: buffer containing the page
1686 * If page is reusable, rx_buffer->page_offset is adjusted to point to
1687 * an unused region in the page.
1689 * For small pages, @truesize will be a constant value, half the size
1690 * of the memory at page. We'll attempt to alternate between high and
1691 * low halves of the page, with one half ready for use by the hardware
1692 * and the other half being consumed by the stack. We use the page
1693 * ref count to determine whether the stack has finished consuming the
1694 * portion of this page that was passed up with a previous packet. If
1695 * the page ref count is >1, we'll assume the "other" half page is
1696 * still busy, and this page cannot be reused.
1698 * For larger pages, @truesize will be the actual space used by the
1699 * received packet (adjusted upward to an even multiple of the cache
1700 * line size). This will advance through the page by the amount
1701 * actually consumed by the received packets while there is still
1702 * space for a buffer. Each region of larger pages will be used at
1703 * most once, after which the page will not be reused.
1705 * In either case, if the page is reusable its refcount is increased.
1707 static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
1709 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1710 struct page *page = rx_buffer->page;
1712 /* Is any reuse possible? */
1713 if (unlikely(!i40e_page_is_reusable(page)))
1716 #if (PAGE_SIZE < 8192)
1717 /* if we are only owner of page we can reuse it */
1718 if (unlikely((page_count(page) - pagecnt_bias) > 1))
1721 #define I40E_LAST_OFFSET \
1722 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
1723 if (rx_buffer->page_offset > I40E_LAST_OFFSET)
1727 /* If we have drained the page fragment pool we need to update
1728 * the pagecnt_bias and page count so that we fully restock the
1729 * number of references the driver holds.
1731 if (unlikely(!pagecnt_bias)) {
1732 page_ref_add(page, USHRT_MAX);
1733 rx_buffer->pagecnt_bias = USHRT_MAX;
1740 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
1741 * @rx_ring: rx descriptor ring to transact packets on
1742 * @rx_buffer: buffer containing page to add
1743 * @skb: sk_buff to place the data into
1744 * @size: packet length from rx_desc
1746 * This function will add the data contained in rx_buffer->page to the skb.
1747 * It will just attach the page as a frag to the skb.
1749 * The function will then update the page offset.
1751 static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
1752 struct i40e_rx_buffer *rx_buffer,
1753 struct sk_buff *skb,
1756 #if (PAGE_SIZE < 8192)
1757 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1759 unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
1762 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1763 rx_buffer->page_offset, size, truesize);
1765 /* page is being used so we must update the page offset */
1766 #if (PAGE_SIZE < 8192)
1767 rx_buffer->page_offset ^= truesize;
1769 rx_buffer->page_offset += truesize;
1774 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
1775 * @rx_ring: rx descriptor ring to transact packets on
1776 * @size: size of buffer to add to skb
1778 * This function will pull an Rx buffer from the ring and synchronize it
1779 * for use by the CPU.
1781 static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
1782 const unsigned int size)
1784 struct i40e_rx_buffer *rx_buffer;
1786 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
1787 prefetchw(rx_buffer->page);
1789 /* we are reusing so sync this buffer for CPU use */
1790 dma_sync_single_range_for_cpu(rx_ring->dev,
1792 rx_buffer->page_offset,
1796 /* We have pulled a buffer for use, so decrement pagecnt_bias */
1797 rx_buffer->pagecnt_bias--;
1803 * i40e_construct_skb - Allocate skb and populate it
1804 * @rx_ring: rx descriptor ring to transact packets on
1805 * @rx_buffer: rx buffer to pull data from
1806 * @xdp: xdp_buff pointing to the data
1808 * This function allocates an skb. It then populates it with the page
1809 * data from the current receive descriptor, taking care to set up the
1812 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
1813 struct i40e_rx_buffer *rx_buffer,
1814 struct xdp_buff *xdp)
1816 unsigned int size = xdp->data_end - xdp->data;
1817 #if (PAGE_SIZE < 8192)
1818 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1820 unsigned int truesize = SKB_DATA_ALIGN(size);
1822 unsigned int headlen;
1823 struct sk_buff *skb;
1825 /* prefetch first cache line of first page */
1826 prefetch(xdp->data);
1827 #if L1_CACHE_BYTES < 128
1828 prefetch(xdp->data + L1_CACHE_BYTES);
1831 /* allocate a skb to store the frags */
1832 skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
1834 GFP_ATOMIC | __GFP_NOWARN);
1838 /* Determine available headroom for copy */
1840 if (headlen > I40E_RX_HDR_SIZE)
1841 headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
1843 /* align pull length to size of long to optimize memcpy performance */
1844 memcpy(__skb_put(skb, headlen), xdp->data,
1845 ALIGN(headlen, sizeof(long)));
1847 /* update all of the pointers */
1850 skb_add_rx_frag(skb, 0, rx_buffer->page,
1851 rx_buffer->page_offset + headlen,
1854 /* buffer is used by skb, update page_offset */
1855 #if (PAGE_SIZE < 8192)
1856 rx_buffer->page_offset ^= truesize;
1858 rx_buffer->page_offset += truesize;
1861 /* buffer is unused, reset bias back to rx_buffer */
1862 rx_buffer->pagecnt_bias++;
1869 * i40e_build_skb - Build skb around an existing buffer
1870 * @rx_ring: Rx descriptor ring to transact packets on
1871 * @rx_buffer: Rx buffer to pull data from
1872 * @xdp: xdp_buff pointing to the data
1874 * This function builds an skb around an existing Rx buffer, taking care
1875 * to set up the skb correctly and avoid any memcpy overhead.
1877 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
1878 struct i40e_rx_buffer *rx_buffer,
1879 struct xdp_buff *xdp)
1881 unsigned int size = xdp->data_end - xdp->data;
1882 #if (PAGE_SIZE < 8192)
1883 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
1885 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1886 SKB_DATA_ALIGN(I40E_SKB_PAD + size);
1888 struct sk_buff *skb;
1890 /* prefetch first cache line of first page */
1891 prefetch(xdp->data);
1892 #if L1_CACHE_BYTES < 128
1893 prefetch(xdp->data + L1_CACHE_BYTES);
1895 /* build an skb around the page buffer */
1896 skb = build_skb(xdp->data_hard_start, truesize);
1900 /* update pointers within the skb to store the data */
1901 skb_reserve(skb, I40E_SKB_PAD);
1902 __skb_put(skb, size);
1904 /* buffer is used by skb, update page_offset */
1905 #if (PAGE_SIZE < 8192)
1906 rx_buffer->page_offset ^= truesize;
1908 rx_buffer->page_offset += truesize;
1915 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
1916 * @rx_ring: rx descriptor ring to transact packets on
1917 * @rx_buffer: rx buffer to pull data from
1919 * This function will clean up the contents of the rx_buffer. It will
1920 * either recycle the buffer or unmap it and free the associated resources.
1922 static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
1923 struct i40e_rx_buffer *rx_buffer)
1925 if (i40e_can_reuse_rx_page(rx_buffer)) {
1926 /* hand second half of page back to the ring */
1927 i40e_reuse_rx_page(rx_ring, rx_buffer);
1928 rx_ring->rx_stats.page_reuse_count++;
1930 /* we are not reusing the buffer so unmap it */
1931 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1932 i40e_rx_pg_size(rx_ring),
1933 DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
1934 __page_frag_cache_drain(rx_buffer->page,
1935 rx_buffer->pagecnt_bias);
1938 /* clear contents of buffer_info */
1939 rx_buffer->page = NULL;
1943 * i40e_is_non_eop - process handling of non-EOP buffers
1944 * @rx_ring: Rx ring being processed
1945 * @rx_desc: Rx descriptor for current buffer
1946 * @skb: Current socket buffer containing buffer in progress
1948 * This function updates next to clean. If the buffer is an EOP buffer
1949 * this function exits returning false, otherwise it will place the
1950 * sk_buff in the next buffer to be chained and return true indicating
1951 * that this is in fact a non-EOP buffer.
1953 static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
1954 union i40e_rx_desc *rx_desc,
1955 struct sk_buff *skb)
1957 u32 ntc = rx_ring->next_to_clean + 1;
1959 /* fetch, update, and store next to clean */
1960 ntc = (ntc < rx_ring->count) ? ntc : 0;
1961 rx_ring->next_to_clean = ntc;
1963 prefetch(I40E_RX_DESC(rx_ring, ntc));
1965 /* if we are the last buffer then there is nothing else to do */
1966 #define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
1967 if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
1970 rx_ring->rx_stats.non_eop_descs++;
1975 #define I40E_XDP_PASS 0
1976 #define I40E_XDP_CONSUMED 1
1977 #define I40E_XDP_TX 2
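/* Verdicts returned (encoded) by i40e_run_xdp(): PASS hands the frame to
 * the normal skb path, CONSUMED drops it and recycles the page, and TX
 * queues it on the per-queue XDP Tx ring.
 */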
1979 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
1980 struct i40e_ring *xdp_ring);
1983 * i40e_run_xdp - run an XDP program
1984 * @rx_ring: Rx ring being processed
1985 * @xdp: XDP buffer containing the frame
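 *
 * The verdict is returned encoded as an ERR_PTR value (NULL for PASS)
 * rather than a real sk_buff; the caller checks it with IS_ERR()/PTR_ERR().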
1987 static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
1988 struct xdp_buff *xdp)
1990 int result = I40E_XDP_PASS;
1991 struct i40e_ring *xdp_ring;
1992 struct bpf_prog *xdp_prog;
1996 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
2001 act = bpf_prog_run_xdp(xdp_prog, xdp);
2006 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2007 result = i40e_xmit_xdp_ring(xdp, xdp_ring);
2010 bpf_warn_invalid_xdp_action(act);
2012 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
2013 /* fallthrough -- handle aborts by dropping packet */
2015 result = I40E_XDP_CONSUMED;
2020 return ERR_PTR(-result);
2024 * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region
2026 * @rx_buffer: Rx buffer to adjust
2027 * @size: Size of adjustment
2029 static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring,
2030 struct i40e_rx_buffer *rx_buffer,
2033 #if (PAGE_SIZE < 8192)
2034 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
2036 rx_buffer->page_offset ^= truesize;
2038 unsigned int truesize = SKB_DATA_ALIGN(i40e_rx_offset(rx_ring) + size);
2040 rx_buffer->page_offset += truesize;
2045 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2046 * @rx_ring: rx descriptor ring to transact packets on
2047 * @budget: Total limit on number of packets to process
2049 * This function provides a "bounce buffer" approach to Rx interrupt
2050 * processing. The advantage to this is that on systems that have
2051 * expensive overhead for IOMMU access this provides a means of avoiding
2052 * it by maintaining the mapping of the page to the system.
2054 * Returns amount of work completed
2056 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2058 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2059 struct sk_buff *skb = rx_ring->skb;
2060 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2061 bool failure = false, xdp_xmit = false;
2063 while (likely(total_rx_packets < (unsigned int)budget)) {
2064 struct i40e_rx_buffer *rx_buffer;
2065 union i40e_rx_desc *rx_desc;
2066 struct xdp_buff xdp;
2072 /* return some buffers to hardware, one at a time is too slow */
2073 if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
2074 failure = failure ||
2075 i40e_alloc_rx_buffers(rx_ring, cleaned_count);
2079 rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
2081 /* status_error_len will always be zero for unused descriptors
2082 * because it's cleared in cleanup and overlaps with hdr_addr,
2083 * which is always zero because packet split isn't used. If the
2084 * hardware wrote DD then the length will be non-zero.
2086 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2088 /* This memory barrier is needed to keep us from reading
2089 * any other fields out of the rx_desc until we have
2090 * verified the descriptor has been written back.
2094 if (unlikely(i40e_rx_is_programming_status(qword))) {
2095 i40e_clean_programming_status(rx_ring, rx_desc, qword);
2098 size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
2099 I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
2103 i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
2104 rx_buffer = i40e_get_rx_buffer(rx_ring, size);
2106 /* retrieve a buffer from the ring */
2108 xdp.data = page_address(rx_buffer->page) +
2109 rx_buffer->page_offset;
2110 xdp.data_hard_start = xdp.data -
2111 i40e_rx_offset(rx_ring);
2112 xdp.data_end = xdp.data + size;
2114 skb = i40e_run_xdp(rx_ring, &xdp);
2118 if (PTR_ERR(skb) == -I40E_XDP_TX) {
2120 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2122 rx_buffer->pagecnt_bias++;
2124 total_rx_bytes += size;
2127 i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
2128 } else if (ring_uses_build_skb(rx_ring)) {
2129 skb = i40e_build_skb(rx_ring, rx_buffer, &xdp);
2131 skb = i40e_construct_skb(rx_ring, rx_buffer, &xdp);
2134 /* exit if we failed to retrieve a buffer */
2136 rx_ring->rx_stats.alloc_buff_failed++;
2137 rx_buffer->pagecnt_bias++;
2141 i40e_put_rx_buffer(rx_ring, rx_buffer);
2144 if (i40e_is_non_eop(rx_ring, rx_desc, skb))
2147 if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
2152 /* probably a little skewed due to removing CRC */
2153 total_rx_bytes += skb->len;
2155 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
2156 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
2157 I40E_RXD_QW1_PTYPE_SHIFT;
2159 /* populate checksum, VLAN, and protocol */
2160 i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
2162 vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
2163 le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
2165 i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
2166 i40e_receive_skb(rx_ring, skb, vlan_tag);
2169 /* update budget accounting */
2174 struct i40e_ring *xdp_ring;
2176 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2178 /* Force memory writes to complete before letting h/w
2179 * know there are new descriptors to fetch.
2183 writel(xdp_ring->next_to_use, xdp_ring->tail);
2188 u64_stats_update_begin(&rx_ring->syncp);
2189 rx_ring->stats.packets += total_rx_packets;
2190 rx_ring->stats.bytes += total_rx_bytes;
2191 u64_stats_update_end(&rx_ring->syncp);
2192 rx_ring->q_vector->rx.total_packets += total_rx_packets;
2193 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
2195 /* guarantee a trip back through this routine if there was a failure */
2196 return failure ? budget : (int)total_rx_packets;
2199 static u32 i40e_buildreg_itr(const int type, const u16 itr)
2203 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
2204 /* Don't clear PBA because that can cause lost interrupts that
2205 * came in while we were cleaning/polling
2207 (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
2208 (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
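/* For illustration, the value written to the dynamic control register is
 * just these fields OR'd together; with type == I40E_RX_ITR and itr == 25
 * (the interval field counts 2 usec units, so roughly 50 usecs between
 * interrupts) the single 32-bit write both re-arms the vector and
 * reprograms its Rx throttle rate.
 */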
2213 /* a small macro to shorten up some long lines */
2214 #define INTREG I40E_PFINT_DYN_CTLN
2215 static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
2217 return vsi->rx_rings[idx]->rx_itr_setting;
2220 static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
2222 return vsi->tx_rings[idx]->tx_itr_setting;
2226 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2227 * @vsi: the VSI we care about
2228 * @q_vector: q_vector for which itr is being updated and interrupt enabled
2231 static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
2232 struct i40e_q_vector *q_vector)
2234 struct i40e_hw *hw = &vsi->back->hw;
2235 bool rx = false, tx = false;
2238 int idx = q_vector->v_idx;
2239 int rx_itr_setting, tx_itr_setting;
2241 /* If we don't have MSIX, then we only need to re-enable icr0 */
2242 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
2243 i40e_irq_dynamic_enable_icr0(vsi->back, false);
2247 vector = (q_vector->v_idx + vsi->base_vector);
2249 /* avoid dynamic calculation if in countdown mode OR if
2250 * all dynamic is disabled
2252 rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
2254 rx_itr_setting = get_rx_itr(vsi, idx);
2255 tx_itr_setting = get_tx_itr(vsi, idx);
2257 if (q_vector->itr_countdown > 0 ||
2258 (!ITR_IS_DYNAMIC(rx_itr_setting) &&
2259 !ITR_IS_DYNAMIC(tx_itr_setting))) {
2263 if (ITR_IS_DYNAMIC(rx_itr_setting)) {
2264 rx = i40e_set_new_dynamic_itr(&q_vector->rx);
2265 rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
2268 if (ITR_IS_DYNAMIC(tx_itr_setting)) {
2269 tx = i40e_set_new_dynamic_itr(&q_vector->tx);
2270 txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
2274 /* get the higher of the two ITR adjustments and
2275 * use the same value for both ITR registers
2276 * when in adaptive mode (Rx and/or Tx)
2278 u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
2280 q_vector->tx.itr = q_vector->rx.itr = itr;
2281 txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
2283 rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
2287 /* only need to enable the interrupt once, but need
2288 * to possibly update both ITR values
2291 /* set the INTENA_MSK_MASK so that this first write
2292 * won't actually enable the interrupt, instead just
2293 * updating the ITR (it's bit 31 PF and VF)
2296 /* don't check _DOWN because interrupt isn't being enabled */
2297 wr32(hw, INTREG(vector - 1), rxval);
2301 if (!test_bit(__I40E_VSI_DOWN, vsi->state))
2302 wr32(hw, INTREG(vector - 1), txval);
2304 if (q_vector->itr_countdown)
2305 q_vector->itr_countdown--;
2307 q_vector->itr_countdown = ITR_COUNTDOWN_START;
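/* To summarize the two-write scheme above: when both ITR values changed,
 * rxval is written first with the INTENA_MSK bit (bit 31) set so it only
 * updates the Rx interval, and txval, written last without the mask, is the
 * write that actually re-enables the vector.
 */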
2311 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2312 * @napi: napi struct with our devices info in it
2313 * @budget: amount of work driver is allowed to do this pass, in packets
2315 * This function will clean all queues associated with a q_vector.
2317 * Returns the amount of work done
2319 int i40e_napi_poll(struct napi_struct *napi, int budget)
2321 struct i40e_q_vector *q_vector =
2322 container_of(napi, struct i40e_q_vector, napi);
2323 struct i40e_vsi *vsi = q_vector->vsi;
2324 struct i40e_ring *ring;
2325 bool clean_complete = true;
2326 bool arm_wb = false;
2327 int budget_per_ring;
2330 if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
2331 napi_complete(napi);
2335 /* Since the actual Tx work is minimal, we can give the Tx a larger
2336 * budget and be more aggressive about cleaning up the Tx descriptors.
2338 i40e_for_each_ring(ring, q_vector->tx) {
2339 if (!i40e_clean_tx_irq(vsi, ring, budget)) {
2340 clean_complete = false;
2343 arm_wb |= ring->arm_wb;
2344 ring->arm_wb = false;
2347 /* Handle case where we are called by netpoll with a budget of 0 */
2351 /* We attempt to distribute budget to each Rx queue fairly, but don't
2352 * allow the budget to go below 1 because that would exit polling early.
2354 budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
2356 i40e_for_each_ring(ring, q_vector->rx) {
2357 int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
2359 work_done += cleaned;
2360 /* if we clean as many as budgeted, we must not be done */
2361 if (cleaned >= budget_per_ring)
2362 clean_complete = false;
2365 /* If work not completed, return budget and polling will return */
2366 if (!clean_complete) {
2367 int cpu_id = smp_processor_id();
2369 /* It is possible that the interrupt affinity has changed but,
2370 * if the cpu is pegged at 100%, polling will never exit while
2371 * traffic continues and the interrupt will be stuck on this
2372 * cpu. We check to make sure affinity is correct before we
2373 * continue to poll, otherwise we must stop polling so the
2374 * interrupt can move to the correct cpu.
2376 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
2377 /* Tell napi that we are done polling */
2378 napi_complete_done(napi, work_done);
2380 /* Force an interrupt */
2381 i40e_force_wb(vsi, q_vector);
2383 /* Return budget-1 so that polling stops */
2388 q_vector->tx.ring[0].tx_stats.tx_force_wb++;
2389 i40e_enable_wb_on_itr(vsi, q_vector);
2394 if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
2395 q_vector->arm_wb_state = false;
2397 /* Work is done so exit the polling mode and re-enable the interrupt */
2398 napi_complete_done(napi, work_done);
2400 i40e_update_enable_itr(vsi, q_vector);
2402 return min(work_done, budget - 1);
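/* Capping the return value at budget - 1 is deliberate: once
 * napi_complete_done() has been called the poll routine must report
 * strictly less than the full budget, otherwise the NAPI core would treat
 * this vector as still busy even though its interrupt was just re-enabled.
 */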
2406 * i40e_atr - Add a Flow Director ATR filter
2407 * @tx_ring: ring to add programming descriptor to
2409 * @tx_flags: send tx flags
2411 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
2414 struct i40e_filter_program_desc *fdir_desc;
2415 struct i40e_pf *pf = tx_ring->vsi->back;
2417 unsigned char *network;
2419 struct ipv6hdr *ipv6;
2423 u32 flex_ptype, dtype_cmd;
2427 /* make sure ATR is enabled */
2428 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
2431 if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)
2434 /* if sampling is disabled do nothing */
2435 if (!tx_ring->atr_sample_rate)
2438 /* Currently only IPv4/IPv6 with TCP is supported */
2439 if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
2442 /* snag network header to get L4 type and address */
2443 hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ?
2444 skb_inner_network_header(skb) : skb_network_header(skb);
2446 /* Note: tx_flags gets modified to reflect inner protocols in
2447 * tx_enable_csum function if encap is enabled.
2449 if (tx_flags & I40E_TX_FLAGS_IPV4) {
2450 /* access ihl as u8 to avoid unaligned access on ia64 */
2451 hlen = (hdr.network[0] & 0x0F) << 2;
2452 l4_proto = hdr.ipv4->protocol;
2454 /* find the start of the innermost ipv6 header */
2455 unsigned int inner_hlen = hdr.network - skb->data;
2456 unsigned int h_offset = inner_hlen;
2458 /* this function updates h_offset to the end of the header */
2460 ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL);
2461 /* hlen will contain our best estimate of the tcp header */
2462 hlen = h_offset - inner_hlen;
2465 if (l4_proto != IPPROTO_TCP)
2468 th = (struct tcphdr *)(hdr.network + hlen);
2470 /* Due to lack of space, no more new filters can be programmed */
2471 if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
2473 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) {
2474 /* HW ATR eviction will take care of removing filters on FIN
2477 if (th->fin || th->rst)
2481 tx_ring->atr_count++;
2483 /* sample on all syn/fin/rst packets or once every atr sample rate */
2487 (tx_ring->atr_count < tx_ring->atr_sample_rate))
2490 tx_ring->atr_count = 0;
2492 /* grab the next descriptor */
2493 i = tx_ring->next_to_use;
2494 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
2497 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2499 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
2500 I40E_TXD_FLTR_QW0_QINDEX_MASK;
2501 flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ?
2502 (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
2503 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
2504 (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
2505 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
2507 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
2509 dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
2511 dtype_cmd |= (th->fin || th->rst) ?
2512 (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
2513 I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
2514 (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
2515 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
2517 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
2518 I40E_TXD_FLTR_QW1_DEST_SHIFT;
2520 dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
2521 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
2523 dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
2524 if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL))
2526 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
2527 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2528 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2531 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
2532 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
2533 I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
2535 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED)
2536 dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK;
2538 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
2539 fdir_desc->rsvd = cpu_to_le32(0);
2540 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
2541 fdir_desc->fd_id = cpu_to_le32(0);
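/* Rough example of the sampling above: with atr_sample_rate set to 20,
 * every SYN/FIN/RST segment of a flow is sampled immediately, while for the
 * rest of the flow only every 20th transmitted segment burns a filter
 * programming descriptor, keeping ATR's per-packet cost low.
 */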
2545 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
2547 * @tx_ring: ring to send buffer on
2548 * @flags: the tx flags to be set
2550 * Checks the skb and set up correspondingly several generic transmit flags
2551 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
2553 * Returns an error code to indicate the frame should be dropped upon error,
2554 * otherwise returns 0 to indicate the flags have been set properly.
2556 static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
2557 struct i40e_ring *tx_ring,
2560 __be16 protocol = skb->protocol;
2563 if (protocol == htons(ETH_P_8021Q) &&
2564 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
2565 /* When HW VLAN acceleration is turned off by the user the
2566 * stack sets the protocol to 8021q so that the driver
2567 * can take any steps required to support the SW only
2568 * VLAN handling. In our case the driver doesn't need
2569 * to take any further steps so just set the protocol
2570 * to the encapsulated ethertype.
2572 skb->protocol = vlan_get_protocol(skb);
2576 /* if we have a HW VLAN tag being added, default to the HW one */
2577 if (skb_vlan_tag_present(skb)) {
2578 tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
2579 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
2580 /* else if it is a SW VLAN, check the next protocol and store the tag */
2581 } else if (protocol == htons(ETH_P_8021Q)) {
2582 struct vlan_hdr *vhdr, _vhdr;
2584 vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
2588 protocol = vhdr->h_vlan_encapsulated_proto;
2589 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
2590 tx_flags |= I40E_TX_FLAGS_SW_VLAN;
2593 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED))
2596 /* Insert 802.1p priority into VLAN header */
2597 if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
2598 (skb->priority != TC_PRIO_CONTROL)) {
2599 tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
2600 tx_flags |= (skb->priority & 0x7) <<
2601 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
2602 if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
2603 struct vlan_ethhdr *vhdr;
2606 rc = skb_cow_head(skb, 0);
2609 vhdr = (struct vlan_ethhdr *)skb->data;
2610 vhdr->h_vlan_TCI = htons(tx_flags >>
2611 I40E_TX_FLAGS_VLAN_SHIFT);
2613 tx_flags |= I40E_TX_FLAGS_HW_VLAN;
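/* Sketch of the tx_flags layout built above: the 16-bit VLAN TCI sits in
 * the upper half of tx_flags (I40E_TX_FLAGS_VLAN_SHIFT), so VLAN id 100
 * with 802.1p priority 5 is stored as ((5 << 13) | 100) << 16, while the
 * low bits carry the HW_VLAN/SW_VLAN markers that tell i40e_tx_map whether
 * the tag is inserted by the hardware via L2TAG1 or already lives in the
 * frame.
 */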
2623 * i40e_tso - set up the tso context descriptor
2624 * @first: pointer to first Tx buffer for xmit
2625 * @hdr_len: ptr to the size of the packet header
2626 * @cd_type_cmd_tso_mss: Quad Word 1
2628 * Returns 0 if no TSO can happen, 1 if tso is going, or error
2630 static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
2631 u64 *cd_type_cmd_tso_mss)
2633 struct sk_buff *skb = first->skb;
2634 u64 cd_cmd, cd_tso_len, cd_mss;
2645 u32 paylen, l4_offset;
2646 u16 gso_segs, gso_size;
2649 if (skb->ip_summed != CHECKSUM_PARTIAL)
2652 if (!skb_is_gso(skb))
2655 err = skb_cow_head(skb, 0);
2659 ip.hdr = skb_network_header(skb);
2660 l4.hdr = skb_transport_header(skb);
2662 /* initialize outer IP header fields */
2663 if (ip.v4->version == 4) {
2667 ip.v6->payload_len = 0;
2670 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2674 SKB_GSO_UDP_TUNNEL |
2675 SKB_GSO_UDP_TUNNEL_CSUM)) {
2676 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2677 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2680 /* determine offset of outer transport header */
2681 l4_offset = l4.hdr - skb->data;
2683 /* remove payload length from outer checksum */
2684 paylen = skb->len - l4_offset;
2685 csum_replace_by_diff(&l4.udp->check,
2686 (__force __wsum)htonl(paylen));
2689 /* reset pointers to inner headers */
2690 ip.hdr = skb_inner_network_header(skb);
2691 l4.hdr = skb_inner_transport_header(skb);
2693 /* initialize inner IP header fields */
2694 if (ip.v4->version == 4) {
2698 ip.v6->payload_len = 0;
2702 /* determine offset of inner transport header */
2703 l4_offset = l4.hdr - skb->data;
2705 /* remove payload length from inner checksum */
2706 paylen = skb->len - l4_offset;
2707 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
2709 /* compute length of segmentation header */
2710 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
2712 /* pull values out of skb_shinfo */
2713 gso_size = skb_shinfo(skb)->gso_size;
2714 gso_segs = skb_shinfo(skb)->gso_segs;
2716 /* update GSO size and bytecount with header size */
2717 first->gso_segs = gso_segs;
2718 first->bytecount += (first->gso_segs - 1) * *hdr_len;
2720 /* find the field values */
2721 cd_cmd = I40E_TX_CTX_DESC_TSO;
2722 cd_tso_len = skb->len - *hdr_len;
2724 *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
2725 (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
2726 (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
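/* Worked example of the accounting above: a TSO skb with skb->len == 7306,
 * gso_size == 1448 and a 66-byte header is 5 segments.  first->bytecount
 * grows by (5 - 1) * 66 = 264 so byte queue limits see the on-wire total of
 * 5 * 1514 = 7570 bytes, and cd_tso_len = 7306 - 66 = 7240 is the payload
 * the hardware will actually segment.
 */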
2731 * i40e_tsyn - set up the tsyn context descriptor
2732 * @tx_ring: ptr to the ring to send
2733 * @skb: ptr to the skb we're sending
2734 * @tx_flags: the collected send information
2735 * @cd_type_cmd_tso_mss: Quad Word 1
2737 * Returns 0 if no Tx timestamp can happen and 1 if the timestamp will happen
2739 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
2740 u32 tx_flags, u64 *cd_type_cmd_tso_mss)
2744 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2747 /* Tx timestamps cannot be sampled when doing TSO */
2748 if (tx_flags & I40E_TX_FLAGS_TSO)
2751 /* only timestamp the outbound packet if the user has requested it and
2752 * we are not already transmitting a packet to be timestamped
2754 pf = i40e_netdev_to_pf(tx_ring->netdev);
2755 if (!(pf->flags & I40E_FLAG_PTP))
2759 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) {
2760 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2761 pf->ptp_tx_start = jiffies;
2762 pf->ptp_tx_skb = skb_get(skb);
2764 pf->tx_hwtstamp_skipped++;
2768 *cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
2769 I40E_TXD_CTX_QW1_CMD_SHIFT;
2775 * i40e_tx_enable_csum - Enable Tx checksum offloads
2777 * @tx_flags: pointer to Tx flags currently set
2778 * @td_cmd: Tx descriptor command bits to set
2779 * @td_offset: Tx descriptor header offsets to set
2780 * @tx_ring: Tx descriptor ring
2781 * @cd_tunneling: ptr to context desc bits
2783 static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
2784 u32 *td_cmd, u32 *td_offset,
2785 struct i40e_ring *tx_ring,
2798 unsigned char *exthdr;
2799 u32 offset, cmd = 0;
2803 if (skb->ip_summed != CHECKSUM_PARTIAL)
2806 ip.hdr = skb_network_header(skb);
2807 l4.hdr = skb_transport_header(skb);
2809 /* compute outer L2 header size */
2810 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
2812 if (skb->encapsulation) {
2814 /* define outer network header type */
2815 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2816 tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2817 I40E_TX_CTX_EXT_IP_IPV4 :
2818 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
2820 l4_proto = ip.v4->protocol;
2821 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2822 tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
2824 exthdr = ip.hdr + sizeof(*ip.v6);
2825 l4_proto = ip.v6->nexthdr;
2826 if (l4.hdr != exthdr)
2827 ipv6_skip_exthdr(skb, exthdr - skb->data,
2828 &l4_proto, &frag_off);
2831 /* define outer transport */
2834 tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
2835 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2838 tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
2839 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2843 *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
2844 l4.hdr = skb_inner_network_header(skb);
2847 if (*tx_flags & I40E_TX_FLAGS_TSO)
2850 skb_checksum_help(skb);
2854 /* compute outer L3 header size */
2855 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
2856 I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
2858 /* switch IP header pointer from outer to inner header */
2859 ip.hdr = skb_inner_network_header(skb);
2861 /* compute tunnel header size */
2862 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
2863 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
2865 /* indicate if we need to offload outer UDP header */
2866 if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
2867 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2868 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
2869 tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
2871 /* record tunnel offload values */
2872 *cd_tunneling |= tunnel;
2874 /* switch L4 header pointer from outer to inner */
2875 l4.hdr = skb_inner_transport_header(skb);
2878 /* reset type as we transition from outer to inner headers */
2879 *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
2880 if (ip.v4->version == 4)
2881 *tx_flags |= I40E_TX_FLAGS_IPV4;
2882 if (ip.v6->version == 6)
2883 *tx_flags |= I40E_TX_FLAGS_IPV6;
2886 /* Enable IP checksum offloads */
2887 if (*tx_flags & I40E_TX_FLAGS_IPV4) {
2888 l4_proto = ip.v4->protocol;
2889 /* the stack computes the IP header already, the only time we
2890 * need the hardware to recompute it is in the case of TSO.
2892 cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
2893 I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
2894 I40E_TX_DESC_CMD_IIPT_IPV4;
2895 } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
2896 cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
2898 exthdr = ip.hdr + sizeof(*ip.v6);
2899 l4_proto = ip.v6->nexthdr;
2900 if (l4.hdr != exthdr)
2901 ipv6_skip_exthdr(skb, exthdr - skb->data,
2902 &l4_proto, &frag_off);
2905 /* compute inner L3 header size */
2906 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
2908 /* Enable L4 checksum offloads */
2911 /* enable checksum offloads */
2912 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
2913 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2916 /* enable SCTP checksum offload */
2917 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
2918 offset |= (sizeof(struct sctphdr) >> 2) <<
2919 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2922 /* enable UDP checksum offload */
2923 cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
2924 offset |= (sizeof(struct udphdr) >> 2) <<
2925 I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
2928 if (*tx_flags & I40E_TX_FLAGS_TSO)
2930 skb_checksum_help(skb);
2935 *td_offset |= offset;
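/* Worked example of the offsets built above for a plain (untunneled)
 * TCP/IPv4 frame: MACLEN = 14 / 2 = 7 (in 2-byte words), IPLEN = 20 / 4 = 5
 * (in dwords) and L4LEN = tcp->doff = 5 dwords when there are no TCP
 * options; each lands in its own field of td_offset so the hardware knows
 * where each header starts when inserting the checksums.
 */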
2941 * i40e_create_tx_ctx - Build the Tx context descriptor
2942 * @tx_ring: ring to create the descriptor on
2943 * @cd_type_cmd_tso_mss: Quad Word 1
2944 * @cd_tunneling: Quad Word 0 - bits 0-31
2945 * @cd_l2tag2: Quad Word 0 - bits 32-63
2947 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
2948 const u64 cd_type_cmd_tso_mss,
2949 const u32 cd_tunneling, const u32 cd_l2tag2)
2951 struct i40e_tx_context_desc *context_desc;
2952 int i = tx_ring->next_to_use;
2954 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
2955 !cd_tunneling && !cd_l2tag2)
2958 /* grab the next descriptor */
2959 context_desc = I40E_TX_CTXTDESC(tx_ring, i);
2962 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2964 /* cpu_to_le32 and assign to struct fields */
2965 context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
2966 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
2967 context_desc->rsvd = cpu_to_le16(0);
2968 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
2972 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
2973 * @tx_ring: the ring to be checked
2974 * @size: the number of descriptors we want to assure is available
2976 * Returns -EBUSY if a stop is needed, else 0
2978 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2980 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2981 /* Memory barrier before checking head and tail */
2984 /* Check again in a case another CPU has just made room available. */
2985 if (likely(I40E_DESC_UNUSED(tx_ring) < size))
2988 /* A reprieve! - use start_queue because it doesn't call schedule */
2989 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2990 ++tx_ring->tx_stats.restart_queue;
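/* The fast path lives in the header as a small inline and only falls back
 * to this routine when the ring looks short on space, roughly:
 *
 *	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
 *		return 0;
 *	return __i40e_maybe_stop_tx(tx_ring, size);
 *
 * so the subqueue is stopped first, the free count is re-checked after the
 * memory barrier, and the queue is restarted if another CPU freed
 * descriptors in the meantime.
 */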
2995 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
2998 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
2999 * and so we need to figure out the cases where we need to linearize the skb.
3001 * For TSO we need to count the TSO header and segment payload separately.
3002 * As such we need to check cases where we have 7 fragments or more as we
3003 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
3004 * the segment payload in the first descriptor, and another 7 for the fragments.
3007 bool __i40e_chk_linearize(struct sk_buff *skb)
3009 const struct skb_frag_struct *frag, *stale;
3012 /* no need to check if number of frags is less than 7 */
3013 nr_frags = skb_shinfo(skb)->nr_frags;
3014 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
3017 /* We need to walk through the list and validate that each group
3018 * of 6 fragments totals at least gso_size.
3020 nr_frags -= I40E_MAX_BUFFER_TXD - 2;
3021 frag = &skb_shinfo(skb)->frags[0];
3023 /* Initialize sum to the negative value of gso_size minus 1. We
3024 * use this as the worst case scenario in which the frag ahead
3025 * of us only provides one byte which is why we are limited to 6
3026 * descriptors for a single transmit as the header and previous
3027 * fragment are already consuming 2 descriptors.
3029 sum = 1 - skb_shinfo(skb)->gso_size;
3031 /* Add size of frags 0 through 4 to create our initial sum */
3032 sum += skb_frag_size(frag++);
3033 sum += skb_frag_size(frag++);
3034 sum += skb_frag_size(frag++);
3035 sum += skb_frag_size(frag++);
3036 sum += skb_frag_size(frag++);
3038 /* Walk through fragments adding latest fragment, testing it, and
3039 * then removing stale fragments from the sum.
3041 stale = &skb_shinfo(skb)->frags[0];
3043 sum += skb_frag_size(frag++);
3045 /* if sum is negative we failed to make sufficient progress */
3052 sum -= skb_frag_size(stale++);
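/* Example of the running-sum check above: with gso_size == 1500 the sum
 * starts at -1499.  Six consecutive 128-byte fragments only contribute 768
 * bytes, the sum stays negative and the skb must be linearized; six 512-byte
 * fragments contribute 3072 bytes, every window stays non-negative, and the
 * 8-descriptor-per-segment limit is never exceeded.
 */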
3059 * i40e_tx_map - Build the Tx descriptor
3060 * @tx_ring: ring to send buffer on
3062 * @first: first buffer info buffer to use
3063 * @tx_flags: collected send information
3064 * @hdr_len: size of the packet header
3065 * @td_cmd: the command field in the descriptor
3066 * @td_offset: offset for checksum or crc
3068 * Returns 0 on success, -1 on failure to DMA
3070 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
3071 struct i40e_tx_buffer *first, u32 tx_flags,
3072 const u8 hdr_len, u32 td_cmd, u32 td_offset)
3074 unsigned int data_len = skb->data_len;
3075 unsigned int size = skb_headlen(skb);
3076 struct skb_frag_struct *frag;
3077 struct i40e_tx_buffer *tx_bi;
3078 struct i40e_tx_desc *tx_desc;
3079 u16 i = tx_ring->next_to_use;
3084 if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
3085 td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
3086 td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
3087 I40E_TX_FLAGS_VLAN_SHIFT;
3090 first->tx_flags = tx_flags;
3092 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
3094 tx_desc = I40E_TX_DESC(tx_ring, i);
3097 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
3098 unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3100 if (dma_mapping_error(tx_ring->dev, dma))
3103 /* record length, and DMA address */
3104 dma_unmap_len_set(tx_bi, len, size);
3105 dma_unmap_addr_set(tx_bi, dma, dma);
3107 /* align size to end of page */
3108 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
3109 tx_desc->buffer_addr = cpu_to_le64(dma);
3111 while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
3112 tx_desc->cmd_type_offset_bsz =
3113 build_ctob(td_cmd, td_offset,
3120 if (i == tx_ring->count) {
3121 tx_desc = I40E_TX_DESC(tx_ring, 0);
3128 max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
3129 tx_desc->buffer_addr = cpu_to_le64(dma);
3132 if (likely(!data_len))
3135 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
3142 if (i == tx_ring->count) {
3143 tx_desc = I40E_TX_DESC(tx_ring, 0);
3147 size = skb_frag_size(frag);
3150 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
3153 tx_bi = &tx_ring->tx_bi[i];
3156 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
3159 if (i == tx_ring->count)
3162 tx_ring->next_to_use = i;
3164 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
3166 /* write last descriptor with EOP bit */
3167 td_cmd |= I40E_TX_DESC_CMD_EOP;
3169 /* We can OR these values together as they both are checked against
3170 * 4 below and at this point desc_count will be used as a boolean value
3171 * after this if/else block.
3173 desc_count |= ++tx_ring->packet_stride;
3175 /* Algorithm to optimize tail and RS bit setting:
3176 * if queue is stopped
3177 *    mark RS bit
3178 *    reset packet counter
3179 * else if xmit_more is supported and is true
3180 *    advance packet counter to 4
3181 *    reset desc_count to 0
3183 * if desc_count >= 4
3184 *    mark RS bit
3185 *    reset packet counter
3189 * Note: If there are less than 4 descriptors
3190 * pending and interrupts were disabled the service task will
3191 * trigger a force WB.
3193 if (netif_xmit_stopped(txring_txq(tx_ring))) {
3195 } else if (skb->xmit_more) {
3196 /* set stride to arm on next packet and reset desc_count */
3197 tx_ring->packet_stride = WB_STRIDE;
3199 } else if (desc_count >= WB_STRIDE) {
3201 /* write last descriptor with RS bit set */
3202 td_cmd |= I40E_TX_DESC_CMD_RS;
3203 tx_ring->packet_stride = 0;
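/* Example of the batching above: when the stack signals xmit_more,
 * packet_stride is primed to WB_STRIDE and no RS bit is requested for this
 * packet; the first packet sent without xmit_more (or a stopped queue) then
 * sets RS so the hardware reports completion and i40e_clean_tx_irq can
 * reclaim the whole burst of descriptors at once.
 */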
3206 tx_desc->cmd_type_offset_bsz =
3207 build_ctob(td_cmd, td_offset, size, td_tag);
3209 /* Force memory writes to complete before letting h/w know there
3210 * are new descriptors to fetch.
3212 * We also use this memory barrier to make certain all of the
3213 * status bits have been updated before next_to_watch is written.
3217 /* set next_to_watch value indicating a packet is present */
3218 first->next_to_watch = tx_desc;
3220 /* notify HW of packet */
3222 writel(i, tx_ring->tail);
3224 /* we need this if more than one processor can write to our tail
3225 * at a time, it synchronizes IO on IA64/Altix systems
3233 dev_info(tx_ring->dev, "TX DMA map failed\n");
3235 /* clear dma mappings for failed tx_bi map */
3237 tx_bi = &tx_ring->tx_bi[i];
3238 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
3246 tx_ring->next_to_use = i;
3252 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3253 * @xdp: data to transmit
3254 * @xdp_ring: XDP Tx ring
3256 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
3257 struct i40e_ring *xdp_ring)
3259 u32 size = xdp->data_end - xdp->data;
3260 u16 i = xdp_ring->next_to_use;
3261 struct i40e_tx_buffer *tx_bi;
3262 struct i40e_tx_desc *tx_desc;
3265 if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
3266 xdp_ring->tx_stats.tx_busy++;
3267 return I40E_XDP_CONSUMED;
3270 dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
3271 if (dma_mapping_error(xdp_ring->dev, dma))
3272 return I40E_XDP_CONSUMED;
3274 tx_bi = &xdp_ring->tx_bi[i];
3275 tx_bi->bytecount = size;
3276 tx_bi->gso_segs = 1;
3277 tx_bi->raw_buf = xdp->data;
3279 /* record length, and DMA address */
3280 dma_unmap_len_set(tx_bi, len, size);
3281 dma_unmap_addr_set(tx_bi, dma, dma);
3283 tx_desc = I40E_TX_DESC(xdp_ring, i);
3284 tx_desc->buffer_addr = cpu_to_le64(dma);
3285 tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC
3289 /* Make certain all of the status bits have been updated
3290 * before next_to_watch is written.
3295 if (i == xdp_ring->count)
3298 tx_bi->next_to_watch = tx_desc;
3299 xdp_ring->next_to_use = i;
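/* Note that no tail write happens here: i40e_clean_rx_irq batches all
 * XDP_TX frames from one NAPI poll and bumps xdp_ring->tail once, after a
 * write barrier, which is why the Rx clean loop tracks xdp_xmit instead of
 * kicking the ring per packet.
 */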
3305 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3307 * @tx_ring: ring to send buffer on
3309 * Returns NETDEV_TX_OK if sent, else an error code
3311 static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
3312 struct i40e_ring *tx_ring)
3314 u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
3315 u32 cd_tunneling = 0, cd_l2tag2 = 0;
3316 struct i40e_tx_buffer *first;
3325 /* prefetch the data, we'll need it later */
3326 prefetch(skb->data);
3328 i40e_trace(xmit_frame_ring, skb, tx_ring);
3330 count = i40e_xmit_descriptor_count(skb);
3331 if (i40e_chk_linearize(skb, count)) {
3332 if (__skb_linearize(skb)) {
3333 dev_kfree_skb_any(skb);
3334 return NETDEV_TX_OK;
3336 count = i40e_txd_use_count(skb->len);
3337 tx_ring->tx_stats.tx_linearize++;
3340 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
3341 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
3342 * + 4 desc gap to avoid the cache line where head is,
3343 * + 1 desc for context descriptor,
3344 * otherwise try next time
3346 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
3347 tx_ring->tx_stats.tx_busy++;
3348 return NETDEV_TX_BUSY;
3351 /* record the location of the first descriptor for this packet */
3352 first = &tx_ring->tx_bi[tx_ring->next_to_use];
3354 first->bytecount = skb->len;
3355 first->gso_segs = 1;
3357 /* prepare the xmit flags */
3358 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
3361 /* obtain protocol of skb */
3362 protocol = vlan_get_protocol(skb);
3364 /* setup IPv4/IPv6 offloads */
3365 if (protocol == htons(ETH_P_IP))
3366 tx_flags |= I40E_TX_FLAGS_IPV4;
3367 else if (protocol == htons(ETH_P_IPV6))
3368 tx_flags |= I40E_TX_FLAGS_IPV6;
3370 tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
3375 tx_flags |= I40E_TX_FLAGS_TSO;
3377 /* Always offload the checksum, since it's in the data descriptor */
3378 tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
3379 tx_ring, &cd_tunneling);
3383 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
3386 tx_flags |= I40E_TX_FLAGS_TSYN;
3388 skb_tx_timestamp(skb);
3390 /* always enable CRC insertion offload */
3391 td_cmd |= I40E_TX_DESC_CMD_ICRC;
3393 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
3394 cd_tunneling, cd_l2tag2);
3396 /* Add Flow Director ATR if it's enabled.
3398 * NOTE: this must always be directly before the data descriptor.
3400 i40e_atr(tx_ring, skb, tx_flags);
3402 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
3404 goto cleanup_tx_tstamp;
3406 return NETDEV_TX_OK;
3409 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
3410 dev_kfree_skb_any(first->skb);
3413 if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
3414 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
3416 dev_kfree_skb_any(pf->ptp_tx_skb);
3417 pf->ptp_tx_skb = NULL;
3418 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
3421 return NETDEV_TX_OK;
3425 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3427 * @netdev: network interface device structure
3429 * Returns NETDEV_TX_OK if sent, else an error code
3431 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3433 struct i40e_netdev_priv *np = netdev_priv(netdev);
3434 struct i40e_vsi *vsi = np->vsi;
3435 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
3437 /* hardware can't handle really short frames, hardware padding works
3440 if (skb_put_padto(skb, I40E_MIN_TX_LEN))
3441 return NETDEV_TX_OK;
3443 return i40e_xmit_frame_ring(skb, tx_ring);