// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count);
static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
				   int count);
static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
				  int count);
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					    int first_index, int count);
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index, int count);
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
static int ena_up(struct ena_adapter *adapter);
static void ena_down(struct ena_adapter *adapter);
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring);
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring);
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					     int first_index, int count);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset
	 * Check that we are not in the middle of a reset already
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}
static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}
static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}
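/* ena_xmit_common - common transmit path for both skb and XDP frames.
 * Issues an early doorbell when the LLQ burst limit is reached, hands the
 * descriptors to the device and advances the ring's next_to_use pointer.
 */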
static int ena_xmit_common(struct net_device *dev,
			   struct ena_ring *ring,
			   struct ena_tx_buffer *tx_info,
			   struct ena_com_tx_ctx *ena_tx_ctx,
			   u16 next_to_use,
			   u32 bytes)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int rc, nb_hw_desc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
	}

	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "Failed to prepare tx bufs\n");
		u64_stats_update_begin(&ring->syncp);
		ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&ring->syncp);
		if (rc != -ENOMEM) {
			adapter->reset_reason =
				ENA_REGS_RESET_DRIVER_INVALID_STATE;
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		}
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}
/* This is the XDP napi callback. XDP queues use a separate napi callback
 * than Rx/Tx queues.
 */
static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *xdp_ring;
	int napi_comp_call = 0;
	int ret;

	xdp_ring = ena_napi->xdp_ring;
	xdp_ring->first_interrupt = ena_napi->first_interrupt;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);

	/* If the device is about to reset or down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.napi_comp += napi_comp_call;
	xdp_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&xdp_ring->syncp);

	return ret;
}
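/* Map an XDP frame for transmission: the first bytes are pushed to the
 * device as an LLQ header, the remainder (if any) is DMA mapped.
 */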
static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
			       struct ena_tx_buffer *tx_info,
			       struct xdp_buff *xdp,
			       void **push_hdr,
			       u32 *push_len)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma = 0;
	u32 size;

	tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
	size = tx_info->xdpf->len;
	ena_buf = tx_info->bufs;

	/* llq push buffer */
	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
	*push_hdr = tx_info->xdpf->data;

	if (size - *push_len > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     *push_hdr + *push_len,
				     size - *push_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 1;
		tx_info->num_of_bufs = 1;
	}

	ena_buf->paddr = dma;
	ena_buf->len = size;

	return 0;

error_report_dma_error:
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&xdp_ring->syncp);
	netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");

	xdp_return_frame_rx_napi(tx_info->xdpf);
	tx_info->xdpf = NULL;
	tx_info->num_of_bufs = 0;

	return -EINVAL;
}
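/* Transmit a single XDP_TX frame on the XDP TX ring that is paired with
 * the RX queue the frame arrived on, and ring the doorbell.
 */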
static int ena_xdp_xmit_buff(struct net_device *dev,
			     struct xdp_buff *xdp,
			     int qid,
			     struct ena_rx_buffer *rx_info)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_com_tx_ctx ena_tx_ctx = {};
	struct ena_tx_buffer *tx_info;
	struct ena_ring *xdp_ring;
	u16 next_to_use, req_id;
	int rc;
	void *push_hdr;
	u32 push_len;

	xdp_ring = &adapter->tx_ring[qid];
	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	page_ref_inc(rx_info->page);
	tx_info->xdp_rx_page = rx_info->page;

	rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
	if (unlikely(rc))
		goto error_drop_packet;

	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = push_len;

	rc = ena_xmit_common(dev,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdp->data_end - xdp->data);
	if (rc)
		goto error_unmap_dma;
	/* trigger the dma engine. ena_com_write_sq_doorbell()
	 * has a mb
	 */
	ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.doorbells++;
	u64_stats_update_end(&xdp_ring->syncp);

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_buff(xdp_ring, tx_info);
	tx_info->xdpf = NULL;
error_drop_packet:
	__free_page(tx_info->xdp_rx_page);
	return NETDEV_TX_OK;
}
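/* Run the attached XDP program on the received buffer and account the
 * verdict in the per-ring XDP statistics.
 */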
static int ena_xdp_execute(struct ena_ring *rx_ring,
			   struct xdp_buff *xdp,
			   struct ena_rx_buffer *rx_info)
{
	struct bpf_prog *xdp_prog;
	u32 verdict = XDP_PASS;
	u64 *xdp_stat;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	if (verdict == XDP_TX) {
		ena_xdp_xmit_buff(rx_ring->netdev,
				  xdp,
				  rx_ring->qid + rx_ring->adapter->num_io_queues,
				  rx_info);

		xdp_stat = &rx_ring->rx_stats.xdp_tx;
	} else if (unlikely(verdict == XDP_ABORTED)) {
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
	} else if (unlikely(verdict == XDP_DROP)) {
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
	} else if (unlikely(verdict == XDP_PASS)) {
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
	} else {
		bpf_warn_invalid_xdp_action(verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
	}

	u64_stats_update_begin(&rx_ring->syncp);
	(*xdp_stat)++;
	u64_stats_update_end(&rx_ring->syncp);
out:
	rcu_read_unlock();
	return verdict;
}
static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}
static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
					     adapter->xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter,
					      adapter->xdp_first_ring,
					      adapter->xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources(adapter);
setup_err:
	return rc;
}
/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}
static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
						 struct bpf_prog *prog,
						 int first, int count)
{
	struct ena_ring *rx_ring;
	int i = 0;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		xchg(&rx_ring->xdp_bpf_prog, prog);
		if (prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = 0;
		}
	}
}

static void ena_xdp_exchange_program(struct ena_adapter *adapter,
				     struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}
static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}
static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
		} else if (old_bpf_prog) {
			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "XDP program is set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is not enough space for allocating XDP queues. Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}
/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	default:
		return -EINVAL;
	}
	return 0;
}
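/* Map each IO queue MSI-X vector into the netdev's CPU rmap so that
 * accelerated RFS (aRFS) can steer flows to the right queue.
 */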
static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < adapter->num_io_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}
static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}
static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = first_index; i < first_index + count; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX common ring state */
		ena_init_io_rings_common(adapter, txr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
		txr->disable_meta_caching = adapter->disable_meta_caching;

		/* Don't init RX queues for xdp queues */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* RX common ring state */
			ena_init_io_rings_common(adapter, rxr, i);

			/* RX specific ring state */
			rxr->ring_size = adapter->requested_rx_ring_size;
			rxr->rx_copybreak = adapter->rx_copybreak;
			rxr->sgl_size = adapter->max_rx_sgl_size;
			rxr->smoothed_interval =
				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
			rxr->empty_rx_queue = 0;
			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}
	}
}
/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;
	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					    int first_index,
					    int count)
{
	int i, rc = 0;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (first_index < i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
						  int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}
/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	ena_free_all_io_tx_resources_in_range(adapter, 0,
					      adapter->xdp_num_queues +
					      adapter->num_io_queues);
}
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}
/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}
/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}
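/* Allocate a page for an RX descriptor and DMA map it, reserving the
 * configured headroom (used by XDP) at the start of the buffer.
 */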
static int ena_alloc_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	int headroom = rx_ring->rx_headroom;
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* restore page offset value in case it has been changed by device */
	rx_info->page_offset = headroom;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	/* To enable NIC-side port-mirroring, AKA SPAN port,
	 * we make the buffer readable from the nic as well
	 */
	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "Allocate page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma + headroom;
	ena_buf->len = ENA_PAGE_SIZE - headroom;

	return 0;
}
static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
		       ENA_PAGE_SIZE, DMA_BIDIRECTIONAL);

	__free_page(page);
	rx_info->page = NULL;
}
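/* Post up to @num free RX buffers to the device and ring the doorbell.
 * Returns the number of buffers actually added.
 */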
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "Failed to allocate buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "Failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Refilled rx qid %d with only %d buffers (from %d)\n",
			   rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}
static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}
/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "Refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_bufs(adapter, i);
}
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}
/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
				     "Free uncompleted tx skb qid %d idx 0x%x\n",
				     tx_ring->qid, i);
			print_once = false;
		} else {
			netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
				  "Free uncompleted tx skb qid %d idx 0x%x\n",
				  tx_ring->qid, i);
		}

		ena_unmap_tx_buff(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}
static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}
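/* An invalid TX req_id indicates corrupted driver/device state, so report
 * it, bump the bad_req_id counter and schedule a device reset.
 */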
static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
				 struct ena_tx_buffer *tx_info, bool is_xdp)
{
	if (tx_info)
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "tx_info doesn't have valid %s",
			  is_xdp ? "xdp frame" : "skb");
	else
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "Invalid req_id: %hu\n",
			  req_id);

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&ring->syncp);

	/* Trigger device reset */
	ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}

static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < xdp_ring->ring_size)) {
		tx_info = &xdp_ring->tx_buffer_info[req_id];
		if (likely(tx_info->xdpf))
			return 0;
	}

	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}
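/* ena_clean_tx_irq - reclaim completed TX descriptors.
 * Frees the skbs of completed packets, returns their req_ids to the free
 * list and wakes the TX queue when enough space becomes available.
 */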
static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}
static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
	}

	return skb;
}
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;

	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;

	prefetch(va);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "RX allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       ENA_PAGE_SIZE, DMA_BIDIRECTIONAL);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, ENA_PAGE_SIZE);

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "RX skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;

		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}
/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: structure containing adapter specific data
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_good++;
			u64_stats_update_end(&rx_ring->syncp);
		} else {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_unchecked++;
			u64_stats_update_end(&rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}
}
static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}
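/* Prepare the xdp_buff that describes the first RX buffer and run the
 * XDP program on it, adjusting the buffer if the program moved the data.
 */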
static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	struct ena_rx_buffer *rx_info;
	int ret;

	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
	xdp->data = page_address(rx_info->page) + rx_info->page_offset;
	xdp_set_data_meta_invalid(xdp);
	xdp->data_hard_start = page_address(rx_info->page);
	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
	/* If for some reason we received a bigger packet than
	 * we expect, then we simply drop it
	 */
	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
		return XDP_DROP;

	ret = ena_xdp_execute(rx_ring, xdp, rx_info);

	/* The xdp program might expand the headers */
	if (ret == XDP_PASS) {
		rx_info->page_offset = xdp->data - xdp->data_hard_start;
		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
	}

	return ret;
}
/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_rx_buffer *rx_info;
	struct ena_adapter *adapter;
	u32 res_budget, work_done;
	int rx_copybreak_pkt = 0;
	int refill_threshold;
	struct sk_buff *skb;
	int refill_required;
	struct xdp_buff xdp;
	int total_len = 0;
	int xdp_verdict;
	int rc = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;
	xdp.rxq = &rx_ring->xdp_rxq;
	xdp.frame_sz = ENA_PAGE_SIZE;

	do {
		xdp_verdict = XDP_PASS;
		skb = NULL;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		ena_rx_ctx.pkt_offset = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		/* First descriptor might have an offset set by the device */
		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
		rx_info->page_offset += ena_rx_ctx.pkt_offset;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		if (ena_xdp_present_ring(rx_ring))
			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);

		/* allocate skb and fill it */
		if (xdp_verdict == XDP_PASS)
			skb = ena_rx_skb(rx_ring,
					 rx_ring->ena_bufs,
					 ena_rx_ctx.descs,
					 &next_to_clean);

		if (unlikely(!skb)) {
			/* The page might not actually be freed here since the
			 * page reference count is incremented in
			 * ena_xdp_xmit_buff(), and it will be decreased only
			 * when send completion was received from the device
			 */
			if (xdp_verdict == XDP_TX)
				ena_free_rx_page(rx_ring,
						 &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				rx_ring->free_ids[next_to_clean] =
					rx_ring->ena_bufs[i].req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);
			}
			if (xdp_verdict != XDP_PASS) {
				res_budget--;
				continue;
			}
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
	refill_threshold =
		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
		      ENA_RX_REFILL_THRESH_PACKET);

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	if (rc == -ENOSPC) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_desc_num++;
		u64_stats_update_end(&rx_ring->syncp);
		adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	} else {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_req_id++;
		u64_stats_update_end(&rx_ring->syncp);
		adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	}

	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}
static void ena_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);

	ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
	dim->state = DIM_START_MEASURE;
}
static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
{
	struct dim_sample dim_sample;
	struct ena_ring *rx_ring = ena_napi->rx_ring;

	if (!rx_ring->per_napi_packets)
		return;

	rx_ring->non_empty_napi_events++;

	dim_update_sample(rx_ring->non_empty_napi_events,
			  rx_ring->rx_stats.cnt,
			  rx_ring->rx_stats.bytes,
			  &dim_sample);

	net_dim(&ena_napi->dim, dim_sample);

	rx_ring->per_napi_packets = 0;
}
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;
	u32 rx_interval = 0;
	/* Rx ring can be NULL for XDP tx queues which don't have an
	 * accompanying rx_ring pair.
	 */
	if (rx_ring)
		rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
			rx_ring->smoothed_interval :
			ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_interval,
				tx_ring->smoothed_interval,
				true);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.unmask_interrupt++;
	u64_stats_update_end(&tx_ring->syncp);

	/* It is a shared MSI-X.
	 * Tx and Rx CQ have pointer to it.
	 * So we use one of them to reach the intr reg
	 * The Tx ring is used because the rx_ring is NULL for XDP queues
	 */
	ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		if (rx_ring)
			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
						 numa_node);
	}

	tx_ring->cpu = cpu;
	if (rx_ring)
		rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}
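/* Reclaim completed XDP TX descriptors and release their frames and pages,
 * similar to ena_clean_tx_irq() but for xdp_frames instead of skbs.
 */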
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
{
	u32 total_done = 0;
	u16 next_to_clean;
	u32 tx_bytes = 0;
	int tx_pkts = 0;
	u16 req_id;
	int rc;

	if (unlikely(!xdp_ring))
		return 0;
	next_to_clean = xdp_ring->next_to_clean;

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct xdp_frame *xdpf;

		rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_xdp_req_id(xdp_ring, req_id);
		if (rc)
			break;

		tx_info = &xdp_ring->tx_buffer_info[req_id];
		xdpf = tx_info->xdpf;

		tx_info->xdpf = NULL;
		tx_info->last_jiffies = 0;
		ena_unmap_tx_buff(xdp_ring, tx_info);

		netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
			  xdpf);

		tx_bytes += xdpf->len;
		tx_pkts++;
		total_done += tx_info->tx_descs;

		__free_page(tx_info->xdp_rx_page);
		xdp_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     xdp_ring->ring_size);
	}

	xdp_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);

	netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  xdp_ring->qid, tx_pkts);

	return tx_pkts;
}
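/* NAPI handler shared by a TX/RX queue pair: clean TX completions, process
 * RX packets, then re-enable the (shared) interrupt if the budget allowed
 * all outstanding work to complete.
 */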
static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;
	int tx_work_done;
	int rx_work_done = 0;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_ring->first_interrupt = ena_napi->first_interrupt;
	rx_ring->first_interrupt = ena_napi->first_interrupt;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	/* On netpoll the budget is zero and the handler should only clean the
	 * tx completions.
	 */
	if (likely(budget))
		rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmasking
	 * the interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when schedule
		 * from the interrupt context (vs from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done) &&
		    READ_ONCE(ena_napi->interrupts_masked)) {
			smp_rmb(); /* make sure interrupts_masked is read */
			WRITE_ONCE(ena_napi->interrupts_masked, false);
			/* We apply adaptive moderation on Rx path only.
			 * Tx uses static interrupt moderation.
			 */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_adaptive_rx_intr_moderation(ena_napi);

			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}
static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	ena_napi->first_interrupt = true;

	WRITE_ONCE(ena_napi->interrupts_masked, true);
	smp_wmb(); /* write interrupts_masked before calling napi */

	napi_schedule_irqoff(&ena_napi->napi);

	return IRQ_HANDLED;
}
/* Reserve a single MSI-X vector for management (admin + aenq),
 * plus one vector for each potential io queue.
 * The number of potential io queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter)
{
	int msix_vecs, irq_cnt;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserve the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
	netif_dbg(adapter, probe, adapter->netdev,
		  "Trying to enable MSI-X, vectors %d\n", msix_vecs);

	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_cnt < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
		return -ENOSPC;
	}

	if (irq_cnt != msix_vecs) {
		netif_notice(adapter, probe, adapter->netdev,
			     "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
			     irq_cnt, msix_vecs);
		adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
	}

	if (ena_init_rx_cpu_rmap(adapter))
		netif_warn(adapter, probe, adapter->netdev,
			   "Failed to map IRQs to CPUs\n");

	adapter->msix_vecs = irq_cnt;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}
static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}
static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;
	int io_queue_count;

	netdev = adapter->netdev;
	io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;

	for (i = 0; i < io_queue_count; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}
static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "Set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}
static int ena_request_io_irq(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
		return -EINVAL;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}
static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}

static void ena_free_io_irq(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}

static void ena_disable_msix(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
		pci_free_irq_vectors(adapter->pdev);
}
static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}
static void ena_del_napi_in_range(struct ena_adapter *adapter,
				  int first_index,
				  int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++) {
		netif_napi_del(&adapter->ena_napi[i].napi);

		WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
			adapter->ena_napi[i].xdp_ring);
	}
}

static void ena_init_napi_in_range(struct ena_adapter *adapter,
				   int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++) {
		struct ena_napi *napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &napi->napi,
			       ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
			       ENA_NAPI_BUDGET);

		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			napi->rx_ring = &adapter->rx_ring[i];
			napi->tx_ring = &adapter->tx_ring[i];
		} else {
			napi->xdp_ring = &adapter->tx_ring[i];
		}
		napi->qid = i;
	}
}

static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index,
				      int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index,
				     int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}
/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EOPNOTSUPP)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EOPNOTSUPP))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	return 0;
}
static int ena_up_complete(struct ena_adapter *adapter)
{
	int rc;

	rc = ena_rss_configure(adapter);
	if (rc)
		return rc;

	ena_change_mtu(adapter->netdev, adapter->netdev->mtu);

	ena_refill_all_rx_bufs(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(adapter->netdev);

	ena_napi_enable_in_range(adapter,
				 0,
				 adapter->xdp_num_queues + adapter->num_io_queues);

	return 0;
}
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
{
	struct ena_com_create_io_ctx ctx;
	struct ena_com_dev *ena_dev;
	struct ena_ring *tx_ring;
	u32 msix_vector;
	u16 ena_qid;
	int rc;

	ena_dev = adapter->ena_dev;

	tx_ring = &adapter->tx_ring[qid];
	msix_vector = ENA_IO_IRQ_IDX(qid);
	ena_qid = ENA_IO_TXQ_IDX(qid);

	memset(&ctx, 0x0, sizeof(ctx));

	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
	ctx.qid = ena_qid;
	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
	ctx.msix_vector = msix_vector;
	ctx.queue_size = tx_ring->ring_size;
	ctx.numa_node = cpu_to_node(tx_ring->cpu);

	rc = ena_com_create_io_queue(ena_dev, &ctx);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to create I/O TX queue num %d rc: %d\n",
			  qid, rc);
		return rc;
	}

	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
				     &tx_ring->ena_com_io_sq,
				     &tx_ring->ena_com_io_cq);
	if (rc) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
			  qid, rc);
		ena_com_destroy_io_queue(ena_dev, ena_qid);
		return rc;
	}

	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
	return rc;
}
2311 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
2312 int first_index, int count)
2314 struct ena_com_dev *ena_dev = adapter->ena_dev;
2317 for (i = first_index; i < first_index + count; i++) {
2318 rc = ena_create_io_tx_queue(adapter, i);
2326 while (i-- > first_index)
2327 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
2332 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
2334 struct ena_com_dev *ena_dev;
2335 struct ena_com_create_io_ctx ctx;
2336 struct ena_ring *rx_ring;
2341 ena_dev = adapter->ena_dev;
2343 rx_ring = &adapter->rx_ring[qid];
2344 msix_vector = ENA_IO_IRQ_IDX(qid);
2345 ena_qid = ENA_IO_RXQ_IDX(qid);
2347 memset(&ctx, 0x0, sizeof(ctx));
2350 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
2351 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2352 ctx.msix_vector = msix_vector;
2353 ctx.queue_size = rx_ring->ring_size;
2354 ctx.numa_node = cpu_to_node(rx_ring->cpu);
2356 rc = ena_com_create_io_queue(ena_dev, &ctx);
2358 netif_err(adapter, ifup, adapter->netdev,
2359 "Failed to create I/O RX queue num %d rc: %d\n",
2364 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2365 &rx_ring->ena_com_io_sq,
2366 &rx_ring->ena_com_io_cq);
2368 netif_err(adapter, ifup, adapter->netdev,
2369 "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
2374 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
2378 ena_com_destroy_io_queue(ena_dev, ena_qid);
2382 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
2384 struct ena_com_dev *ena_dev = adapter->ena_dev;
2387 for (i = 0; i < adapter->num_io_queues; i++) {
2388 rc = ena_create_io_rx_queue(adapter, i);
2391 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
2398 cancel_work_sync(&adapter->ena_napi[i].dim.work);
2399 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
2405 static void set_io_rings_size(struct ena_adapter *adapter,
2411 for (i = 0; i < adapter->num_io_queues; i++) {
2412 adapter->tx_ring[i].ring_size = new_tx_size;
2413 adapter->rx_ring[i].ring_size = new_rx_size;
2417 /* This function allows queue allocation to back off when the system is
2418 * low on memory. If there is not enough memory to allocate io queues
2419 * the driver will try to allocate smaller queues.
2421 * The backoff algorithm is as follows:
2422 * 1. Try to allocate TX and RX; if successful
2423 * 1.1. return success
2425 * 2. Divide by 2 the size of the larger of the RX and TX queues (or both if their size is the same).
2427 * 3. If TX or RX is smaller than 256
2428 * 3.1. return failure.
2430 * 4. Otherwise, go back to 1. (An illustrative sketch of this loop follows the function below.)
2432 static int create_queues_with_size_backoff(struct ena_adapter *adapter)
2434 int rc, cur_rx_ring_size, cur_tx_ring_size;
2435 int new_rx_ring_size, new_tx_ring_size;
2437 /* current queue sizes might be set to smaller than the requested
2438 * ones due to past queue allocation failures.
2440 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2441 adapter->requested_rx_ring_size);
2444 if (ena_xdp_present(adapter)) {
2445 rc = ena_setup_and_create_all_xdp_queues(adapter);
2450 rc = ena_setup_tx_resources_in_range(adapter,
2452 adapter->num_io_queues);
2456 rc = ena_create_io_tx_queues_in_range(adapter,
2458 adapter->num_io_queues);
2460 goto err_create_tx_queues;
2462 rc = ena_setup_all_rx_resources(adapter);
2466 rc = ena_create_all_io_rx_queues(adapter);
2468 goto err_create_rx_queues;
2472 err_create_rx_queues:
2473 ena_free_all_io_rx_resources(adapter);
2475 ena_destroy_all_tx_queues(adapter);
2476 err_create_tx_queues:
2477 ena_free_all_io_tx_resources(adapter);
2479 if (rc != -ENOMEM) {
2480 netif_err(adapter, ifup, adapter->netdev,
2481 "Queue creation failed with error code %d\n",
2486 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2487 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2489 netif_err(adapter, ifup, adapter->netdev,
2490 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2491 cur_tx_ring_size, cur_rx_ring_size);
2493 new_tx_ring_size = cur_tx_ring_size;
2494 new_rx_ring_size = cur_rx_ring_size;
2496 /* Decrease the size of the larger queue, or
2497 * decrease both if they are the same size.
2499 if (cur_rx_ring_size <= cur_tx_ring_size)
2500 new_tx_ring_size = cur_tx_ring_size / 2;
2501 if (cur_rx_ring_size >= cur_tx_ring_size)
2502 new_rx_ring_size = cur_rx_ring_size / 2;
2504 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2505 new_rx_ring_size < ENA_MIN_RING_SIZE) {
2506 netif_err(adapter, ifup, adapter->netdev,
2507 "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
2512 netif_err(adapter, ifup, adapter->netdev,
2513 "Retrying queue creation with sizes TX=%d, RX=%d\n",
2517 set_io_rings_size(adapter, new_tx_ring_size,
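/* A minimal sketch of the backoff loop above, assuming a hypothetical
 * try_create_queues() helper and a 256-entry minimum; e.g. a request of
 * 1024/1024 would retry as 512/512, then 256/256, and then give up.
 */
#if 0	/* illustrative only, not part of the driver */
static int backoff_sketch(u32 tx, u32 rx)
{
	for (;;) {
		u32 cur_tx = tx, cur_rx = rx;

		if (!try_create_queues(cur_tx, cur_rx))	/* hypothetical helper */
			return 0;			/* step 1.1 */
		if (cur_rx <= cur_tx)			/* step 2: halve the larger */
			tx = cur_tx / 2;
		if (cur_rx >= cur_tx)
			rx = cur_rx / 2;
		if (tx < 256 || rx < 256)		/* step 3 */
			return -ENOMEM;			/* step 3.1 */
	}						/* step 4: retry */
}
#endif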
2522 static int ena_up(struct ena_adapter *adapter)
2524 int io_queue_count, rc, i;
2526 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
2528 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2529 ena_setup_io_intr(adapter);
2531 /* napi poll functions should be initialized before running
2532 * request_irq(), to handle a rare condition where there is a pending
2533 * interrupt, causing the ISR to fire immediately while the poll
2534 * function wasn't set yet, causing a null dereference
2536 ena_init_napi_in_range(adapter, 0, io_queue_count);
2538 rc = ena_request_io_irq(adapter);
2542 rc = create_queues_with_size_backoff(adapter);
2544 goto err_create_queues_with_backoff;
2546 rc = ena_up_complete(adapter);
2550 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2551 netif_carrier_on(adapter->netdev);
2553 u64_stats_update_begin(&adapter->syncp);
2554 adapter->dev_stats.interface_up++;
2555 u64_stats_update_end(&adapter->syncp);
2557 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2559 /* Enable completion queues interrupt */
2560 for (i = 0; i < adapter->num_io_queues; i++)
2561 ena_unmask_interrupt(&adapter->tx_ring[i],
2562 &adapter->rx_ring[i]);
2564 /* schedule napi in case we had pending packets
2565 * from the last time we disabled napi
2567 for (i = 0; i < io_queue_count; i++)
2568 napi_schedule(&adapter->ena_napi[i].napi);
2573 ena_destroy_all_tx_queues(adapter);
2574 ena_free_all_io_tx_resources(adapter);
2575 ena_destroy_all_rx_queues(adapter);
2576 ena_free_all_io_rx_resources(adapter);
2577 err_create_queues_with_backoff:
2578 ena_free_io_irq(adapter);
2580 ena_del_napi_in_range(adapter, 0, io_queue_count);
2585 static void ena_down(struct ena_adapter *adapter)
2587 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2589 netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
2591 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2593 u64_stats_update_begin(&adapter->syncp);
2594 adapter->dev_stats.interface_down++;
2595 u64_stats_update_end(&adapter->syncp);
2597 netif_carrier_off(adapter->netdev);
2598 netif_tx_disable(adapter->netdev);
2600 /* After this point the napi handler won't enable the tx queue */
2601 ena_napi_disable_in_range(adapter, 0, io_queue_count);
2603 /* After the queues are destroyed there won't be any new interrupts */
2605 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
2608 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2610 netif_err(adapter, ifdown, adapter->netdev,
2611 "Device reset failed\n");
2612 /* stop submitting admin commands on a device that was reset */
2613 ena_com_set_admin_running_state(adapter->ena_dev, false);
2616 ena_destroy_all_io_queues(adapter);
2618 ena_disable_io_intr_sync(adapter);
2619 ena_free_io_irq(adapter);
2620 ena_del_napi_in_range(adapter, 0, io_queue_count);
2622 ena_free_all_tx_bufs(adapter);
2623 ena_free_all_rx_bufs(adapter);
2624 ena_free_all_io_tx_resources(adapter);
2625 ena_free_all_io_rx_resources(adapter);
2628 /* ena_open - Called when a network interface is made active
2629 * @netdev: network interface device structure
2631 * Returns 0 on success, negative value on failure
2633 * The open entry point is called when a network interface is made
2634 * active by the system (IFF_UP). At this point all resources needed
2635 * for transmit and receive operations are allocated, the interrupt
2636 * handler is registered with the OS, the watchdog timer is started,
2637 * and the stack is notified that the interface is ready.
2639 static int ena_open(struct net_device *netdev)
2641 struct ena_adapter *adapter = netdev_priv(netdev);
2644 /* Notify the stack of the actual queue counts. */
2645 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
2647 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2651 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
2653 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2657 rc = ena_up(adapter);
2664 /* ena_close - Disables a network interface
2665 * @netdev: network interface device structure
2667 * Returns 0, this is not allowed to fail
2669 * The close entry point is called when an interface is de-activated
2670 * by the OS. The hardware is still under the driver's control, but
2671 * needs to be disabled. A global MAC reset is issued to stop the
2672 * hardware, and all transmit and receive resources are freed.
2674 static int ena_close(struct net_device *netdev)
2676 struct ena_adapter *adapter = netdev_priv(netdev);
2678 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2680 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2683 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2686 /* Check the device status and issue a reset if needed */
2687 check_for_admin_com_state(adapter);
2688 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2689 netif_err(adapter, ifdown, adapter->netdev,
2690 "Destroy failure, restarting device\n");
2691 ena_dump_stats_to_dmesg(adapter);
2692 /* rtnl lock already obtained in dev_ioctl() layer */
2693 ena_destroy_device(adapter, false);
2694 ena_restore_device(adapter);
2700 int ena_update_queue_sizes(struct ena_adapter *adapter,
2706 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2707 ena_close(adapter->netdev);
2708 adapter->requested_tx_ring_size = new_tx_size;
2709 adapter->requested_rx_ring_size = new_rx_size;
2710 ena_init_io_rings(adapter,
2712 adapter->xdp_num_queues +
2713 adapter->num_io_queues);
2714 return dev_was_up ? ena_up(adapter) : 0;
2717 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
2719 struct ena_com_dev *ena_dev = adapter->ena_dev;
2720 int prev_channel_count;
2723 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2724 ena_close(adapter->netdev);
2725 prev_channel_count = adapter->num_io_queues;
2726 adapter->num_io_queues = new_channel_count;
2727 if (ena_xdp_present(adapter) &&
2728 ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
2729 adapter->xdp_first_ring = new_channel_count;
2730 adapter->xdp_num_queues = new_channel_count;
2731 if (prev_channel_count > new_channel_count)
2732 ena_xdp_exchange_program_rx_in_range(adapter,
2735 prev_channel_count);
2737 ena_xdp_exchange_program_rx_in_range(adapter,
2738 adapter->xdp_bpf_prog,
2743 /* We need to destroy the rss table so that the indirection
2744 * table will be reinitialized by ena_up()
2746 ena_com_rss_destroy(ena_dev);
2747 ena_init_io_rings(adapter,
2749 adapter->xdp_num_queues +
2750 adapter->num_io_queues);
2751 return dev_was_up ? ena_open(adapter->netdev) : 0;
2754 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx,
2755 struct sk_buff *skb,
2756 bool disable_meta_caching)
2758 u32 mss = skb_shinfo(skb)->gso_size;
2759 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
2762 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2763 ena_tx_ctx->l4_csum_enable = 1;
2765 ena_tx_ctx->tso_enable = 1;
2766 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2767 ena_tx_ctx->l4_csum_partial = 0;
2769 ena_tx_ctx->tso_enable = 0;
2770 ena_meta->l4_hdr_len = 0;
2771 ena_tx_ctx->l4_csum_partial = 1;
2774 switch (ip_hdr(skb)->version) {
2776 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2777 if (ip_hdr(skb)->frag_off & htons(IP_DF))
2780 ena_tx_ctx->l3_csum_enable = 1;
2781 l4_protocol = ip_hdr(skb)->protocol;
2784 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2785 l4_protocol = ipv6_hdr(skb)->nexthdr;
2791 if (l4_protocol == IPPROTO_TCP)
2792 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2794 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2796 ena_meta->mss = mss;
2797 ena_meta->l3_hdr_len = skb_network_header_len(skb);
2798 ena_meta->l3_hdr_offset = skb_network_offset(skb);
2799 ena_tx_ctx->meta_valid = 1;
2800 } else if (disable_meta_caching) {
2801 memset(ena_meta, 0, sizeof(*ena_meta));
2802 ena_tx_ctx->meta_valid = 1;
2804 ena_tx_ctx->meta_valid = 0;
2808 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2809 struct sk_buff *skb)
2811 int num_frags, header_len, rc;
2813 num_frags = skb_shinfo(skb)->nr_frags;
2814 header_len = skb_headlen(skb);
2816 if (num_frags < tx_ring->sgl_size)
2819 if ((num_frags == tx_ring->sgl_size) &&
2820 (header_len < tx_ring->tx_max_header_size))
2823 u64_stats_update_begin(&tx_ring->syncp);
2824 tx_ring->tx_stats.linearize++;
2825 u64_stats_update_end(&tx_ring->syncp);
2827 rc = skb_linearize(skb);
2829 u64_stats_update_begin(&tx_ring->syncp);
2830 tx_ring->tx_stats.linearize_failed++;
2831 u64_stats_update_end(&tx_ring->syncp);
2837 static int ena_tx_map_skb(struct ena_ring *tx_ring,
2838 struct ena_tx_buffer *tx_info,
2839 struct sk_buff *skb,
2843 struct ena_adapter *adapter = tx_ring->adapter;
2844 struct ena_com_buf *ena_buf;
2846 u32 skb_head_len, frag_len, last_frag;
2851 skb_head_len = skb_headlen(skb);
2853 ena_buf = tx_info->bufs;
2855 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2856 /* When the device is in LLQ mode, the driver will copy
2857 * the header into the device memory space.
2858 * The ena_com layer assumes the header is in a linear memory space.
2860 * This assumption might be wrong since part of the header
2861 * can be in the fragmented buffers.
2862 * Use skb_header_pointer to make sure the header is in a
2863 * linear memory space.
2866 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2867 *push_hdr = skb_header_pointer(skb, 0, push_len,
2868 tx_ring->push_buf_intermediate_buf);
2869 *header_len = push_len;
2870 if (unlikely(skb->data != *push_hdr)) {
2871 u64_stats_update_begin(&tx_ring->syncp);
2872 tx_ring->tx_stats.llq_buffer_copy++;
2873 u64_stats_update_end(&tx_ring->syncp);
2875 delta = push_len - skb_head_len;
2879 *header_len = min_t(u32, skb_head_len,
2880 tx_ring->tx_max_header_size);
2883 netif_dbg(adapter, tx_queued, adapter->netdev,
2884 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2885 *push_hdr, push_len);
2887 if (skb_head_len > push_len) {
2888 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2889 skb_head_len - push_len, DMA_TO_DEVICE);
2890 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2891 goto error_report_dma_error;
2893 ena_buf->paddr = dma;
2894 ena_buf->len = skb_head_len - push_len;
2897 tx_info->num_of_bufs++;
2898 tx_info->map_linear_data = 1;
2900 tx_info->map_linear_data = 0;
2903 last_frag = skb_shinfo(skb)->nr_frags;
2905 for (i = 0; i < last_frag; i++) {
2906 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2908 frag_len = skb_frag_size(frag);
2910 if (unlikely(delta >= frag_len)) {
2915 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2916 frag_len - delta, DMA_TO_DEVICE);
2917 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2918 goto error_report_dma_error;
2920 ena_buf->paddr = dma;
2921 ena_buf->len = frag_len - delta;
2923 tx_info->num_of_bufs++;
2929 error_report_dma_error:
2930 u64_stats_update_begin(&tx_ring->syncp);
2931 tx_ring->tx_stats.dma_mapping_err++;
2932 u64_stats_update_end(&tx_ring->syncp);
2933 netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
2935 tx_info->skb = NULL;
2937 tx_info->num_of_bufs += i;
2938 ena_unmap_tx_buff(tx_ring, tx_info);
2943 /* Called with netif_tx_lock. */
2944 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2946 struct ena_adapter *adapter = netdev_priv(dev);
2947 struct ena_tx_buffer *tx_info;
2948 struct ena_com_tx_ctx ena_tx_ctx;
2949 struct ena_ring *tx_ring;
2950 struct netdev_queue *txq;
2952 u16 next_to_use, req_id, header_len;
2955 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2956 /* Determine which tx ring we will be placed on */
2957 qid = skb_get_queue_mapping(skb);
2958 tx_ring = &adapter->tx_ring[qid];
2959 txq = netdev_get_tx_queue(dev, qid);
2961 rc = ena_check_and_linearize_skb(tx_ring, skb);
2963 goto error_drop_packet;
2965 skb_tx_timestamp(skb);
2967 next_to_use = tx_ring->next_to_use;
2968 req_id = tx_ring->free_ids[next_to_use];
2969 tx_info = &tx_ring->tx_buffer_info[req_id];
2970 tx_info->num_of_bufs = 0;
2972 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2974 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
2976 goto error_drop_packet;
2978 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2979 ena_tx_ctx.ena_bufs = tx_info->bufs;
2980 ena_tx_ctx.push_header = push_hdr;
2981 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2982 ena_tx_ctx.req_id = req_id;
2983 ena_tx_ctx.header_len = header_len;
2985 /* set flags and meta data */
2986 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
2988 rc = ena_xmit_common(dev,
2995 goto error_unmap_dma;
2997 netdev_tx_sent_queue(txq, skb->len);
2999 /* Stop the queue when no more space is available; the packet can need up
3000 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the
3001 * header (if the header is larger than tx_max_header_size).
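/* Illustrative arithmetic (hypothetical value): with sgl_size = 17, the check
 * below stops the queue once fewer than 17 + 2 = 19 free descriptors remain
 * in the submission queue.
 */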
3003 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3004 tx_ring->sgl_size + 2))) {
3005 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
3008 netif_tx_stop_queue(txq);
3009 u64_stats_update_begin(&tx_ring->syncp);
3010 tx_ring->tx_stats.queue_stop++;
3011 u64_stats_update_end(&tx_ring->syncp);
3013 /* There is a rare condition where this function decides to
3014 * stop the queue but meanwhile clean_tx_irq updates
3015 * next_to_completion and terminates.
3016 * The queue will remain stopped forever.
3017 * To solve this issue add an mb() to make sure that the
3018 * netif_tx_stop_queue() write is visible before checking if
3019 * there is additional space in the queue.
3023 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3024 ENA_TX_WAKEUP_THRESH)) {
3025 netif_tx_wake_queue(txq);
3026 u64_stats_update_begin(&tx_ring->syncp);
3027 tx_ring->tx_stats.queue_wakeup++;
3028 u64_stats_update_end(&tx_ring->syncp);
3032 if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
3033 /* trigger the dma engine; ena_com_write_sq_doorbell() also acts as a memory barrier */
3036 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
3037 u64_stats_update_begin(&tx_ring->syncp);
3038 tx_ring->tx_stats.doorbells++;
3039 u64_stats_update_end(&tx_ring->syncp);
3042 return NETDEV_TX_OK;
3045 ena_unmap_tx_buff(tx_ring, tx_info);
3046 tx_info->skb = NULL;
3050 return NETDEV_TX_OK;
3053 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
3054 struct net_device *sb_dev)
3057 /* we suspect that this is good for in-kernel network services that
3058 * want to loop incoming skb rx to tx in normal user generated traffic;
3059 * most probably we will not get to this
3061 if (skb_rx_queue_recorded(skb))
3062 qid = skb_get_rx_queue(skb);
3064 qid = netdev_pick_tx(dev, skb, NULL);
3069 static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3071 struct device *dev = &pdev->dev;
3072 struct ena_admin_host_info *host_info;
3075 /* Allocate only the host info */
3076 rc = ena_com_allocate_host_info(ena_dev);
3078 dev_err(dev, "Cannot allocate host info\n");
3082 host_info = ena_dev->host_attr.host_info;
3084 host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
3085 host_info->os_type = ENA_ADMIN_OS_LINUX;
3086 host_info->kernel_ver = LINUX_VERSION_CODE;
3087 strlcpy(host_info->kernel_ver_str, utsname()->version,
3088 sizeof(host_info->kernel_ver_str) - 1);
3089 host_info->os_dist = 0;
3090 strncpy(host_info->os_dist_str, utsname()->release,
3091 sizeof(host_info->os_dist_str) - 1);
3092 host_info->driver_version =
3093 (DRV_MODULE_GEN_MAJOR) |
3094 (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
3095 (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
3096 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
3097 host_info->num_cpus = num_online_cpus();
3099 host_info->driver_supported_features =
3100 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
3101 ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
3102 ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
3103 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
3105 rc = ena_com_set_host_attributes(ena_dev);
3107 if (rc == -EOPNOTSUPP)
3108 dev_warn(dev, "Cannot set host attributes\n");
3110 dev_err(dev, "Cannot set host attributes\n");
3118 ena_com_delete_host_info(ena_dev);
3121 static void ena_config_debug_area(struct ena_adapter *adapter)
3123 u32 debug_area_size;
3126 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
3127 if (ss_count <= 0) {
3128 netif_err(adapter, drv, adapter->netdev,
3129 "SS count is negative\n");
3133 /* allocate 32 bytes for each string and 64 bits for each value */
3134 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
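/* Illustrative arithmetic (hypothetical count): for ss_count = 50 this
 * requests 50 * 32 + 50 * 8 = 2000 bytes of debug area.
 */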
3136 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
3138 netif_err(adapter, drv, adapter->netdev,
3139 "Cannot allocate debug area\n");
3143 rc = ena_com_set_host_attributes(adapter->ena_dev);
3145 if (rc == -EOPNOTSUPP)
3146 netif_warn(adapter, drv, adapter->netdev,
3147 "Cannot set host attributes\n");
3149 netif_err(adapter, drv, adapter->netdev,
3150 "Cannot set host attributes\n");
3156 ena_com_delete_debug_area(adapter->ena_dev);
3159 int ena_update_hw_stats(struct ena_adapter *adapter)
3163 rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
3165 dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n");
3172 static void ena_get_stats64(struct net_device *netdev,
3173 struct rtnl_link_stats64 *stats)
3175 struct ena_adapter *adapter = netdev_priv(netdev);
3176 struct ena_ring *rx_ring, *tx_ring;
3182 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3185 for (i = 0; i < adapter->num_io_queues; i++) {
3188 tx_ring = &adapter->tx_ring[i];
3191 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
3192 packets = tx_ring->tx_stats.cnt;
3193 bytes = tx_ring->tx_stats.bytes;
3194 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
3196 stats->tx_packets += packets;
3197 stats->tx_bytes += bytes;
3199 rx_ring = &adapter->rx_ring[i];
3202 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
3203 packets = rx_ring->rx_stats.cnt;
3204 bytes = rx_ring->rx_stats.bytes;
3205 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
3207 stats->rx_packets += packets;
3208 stats->rx_bytes += bytes;
3212 start = u64_stats_fetch_begin_irq(&adapter->syncp);
3213 rx_drops = adapter->dev_stats.rx_drops;
3214 tx_drops = adapter->dev_stats.tx_drops;
3215 } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
3217 stats->rx_dropped = rx_drops;
3218 stats->tx_dropped = tx_drops;
3220 stats->multicast = 0;
3221 stats->collisions = 0;
3223 stats->rx_length_errors = 0;
3224 stats->rx_crc_errors = 0;
3225 stats->rx_frame_errors = 0;
3226 stats->rx_fifo_errors = 0;
3227 stats->rx_missed_errors = 0;
3228 stats->tx_window_errors = 0;
3230 stats->rx_errors = 0;
3231 stats->tx_errors = 0;
3234 static const struct net_device_ops ena_netdev_ops = {
3235 .ndo_open = ena_open,
3236 .ndo_stop = ena_close,
3237 .ndo_start_xmit = ena_start_xmit,
3238 .ndo_select_queue = ena_select_queue,
3239 .ndo_get_stats64 = ena_get_stats64,
3240 .ndo_tx_timeout = ena_tx_timeout,
3241 .ndo_change_mtu = ena_change_mtu,
3242 .ndo_set_mac_address = NULL,
3243 .ndo_validate_addr = eth_validate_addr,
3247 static int ena_device_validate_params(struct ena_adapter *adapter,
3248 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3250 struct net_device *netdev = adapter->netdev;
3253 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
3256 netif_err(adapter, drv, netdev,
3257 "Error, mac address are different\n");
3261 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
3262 netif_err(adapter, drv, netdev,
3263 "Error, device max mtu is smaller than netdev MTU\n");
3270 static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
3272 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
3273 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
3274 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
3275 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
3276 llq_config->llq_ring_entry_size_value = 128;
3279 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3280 struct ena_com_dev *ena_dev,
3281 struct ena_admin_feature_llq_desc *llq,
3282 struct ena_llq_configurations *llq_default_configurations)
3285 u32 llq_feature_mask;
3287 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3288 if (!(ena_dev->supported_features & llq_feature_mask)) {
3290 "LLQ is not supported Fallback to host mode policy.\n");
3291 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3295 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3298 "Failed to configure the device mode. Fallback to host mode policy.\n");
3299 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3305 static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
3308 bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
3311 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
3313 "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
3314 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3320 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3321 pci_resource_start(pdev, ENA_MEM_BAR),
3322 pci_resource_len(pdev, ENA_MEM_BAR));
3324 if (!ena_dev->mem_bar)
3330 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
3331 struct ena_com_dev_get_features_ctx *get_feat_ctx,
3334 struct ena_llq_configurations llq_config;
3335 struct device *dev = &pdev->dev;
3336 bool readless_supported;
3341 rc = ena_com_mmio_reg_read_request_init(ena_dev);
3343 dev_err(dev, "Failed to init mmio read less\n");
3347 /* The PCIe configuration space revision id indicates whether mmio reg read is disabled */
3350 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
3351 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3353 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
3355 dev_err(dev, "Can not reset device\n");
3356 goto err_mmio_read_less;
3359 rc = ena_com_validate_version(ena_dev);
3361 dev_err(dev, "Device version is too low\n");
3362 goto err_mmio_read_less;
3365 dma_width = ena_com_get_dma_width(ena_dev);
3366 if (dma_width < 0) {
3367 dev_err(dev, "Invalid dma width value %d", dma_width);
3369 goto err_mmio_read_less;
3372 rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
3374 dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
3375 goto err_mmio_read_less;
3378 /* ENA admin level init */
3379 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
3382 "Can not initialize ena admin queue with device\n");
3383 goto err_mmio_read_less;
3386 /* To enable the msix interrupts the driver needs to know the number
3387 * of queues, so the driver uses polling mode to retrieve this information. */
3390 ena_com_set_admin_polling_mode(ena_dev, true);
3392 ena_config_host_info(ena_dev, pdev);
3394 /* Get Device Attributes */
3395 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3397 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
3398 goto err_admin_init;
3401 /* Try to turn on all the available aenq groups */
3402 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3403 BIT(ENA_ADMIN_FATAL_ERROR) |
3404 BIT(ENA_ADMIN_WARNING) |
3405 BIT(ENA_ADMIN_NOTIFICATION) |
3406 BIT(ENA_ADMIN_KEEP_ALIVE);
3408 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3410 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3412 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
3413 goto err_admin_init;
3416 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3418 set_default_llq_configurations(&llq_config);
3420 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
3423 dev_err(dev, "ENA device init failed\n");
3424 goto err_admin_init;
3430 ena_com_delete_host_info(ena_dev);
3431 ena_com_admin_destroy(ena_dev);
3433 ena_com_mmio_reg_read_request_destroy(ena_dev);
3438 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
3440 struct ena_com_dev *ena_dev = adapter->ena_dev;
3441 struct device *dev = &adapter->pdev->dev;
3444 rc = ena_enable_msix(adapter);
3446 dev_err(dev, "Can not reserve msix vectors\n");
3450 ena_setup_mgmnt_intr(adapter);
3452 rc = ena_request_mgmnt_irq(adapter);
3454 dev_err(dev, "Can not setup management interrupts\n");
3455 goto err_disable_msix;
3458 ena_com_set_admin_polling_mode(ena_dev, false);
3460 ena_com_admin_aenq_enable(ena_dev);
3465 ena_disable_msix(adapter);
3470 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3472 struct net_device *netdev = adapter->netdev;
3473 struct ena_com_dev *ena_dev = adapter->ena_dev;
3476 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3479 netif_carrier_off(netdev);
3481 del_timer_sync(&adapter->timer_service);
3483 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
3484 adapter->dev_up_before_reset = dev_up;
3486 ena_com_set_admin_running_state(ena_dev, false);
3488 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3491 /* Stop the device from sending AENQ events (in case reset flag is set
3492 * and device is up, ena_down() already reset the device). */
3494 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
3495 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3497 ena_free_mgmnt_irq(adapter);
3499 ena_disable_msix(adapter);
3501 ena_com_abort_admin_commands(ena_dev);
3503 ena_com_wait_for_abort_completion(ena_dev);
3505 ena_com_admin_destroy(ena_dev);
3507 ena_com_mmio_reg_read_request_destroy(ena_dev);
3509 /* return reset reason to default value */
3510 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3512 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3513 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3516 static int ena_restore_device(struct ena_adapter *adapter)
3518 struct ena_com_dev_get_features_ctx get_feat_ctx;
3519 struct ena_com_dev *ena_dev = adapter->ena_dev;
3520 struct pci_dev *pdev = adapter->pdev;
3524 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3525 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
3527 dev_err(&pdev->dev, "Can not initialize device\n");
3530 adapter->wd_state = wd_state;
3532 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3534 dev_err(&pdev->dev, "Validation of device parameters failed\n");
3535 goto err_device_destroy;
3538 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3540 dev_err(&pdev->dev, "Enable MSI-X failed\n");
3541 goto err_device_destroy;
3543 /* If the interface was up before the reset, bring it up */
3544 if (adapter->dev_up_before_reset) {
3545 rc = ena_up(adapter);
3547 dev_err(&pdev->dev, "Failed to create I/O queues\n");
3548 goto err_disable_msix;
3552 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3554 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3555 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
3556 netif_carrier_on(adapter->netdev);
3558 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3559 adapter->last_keep_alive_jiffies = jiffies;
3561 dev_err(&pdev->dev, "Device reset completed successfully\n");
3565 ena_free_mgmnt_irq(adapter);
3566 ena_disable_msix(adapter);
3568 ena_com_abort_admin_commands(ena_dev);
3569 ena_com_wait_for_abort_completion(ena_dev);
3570 ena_com_admin_destroy(ena_dev);
3571 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3572 ena_com_mmio_reg_read_request_destroy(ena_dev);
3574 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3575 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3577 "Reset attempt failed. Can not reset the device\n");
3582 static void ena_fw_reset_device(struct work_struct *work)
3584 struct ena_adapter *adapter =
3585 container_of(work, struct ena_adapter, reset_task);
3589 if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3590 ena_destroy_device(adapter, false);
3591 ena_restore_device(adapter);
3597 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3598 struct ena_ring *rx_ring)
3600 if (likely(rx_ring->first_interrupt))
3603 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3606 rx_ring->no_interrupt_event_cnt++;
3608 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3609 netif_err(adapter, rx_err, adapter->netdev,
3610 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3612 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3613 smp_mb__before_atomic();
3614 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3621 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3622 struct ena_ring *tx_ring)
3624 struct ena_tx_buffer *tx_buf;
3625 unsigned long last_jiffies;
3629 for (i = 0; i < tx_ring->ring_size; i++) {
3630 tx_buf = &tx_ring->tx_buffer_info[i];
3631 last_jiffies = tx_buf->last_jiffies;
3633 if (last_jiffies == 0)
3634 /* no pending Tx at this location */
3637 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
3638 2 * adapter->missing_tx_completion_to))) {
3639 /* If after the graceful period the interrupt is still not
3640 * received, we schedule a reset
3642 netif_err(adapter, tx_err, adapter->netdev,
3643 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
3645 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3646 smp_mb__before_atomic();
3647 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3651 if (unlikely(time_is_before_jiffies(last_jiffies +
3652 adapter->missing_tx_completion_to))) {
3653 if (!tx_buf->print_once)
3654 netif_notice(adapter, tx_err, adapter->netdev,
3655 "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
3658 tx_buf->print_once = 1;
3663 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
3664 netif_err(adapter, tx_err, adapter->netdev,
3665 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
3667 adapter->missing_tx_completion_threshold);
3668 adapter->reset_reason =
3669 ENA_REGS_RESET_MISS_TX_CMPL;
3670 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3674 u64_stats_update_begin(&tx_ring->syncp);
3675 tx_ring->tx_stats.missed_tx += missed_tx;
3676 u64_stats_update_end(&tx_ring->syncp);
3681 static void check_for_missing_completions(struct ena_adapter *adapter)
3683 struct ena_ring *tx_ring;
3684 struct ena_ring *rx_ring;
3688 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
3689 /* Make sure the driver doesn't turn the device off in another process */
3692 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3695 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3698 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
3701 budget = ENA_MONITORED_TX_QUEUES;
3703 for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
3704 tx_ring = &adapter->tx_ring[i];
3705 rx_ring = &adapter->rx_ring[i];
3707 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3711 rc = !ENA_IS_XDP_INDEX(adapter, i) ?
3712 check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
3721 adapter->last_monitored_tx_qid = i % io_queue_count;
3724 /* trigger napi schedule after 2 consecutive detections */
3725 #define EMPTY_RX_REFILL 2
3726 /* For the rare case where the device runs out of Rx descriptors and the
3727 * napi handler failed to refill new Rx descriptors (due to a lack of memory, for example).
3729 * This case will lead to a deadlock:
3730 * The device won't send interrupts since all the new Rx packets will be dropped,
3731 * and the napi handler won't allocate new Rx descriptors, so the device won't be
3732 * able to send new packets.
3734 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
3735 * It is recommended to have at least 512MB, with a minimum of 128MB for a
3736 * constrained environment.
3738 * When such a situation is detected - reschedule napi
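 *
 * An illustrative (example-only) way to raise the watermark, e.g. to 512MB:
 *   # sysctl -w vm.min_free_kbytes=524288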
3740 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
3742 struct ena_ring *rx_ring;
3743 int i, refill_required;
3745 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3748 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3751 for (i = 0; i < adapter->num_io_queues; i++) {
3752 rx_ring = &adapter->rx_ring[i];
3754 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
3755 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3756 rx_ring->empty_rx_queue++;
3758 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3759 u64_stats_update_begin(&rx_ring->syncp);
3760 rx_ring->rx_stats.empty_rx_ring++;
3761 u64_stats_update_end(&rx_ring->syncp);
3763 netif_err(adapter, drv, adapter->netdev,
3764 "Trigger refill for ring %d\n", i);
3766 napi_schedule(rx_ring->napi);
3767 rx_ring->empty_rx_queue = 0;
3770 rx_ring->empty_rx_queue = 0;
3775 /* Check for keep alive expiration */
3776 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3778 unsigned long keep_alive_expired;
3780 if (!adapter->wd_state)
3783 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3786 keep_alive_expired = adapter->last_keep_alive_jiffies +
3787 adapter->keep_alive_timeout;
3788 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3789 netif_err(adapter, drv, adapter->netdev,
3790 "Keep alive watchdog timeout.\n");
3791 u64_stats_update_begin(&adapter->syncp);
3792 adapter->dev_stats.wd_expired++;
3793 u64_stats_update_end(&adapter->syncp);
3794 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
3795 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3799 static void check_for_admin_com_state(struct ena_adapter *adapter)
3801 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3802 netif_err(adapter, drv, adapter->netdev,
3803 "ENA admin queue is not in running state!\n");
3804 u64_stats_update_begin(&adapter->syncp);
3805 adapter->dev_stats.admin_q_pause++;
3806 u64_stats_update_end(&adapter->syncp);
3807 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
3808 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3812 static void ena_update_hints(struct ena_adapter *adapter,
3813 struct ena_admin_ena_hw_hints *hints)
3815 struct net_device *netdev = adapter->netdev;
3817 if (hints->admin_completion_tx_timeout)
3818 adapter->ena_dev->admin_queue.completion_timeout =
3819 hints->admin_completion_tx_timeout * 1000;
3821 if (hints->mmio_read_timeout)
3822 /* convert to usec */
3823 adapter->ena_dev->mmio_read.reg_read_to =
3824 hints->mmio_read_timeout * 1000;
3826 if (hints->missed_tx_completion_count_threshold_to_reset)
3827 adapter->missing_tx_completion_threshold =
3828 hints->missed_tx_completion_count_threshold_to_reset;
3830 if (hints->missing_tx_completion_timeout) {
3831 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3832 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3834 adapter->missing_tx_completion_to =
3835 msecs_to_jiffies(hints->missing_tx_completion_timeout);
3838 if (hints->netdev_wd_timeout)
3839 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3841 if (hints->driver_watchdog_timeout) {
3842 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3843 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3845 adapter->keep_alive_timeout =
3846 msecs_to_jiffies(hints->driver_watchdog_timeout);
3850 static void ena_update_host_info(struct ena_admin_host_info *host_info,
3851 struct net_device *netdev)
3853 host_info->supported_network_features[0] =
3854 netdev->features & GENMASK_ULL(31, 0);
3855 host_info->supported_network_features[1] =
3856 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
3859 static void ena_timer_service(struct timer_list *t)
3861 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
3862 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3863 struct ena_admin_host_info *host_info =
3864 adapter->ena_dev->host_attr.host_info;
3866 check_for_missing_keep_alive(adapter);
3868 check_for_admin_com_state(adapter);
3870 check_for_missing_completions(adapter);
3872 check_for_empty_rx_ring(adapter);
3875 ena_dump_stats_to_buf(adapter, debug_area);
3878 ena_update_host_info(host_info, adapter->netdev);
3880 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3881 netif_err(adapter, drv, adapter->netdev,
3882 "Trigger reset is on\n");
3883 ena_dump_stats_to_dmesg(adapter);
3884 queue_work(ena_wq, &adapter->reset_task);
3888 /* Reset the timer */
3889 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3892 static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
3893 struct ena_com_dev *ena_dev,
3894 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3896 u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
3898 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3899 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3900 &get_feat_ctx->max_queue_ext.max_queue_ext;
3901 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
3902 max_queue_ext->max_rx_cq_num);
3904 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
3905 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
3907 struct ena_admin_queue_feature_desc *max_queues =
3908 &get_feat_ctx->max_queues;
3909 io_tx_sq_num = max_queues->max_sq_num;
3910 io_tx_cq_num = max_queues->max_cq_num;
3911 io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
3914 /* In case of LLQ use the llq fields for the tx SQ/CQ */
3915 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3916 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
3918 max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3919 max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
3920 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
3921 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
3922 /* 1 IRQ for mgmnt and 1 IRQ for each IO direction */
3923 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
3924 if (unlikely(!max_num_io_queues)) {
3925 dev_err(&pdev->dev, "The device doesn't have io queues\n");
3929 return max_num_io_queues;
3932 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3933 struct net_device *netdev)
3935 netdev_features_t dev_features = 0;
3937 /* Set offload features */
3938 if (feat->offload.tx &
3939 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3940 dev_features |= NETIF_F_IP_CSUM;
3942 if (feat->offload.tx &
3943 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3944 dev_features |= NETIF_F_IPV6_CSUM;
3946 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3947 dev_features |= NETIF_F_TSO;
3949 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3950 dev_features |= NETIF_F_TSO6;
3952 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3953 dev_features |= NETIF_F_TSO_ECN;
3955 if (feat->offload.rx_supported &
3956 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3957 dev_features |= NETIF_F_RXCSUM;
3959 if (feat->offload.rx_supported &
3960 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3961 dev_features |= NETIF_F_RXCSUM;
3969 netdev->hw_features |= netdev->features;
3970 netdev->vlan_features |= netdev->features;
3973 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3974 struct ena_com_dev_get_features_ctx *feat)
3976 struct net_device *netdev = adapter->netdev;
3978 /* Copy mac address */
3979 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3980 eth_hw_addr_random(netdev);
3981 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3983 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3984 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3987 /* Set offload features */
3988 ena_set_dev_offloads(feat, netdev);
3990 adapter->max_mtu = feat->dev_attr.max_mtu;
3991 netdev->max_mtu = adapter->max_mtu;
3992 netdev->min_mtu = ENA_MIN_MTU;
3995 static int ena_rss_init_default(struct ena_adapter *adapter)
3997 struct ena_com_dev *ena_dev = adapter->ena_dev;
3998 struct device *dev = &adapter->pdev->dev;
4002 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
4004 dev_err(dev, "Cannot init indirect table\n");
4008 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
4009 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
4010 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
4011 ENA_IO_RXQ_IDX(val));
4012 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4013 dev_err(dev, "Cannot fill indirect table\n");
4014 goto err_fill_indir;
4018 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
4019 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
4020 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4021 dev_err(dev, "Cannot fill hash function\n");
4022 goto err_fill_indir;
4025 rc = ena_com_set_default_hash_ctrl(ena_dev);
4026 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4027 dev_err(dev, "Cannot fill hash control\n");
4028 goto err_fill_indir;
4034 ena_com_rss_destroy(ena_dev);
4040 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
4042 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4044 pci_release_selected_regions(pdev, release_bars);
4048 static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
4050 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
4051 struct ena_com_dev *ena_dev = ctx->ena_dev;
4052 u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
4053 u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
4054 u32 max_tx_queue_size;
4055 u32 max_rx_queue_size;
4057 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
4058 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
4059 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
4060 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
4061 max_queue_ext->max_rx_sq_depth);
4062 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
4064 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4065 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4066 llq->max_llq_depth);
4068 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4069 max_queue_ext->max_tx_sq_depth);
4071 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4072 max_queue_ext->max_per_packet_tx_descs);
4073 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4074 max_queue_ext->max_per_packet_rx_descs);
4076 struct ena_admin_queue_feature_desc *max_queues =
4077 &ctx->get_feat_ctx->max_queues;
4078 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
4079 max_queues->max_sq_depth);
4080 max_tx_queue_size = max_queues->max_cq_depth;
4082 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4083 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4084 llq->max_llq_depth);
4086 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4087 max_queues->max_sq_depth);
4089 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4090 max_queues->max_packet_tx_descs);
4091 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4092 max_queues->max_packet_rx_descs);
4095 max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
4096 max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
4098 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
4100 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
4103 tx_queue_size = rounddown_pow_of_two(tx_queue_size);
4104 rx_queue_size = rounddown_pow_of_two(rx_queue_size);
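/* Illustrative sizing (hypothetical device limits): a reported max TX depth of
 * 1000 rounds down to 512; a default request of, say, 1024 is then clamped to
 * 512, which is already a power of two.
 */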
4106 ctx->max_tx_queue_size = max_tx_queue_size;
4107 ctx->max_rx_queue_size = max_rx_queue_size;
4108 ctx->tx_queue_size = tx_queue_size;
4109 ctx->rx_queue_size = rx_queue_size;
4114 /* ena_probe - Device Initialization Routine
4115 * @pdev: PCI device information struct
4116 * @ent: entry in ena_pci_tbl
4118 * Returns 0 on success, negative on failure
4120 * ena_probe initializes an adapter identified by a pci_dev structure.
4121 * The OS initialization, configuring of the adapter private structure,
4122 * and a hardware reset occur.
4124 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4126 struct ena_calc_queue_size_ctx calc_queue_ctx = {};
4127 struct ena_com_dev_get_features_ctx get_feat_ctx;
4128 struct ena_com_dev *ena_dev = NULL;
4129 struct ena_adapter *adapter;
4130 struct net_device *netdev;
4131 static int adapters_found;
4132 u32 max_num_io_queues;
4136 dev_dbg(&pdev->dev, "%s\n", __func__);
4138 rc = pci_enable_device_mem(pdev);
4140 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
4144 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
4146 dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
4147 goto err_disable_device;
4150 pci_set_master(pdev);
4152 ena_dev = vzalloc(sizeof(*ena_dev));
4155 goto err_disable_device;
4158 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4159 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
4161 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
4163 goto err_free_ena_dev;
4166 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
4167 pci_resource_start(pdev, ENA_REG_BAR),
4168 pci_resource_len(pdev, ENA_REG_BAR));
4169 if (!ena_dev->reg_bar) {
4170 dev_err(&pdev->dev, "Failed to remap regs bar\n");
4172 goto err_free_region;
4175 ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
4177 ena_dev->dmadev = &pdev->dev;
4179 rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
4181 dev_err(&pdev->dev, "ENA device init failed\n");
4184 goto err_free_region;
4187 rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
4189 dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
4190 goto err_free_ena_dev;
4193 calc_queue_ctx.ena_dev = ena_dev;
4194 calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
4195 calc_queue_ctx.pdev = pdev;
4197 /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
4198 * Updated during device initialization with the real granularity
4200 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
4201 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
4202 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
4203 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
4204 rc = ena_calc_io_queue_size(&calc_queue_ctx);
4205 if (rc || !max_num_io_queues) {
4207 goto err_device_destroy;
4210 /* dev zeroed in init_etherdev */
4211 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
4213 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
4215 goto err_device_destroy;
4218 SET_NETDEV_DEV(netdev, &pdev->dev);
4220 adapter = netdev_priv(netdev);
4221 pci_set_drvdata(pdev, adapter);
4223 adapter->ena_dev = ena_dev;
4224 adapter->netdev = netdev;
4225 adapter->pdev = pdev;
4227 ena_set_conf_feat_params(adapter, &get_feat_ctx);
4229 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4230 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
4232 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
4233 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
4234 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
4235 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
4236 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
4237 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
4239 adapter->num_io_queues = max_num_io_queues;
4240 adapter->max_num_io_queues = max_num_io_queues;
4241 adapter->last_monitored_tx_qid = 0;
4243 adapter->xdp_first_ring = 0;
4244 adapter->xdp_num_queues = 0;
4246 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
4247 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4248 adapter->disable_meta_caching =
4249 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
4250 BIT(ENA_ADMIN_DISABLE_META_CACHING));
4252 adapter->wd_state = wd_state;
4254 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
4256 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
4259 "Failed to query interrupt moderation feature\n");
4260 goto err_netdev_destroy;
4262 ena_init_io_rings(adapter,
4264 adapter->xdp_num_queues +
4265 adapter->num_io_queues);
4267 netdev->netdev_ops = &ena_netdev_ops;
4268 netdev->watchdog_timeo = TX_TIMEOUT;
4269 ena_set_ethtool_ops(netdev);
4271 netdev->priv_flags |= IFF_UNICAST_FLT;
4273 u64_stats_init(&adapter->syncp);
4275 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
4278 "Failed to enable and set the admin interrupts\n");
4279 goto err_worker_destroy;
4281 rc = ena_rss_init_default(adapter);
4282 if (rc && (rc != -EOPNOTSUPP)) {
4283 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
4287 ena_config_debug_area(adapter);
4289 if (!ena_update_hw_stats(adapter))
4290 adapter->eni_stats_supported = true;
4292 adapter->eni_stats_supported = false;
4294 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
4296 netif_carrier_off(netdev);
4298 rc = register_netdev(netdev);
4300 dev_err(&pdev->dev, "Cannot register net device\n");
4304 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
4306 adapter->last_keep_alive_jiffies = jiffies;
4307 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
4308 adapter->missing_tx_completion_to = TX_TIMEOUT;
4309 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
4311 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
4313 timer_setup(&adapter->timer_service, ena_timer_service, 0);
4314 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
4316 dev_info(&pdev->dev,
4317 "%s found at mem %lx, mac addr %pM\n",
4318 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
4321 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
4328 ena_com_delete_debug_area(ena_dev);
4329 ena_com_rss_destroy(ena_dev);
4331 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
4332 /* stop submitting admin commands on a device that was reset */
4333 ena_com_set_admin_running_state(ena_dev, false);
4334 ena_free_mgmnt_irq(adapter);
4335 ena_disable_msix(adapter);
4337 del_timer(&adapter->timer_service);
4339 free_netdev(netdev);
4341 ena_com_delete_host_info(ena_dev);
4342 ena_com_admin_destroy(ena_dev);
4344 ena_release_bars(ena_dev, pdev);
4348 pci_disable_device(pdev);
4352 /*****************************************************************************/
4354 /* __ena_shutoff - Helper used in both PCI remove/shutdown routines
4355 * @pdev: PCI device information struct
4356 * @shutdown: Is it a shutdown operation? If false, it is a removal
4358 * __ena_shutoff is a helper routine that does the real work on the shutdown and
4359 * removal paths; the difference between those paths is whether to
4360 * detach or to unregister the netdevice.
4362 static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
4364 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4365 struct ena_com_dev *ena_dev;
4366 struct net_device *netdev;
4368 ena_dev = adapter->ena_dev;
4369 netdev = adapter->netdev;
4371 #ifdef CONFIG_RFS_ACCEL
4372 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
4373 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
4374 netdev->rx_cpu_rmap = NULL;
4376 #endif /* CONFIG_RFS_ACCEL */
4378 /* Make sure timer and reset routine won't be called after
4379 * freeing device resources.
4381 del_timer_sync(&adapter->timer_service);
4382 cancel_work_sync(&adapter->reset_task);
4384 rtnl_lock(); /* lock released inside the below if-else block */
4385 adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
4386 ena_destroy_device(adapter, true);
4388 netif_device_detach(netdev);
4393 unregister_netdev(netdev);
4394 free_netdev(netdev);
4397 ena_com_rss_destroy(ena_dev);
4399 ena_com_delete_debug_area(ena_dev);
4401 ena_com_delete_host_info(ena_dev);
4403 ena_release_bars(ena_dev, pdev);
4405 pci_disable_device(pdev);
4410 /* ena_remove - Device Removal Routine
4411 * @pdev: PCI device information struct
4413 * ena_remove is called by the PCI subsystem to alert the driver
4414 * that it should release a PCI device.
4417 static void ena_remove(struct pci_dev *pdev)
4419 __ena_shutoff(pdev, false);

/* ena_shutdown - Device Shutdown Routine
 * @pdev: PCI device information struct
 *
 * ena_shutdown is called by the PCI subsystem to alert the driver that
 * a shutdown/reboot (or kexec) is happening and the device must be disabled.
 */
static void ena_shutdown(struct pci_dev *pdev)
{
	__ena_shutoff(pdev, true);
}

/* ena_suspend - PM suspend callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
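
	/* Count the suspend event; the u64_stats sequence counter keeps the
	 * 64-bit statistics consistent for readers on 32-bit architectures.
	 */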
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.suspend++;
	u64_stats_update_end(&adapter->syncp);

	rtnl_lock();
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"Ignoring device reset request as the device is being suspended\n");
		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
	ena_destroy_device(adapter, true);
	rtnl_unlock();
	return 0;
}

/* ena_resume - PM resume callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_resume(struct device *dev_d)
{
	struct ena_adapter *adapter = dev_get_drvdata(dev_d);
	int rc;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.resume++;
	u64_stats_update_end(&adapter->syncp);

	rtnl_lock();
	rc = ena_restore_device(adapter);
	rtnl_unlock();
	return rc;
}
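
/* Wire the suspend/resume callbacks into the system sleep PM operations
 * (suspend/resume, freeze/thaw, poweroff/restore).
 */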
static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);

static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
	.shutdown	= ena_shutdown,
	.driver.pm	= &ena_pm_ops,
	.sriov_configure = pci_sriov_configure_simple,
};
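
/* Module entry point: create the single-threaded workqueue used to run the
 * per-adapter reset task, then register the PCI driver.
 */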
static int __init ena_init(void)
{
	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&ena_pci_driver);
}

static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	if (ena_wq) {
		destroy_workqueue(ena_wq);
		ena_wq = NULL;
	}
}

/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
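
	/* A non-zero masked value means the link came up; zero means it went
	 * down.
	 */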
	if (status) {
		netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
			netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}

static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	u64 rx_drops;
	u64 tx_drops;

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	adapter->last_keep_alive_jiffies = jiffies;
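
	/* The device reports drop counters as two 32-bit halves; combine them
	 * into 64-bit values.
	 */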
	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
	tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;

	u64_stats_update_begin(&adapter->syncp);
	/* These stats are accumulated by the device, so the counters indicate
	 * all drops since last reset.
	 */
	adapter->dev_stats.rx_drops = rx_drops;
	adapter->dev_stats.tx_drops = tx_drops;
	u64_stats_update_end(&adapter->syncp);
}

static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

	switch (aenq_e->aenq_common_desc.syndrome) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification link state %d\n",
			  aenq_e->aenq_common_desc.syndrome);
	}
}

/* This handler will be called for an unknown event group or for events with
 * an unimplemented handler.
 */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}
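
/* AENQ dispatch table: the ena_com layer invokes the handler indexed by an
 * event's group; groups without a dedicated handler fall back to
 * unimplemented_aenq_handler.
 */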
static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

module_init(ena_init);
module_exit(ena_cleanup);