/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif /* CONFIG_RFS_ACCEL */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/numa.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <net/ip.h>

#include "ena_netdev.h"
#include <linux/bpf_trace.h>
#include "ena_pci_id_tbl.h"

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_LICENSE("GPL");

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5 * HZ)

#define ENA_NAPI_BUDGET 64

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
		NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

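/* 'debug' follows the usual netdev message-level convention: the value is
 * turned into a NETIF_MSG_* bitmap via netif_msg_init() at probe time, with
 * -1 selecting DEFAULT_MSG_ENABLE above. For example, "modprobe ena debug=16"
 * would enable all message types.
 */
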
static struct ena_aenq_handlers aenq_handlers;

static struct workqueue_struct *ena_wq;

MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count);
static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
				   int count);
static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
				  int count);
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index,
					   int count);
static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index, int count);
static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
static int ena_up(struct ena_adapter *adapter);
static void ena_down(struct ena_adapter *adapter);
static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring);
static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring);
static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info);
static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
					    int first_index, int count);

static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	/* Change the state of the device to trigger reset.
	 * Check that we are not already in the middle of a reset or that one
	 * has already been triggered.
	 */
	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		return;

	adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.tx_timeout++;
	u64_stats_update_end(&adapter->syncp);

	netif_err(adapter, tx_err, dev, "Transmit time out\n");
}

static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].mtu = mtu;
}

static int ena_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int ret;

	ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
	if (!ret) {
		netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
		update_rx_ring_mtu(adapter, new_mtu);
		dev->mtu = new_mtu;
	} else {
		netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
			  new_mtu);
	}

	return ret;
}

static int ena_xmit_common(struct net_device *dev,
			   struct ena_ring *ring,
			   struct ena_tx_buffer *tx_info,
			   struct ena_com_tx_ctx *ena_tx_ctx,
			   u16 next_to_use,
			   u32 bytes)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	int rc, nb_hw_desc;

	if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
						ena_tx_ctx))) {
		netif_dbg(adapter, tx_queued, dev,
			  "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			  ring->qid);
		ena_com_write_sq_doorbell(ring->ena_com_io_sq);
	}

	/* prepare the packet's descriptors to dma engine */
	rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
				&nb_hw_desc);

	/* In case there isn't enough space in the queue for the packet,
	 * we simply drop it. All other failure reasons of
	 * ena_com_prepare_tx() are fatal and therefore require a device reset.
	 */
	if (unlikely(rc)) {
		netif_err(adapter, tx_queued, dev,
			  "failed to prepare tx bufs\n");
		u64_stats_update_begin(&ring->syncp);
		ring->tx_stats.prepare_ctx_err++;
		u64_stats_update_end(&ring->syncp);
		if (rc != -ENOMEM) {
			adapter->reset_reason =
				ENA_REGS_RESET_DRIVER_INVALID_STATE;
			set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		}
		return rc;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.cnt++;
	ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);

	tx_info->tx_descs = nb_hw_desc;
	tx_info->last_jiffies = jiffies;
	tx_info->print_once = 0;

	ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
						 ring->ring_size);
	return 0;
}

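/* XDP support: each XDP Tx queue is a regular ena Tx ring that lives right
 * after the ordinary I/O queues, i.e. the XDP ring serving Rx queue 'qid'
 * is adapter->tx_ring[qid + adapter->num_io_queues] (see
 * ena_xdp_xmit_buff() and ena_init_all_xdp_queues() below).
 */
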
/* This is the XDP napi callback. XDP queues use a separate napi callback
 * than Rx/Tx queues.
 */
static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	u32 xdp_work_done, xdp_budget;
	struct ena_ring *xdp_ring;
	int napi_comp_call = 0;
	int ret;

	xdp_ring = ena_napi->xdp_ring;
	xdp_ring->first_interrupt = ena_napi->first_interrupt;

	xdp_budget = budget;

	if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);

	/* If the device is about to reset or down, avoid unmasking the
	 * interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;
	} else if (xdp_budget > xdp_work_done) {
		napi_comp_call = 1;
		if (napi_complete_done(napi, xdp_work_done))
			ena_unmask_interrupt(xdp_ring, NULL);
		ena_update_ring_numa_node(xdp_ring, NULL);
		ret = xdp_work_done;
	} else {
		ret = xdp_budget;
	}

	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.napi_comp += napi_comp_call;
	xdp_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&xdp_ring->syncp);

	return ret;
}

static int ena_xdp_tx_map_buff(struct ena_ring *xdp_ring,
			       struct ena_tx_buffer *tx_info,
			       struct xdp_buff *xdp,
			       void **push_hdr,
			       u32 *push_len)
{
	struct ena_adapter *adapter = xdp_ring->adapter;
	struct ena_com_buf *ena_buf;
	dma_addr_t dma = 0;
	u32 size;

	tx_info->xdpf = xdp_convert_buff_to_frame(xdp);
	size = tx_info->xdpf->len;
	ena_buf = tx_info->bufs;

	/* llq push buffer */
	*push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
	*push_hdr = tx_info->xdpf->data;

	if (size - *push_len > 0) {
		dma = dma_map_single(xdp_ring->dev,
				     *push_hdr + *push_len,
				     size - *push_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
			goto error_report_dma_error;

		tx_info->map_linear_data = 1;
		tx_info->num_of_bufs = 1;
	}

	ena_buf->paddr = dma;
	ena_buf->len = size;

	return 0;

error_report_dma_error:
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.dma_mapping_err++;
	u64_stats_update_end(&xdp_ring->syncp);
	netdev_warn(adapter->netdev, "failed to map xdp buff\n");

	xdp_return_frame_rx_napi(tx_info->xdpf);
	tx_info->xdpf = NULL;
	tx_info->num_of_bufs = 0;

	return -EINVAL;
}

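/* For LLQ, up to tx_max_header_size bytes of the frame are handed to the
 * device inline through push_hdr/push_len; only the remainder (if any) is
 * DMA-mapped as a separate buffer. This is why num_of_bufs may legitimately
 * stay 0 for small frames that fit entirely in the pushed header.
 */
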
static int ena_xdp_xmit_buff(struct net_device *dev,
			     struct xdp_buff *xdp,
			     int qid,
			     struct ena_rx_buffer *rx_info)
{
	struct ena_adapter *adapter = netdev_priv(dev);
	struct ena_com_tx_ctx ena_tx_ctx = {};
	struct ena_tx_buffer *tx_info;
	struct ena_ring *xdp_ring;
	u16 next_to_use, req_id;
	int rc;
	void *push_hdr;
	u32 push_len;

	xdp_ring = &adapter->tx_ring[qid];
	next_to_use = xdp_ring->next_to_use;
	req_id = xdp_ring->free_ids[next_to_use];
	tx_info = &xdp_ring->tx_buffer_info[req_id];
	tx_info->num_of_bufs = 0;
	page_ref_inc(rx_info->page);
	tx_info->xdp_rx_page = rx_info->page;

	rc = ena_xdp_tx_map_buff(xdp_ring, tx_info, xdp, &push_hdr, &push_len);
	if (unlikely(rc))
		goto error_drop_packet;

	ena_tx_ctx.ena_bufs = tx_info->bufs;
	ena_tx_ctx.push_header = push_hdr;
	ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
	ena_tx_ctx.req_id = req_id;
	ena_tx_ctx.header_len = push_len;

	rc = ena_xmit_common(dev,
			     xdp_ring,
			     tx_info,
			     &ena_tx_ctx,
			     next_to_use,
			     xdp->data_end - xdp->data);
	if (rc)
		goto error_unmap_dma;
	/* trigger the dma engine. ena_com_write_sq_doorbell()
	 * has a mb
	 */
	ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
	u64_stats_update_begin(&xdp_ring->syncp);
	xdp_ring->tx_stats.doorbells++;
	u64_stats_update_end(&xdp_ring->syncp);

	return NETDEV_TX_OK;

error_unmap_dma:
	ena_unmap_tx_buff(xdp_ring, tx_info);
	tx_info->xdpf = NULL;
error_drop_packet:
	__free_page(tx_info->xdp_rx_page);
	return NETDEV_TX_OK;
}

static int ena_xdp_execute(struct ena_ring *rx_ring,
			   struct xdp_buff *xdp,
			   struct ena_rx_buffer *rx_info)
{
	struct bpf_prog *xdp_prog;
	u32 verdict = XDP_PASS;
	u64 *xdp_stat;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		goto out;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	if (verdict == XDP_TX) {
		ena_xdp_xmit_buff(rx_ring->netdev,
				  xdp,
				  rx_ring->qid + rx_ring->adapter->num_io_queues,
				  rx_info);

		xdp_stat = &rx_ring->rx_stats.xdp_tx;
	} else if (unlikely(verdict == XDP_ABORTED)) {
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
	} else if (unlikely(verdict == XDP_DROP)) {
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
	} else if (unlikely(verdict == XDP_PASS)) {
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
	} else {
		bpf_warn_invalid_xdp_action(verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
	}

	u64_stats_update_begin(&rx_ring->syncp);
	(*xdp_stat)++;
	u64_stats_update_end(&rx_ring->syncp);
out:
	rcu_read_unlock();
	return verdict;
}

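/* Note that only PASS/TX/ABORTED/DROP are handled here; any other verdict,
 * including XDP_REDIRECT, falls through to bpf_warn_invalid_xdp_action()
 * and is accounted as xdp_invalid.
 */
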
static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
{
	adapter->xdp_first_ring = adapter->num_io_queues;
	adapter->xdp_num_queues = adapter->num_io_queues;

	ena_init_io_rings(adapter,
			  adapter->xdp_first_ring,
			  adapter->xdp_num_queues);
}

static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
{
	int rc = 0;

	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
					     adapter->xdp_num_queues);
	if (rc)
		goto setup_err;

	rc = ena_create_io_tx_queues_in_range(adapter,
					      adapter->xdp_first_ring,
					      adapter->xdp_num_queues);
	if (rc)
		goto create_err;

	return 0;

create_err:
	ena_free_all_io_tx_resources(adapter);
setup_err:
	return rc;
}

/* Provides a way for both kernel and bpf-prog to know
 * more about the RX-queue a given XDP frame arrived on.
 */
static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
{
	int rc;

	rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		goto err;
	}

	rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
					NULL);

	if (rc) {
		netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
			  "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
			  rx_ring->qid, rc);
		xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	}

err:
	return rc;
}

static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
{
	xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
	xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
}

static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
						 struct bpf_prog *prog,
						 int first, int count)
{
	struct ena_ring *rx_ring;
	int i = 0;

	for (i = first; i < count; i++) {
		rx_ring = &adapter->rx_ring[i];
		xchg(&rx_ring->xdp_bpf_prog, prog);
		if (prog) {
			ena_xdp_register_rxq_info(rx_ring);
			rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
		} else {
			ena_xdp_unregister_rxq_info(rx_ring);
			rx_ring->rx_headroom = 0;
		}
	}
}

static void ena_xdp_exchange_program(struct ena_adapter *adapter,
				     struct bpf_prog *prog)
{
	struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);

	ena_xdp_exchange_program_rx_in_range(adapter,
					     prog,
					     0,
					     adapter->num_io_queues);

	if (old_bpf_prog)
		bpf_prog_put(old_bpf_prog);
}

static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
{
	bool was_up;
	int rc;

	was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);

	if (was_up)
		ena_down(adapter);

	adapter->xdp_first_ring = 0;
	adapter->xdp_num_queues = 0;
	ena_xdp_exchange_program(adapter, NULL);
	if (was_up) {
		rc = ena_up(adapter);
		if (rc)
			return rc;
	}
	return 0;
}

static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct bpf_prog *prog = bpf->prog;
	struct bpf_prog *old_bpf_prog;
	int rc, prev_mtu;
	bool is_up;

	is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
	rc = ena_xdp_allowed(adapter);
	if (rc == ENA_XDP_ALLOWED) {
		old_bpf_prog = adapter->xdp_bpf_prog;
		if (prog) {
			if (!is_up) {
				ena_init_all_xdp_queues(adapter);
			} else if (!old_bpf_prog) {
				ena_down(adapter);
				ena_init_all_xdp_queues(adapter);
			}
			ena_xdp_exchange_program(adapter, prog);

			if (is_up && !old_bpf_prog) {
				rc = ena_up(adapter);
				if (rc)
					return rc;
			}
		} else if (old_bpf_prog) {
			rc = ena_destroy_and_free_all_xdp_queues(adapter);
			if (rc)
				return rc;
		}

		prev_mtu = netdev->max_mtu;
		netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;

		if (!old_bpf_prog)
			netif_info(adapter, drv, adapter->netdev,
				   "xdp program set, changing the max_mtu from %d to %d",
				   prev_mtu, netdev->max_mtu);

	} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
			  netdev->mtu, ENA_XDP_MAX_MTU);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
		return -EINVAL;
	} else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
		netif_err(adapter, drv, adapter->netdev,
			  "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
			  adapter->num_io_queues, adapter->max_num_io_queues);
		NL_SET_ERR_MSG_MOD(bpf->extack,
				   "Failed to set xdp program, there is not enough space for allocating XDP queues. Check the dmesg for more info");
		return -EINVAL;
	}

	return 0;
}

/* This is the main xdp callback, it's used by the kernel to set/unset the xdp
 * program as well as to query the current xdp program id.
 */
static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return ena_xdp_set(netdev, bpf);
	default:
		return -EINVAL;
	}
	return 0;
}

static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
{
#ifdef CONFIG_RFS_ACCEL
	u32 i;
	int rc;

	adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
	if (!adapter->netdev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < adapter->num_io_queues; i++) {
		int irq_idx = ENA_IO_IRQ_IDX(i);

		rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
				      pci_irq_vector(adapter->pdev, irq_idx));
		if (rc) {
			free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
			adapter->netdev->rx_cpu_rmap = NULL;
			return rc;
		}
	}
#endif /* CONFIG_RFS_ACCEL */
	return 0;
}

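/* The IRQ-to-CPU reverse map built above backs accelerated RFS (aRFS):
 * with CONFIG_RFS_ACCEL the stack uses it to steer each flow toward the
 * CPU that is consuming that flow's queue interrupt.
 */
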
static void ena_init_io_rings_common(struct ena_adapter *adapter,
				     struct ena_ring *ring, u16 qid)
{
	ring->qid = qid;
	ring->pdev = adapter->pdev;
	ring->dev = &adapter->pdev->dev;
	ring->netdev = adapter->netdev;
	ring->napi = &adapter->ena_napi[qid].napi;
	ring->adapter = adapter;
	ring->ena_dev = adapter->ena_dev;
	ring->per_napi_packets = 0;
	ring->cpu = 0;
	ring->first_interrupt = false;
	ring->no_interrupt_event_cnt = 0;
	u64_stats_init(&ring->syncp);
}

static void ena_init_io_rings(struct ena_adapter *adapter,
			      int first_index, int count)
{
	struct ena_com_dev *ena_dev;
	struct ena_ring *txr, *rxr;
	int i;

	ena_dev = adapter->ena_dev;

	for (i = first_index; i < first_index + count; i++) {
		txr = &adapter->tx_ring[i];
		rxr = &adapter->rx_ring[i];

		/* TX common ring state */
		ena_init_io_rings_common(adapter, txr, i);

		/* TX specific ring state */
		txr->ring_size = adapter->requested_tx_ring_size;
		txr->tx_max_header_size = ena_dev->tx_max_header_size;
		txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
		txr->sgl_size = adapter->max_tx_sgl_size;
		txr->smoothed_interval =
			ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
		txr->disable_meta_caching = adapter->disable_meta_caching;

		/* Don't init RX queues for xdp queues */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* RX common ring state */
			ena_init_io_rings_common(adapter, rxr, i);

			/* RX specific ring state */
			rxr->ring_size = adapter->requested_rx_ring_size;
			rxr->rx_copybreak = adapter->rx_copybreak;
			rxr->sgl_size = adapter->max_rx_sgl_size;
			rxr->smoothed_interval =
				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
			rxr->empty_rx_queue = 0;
			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		}
	}
}

/* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, i, node;

	if (tx_ring->tx_buffer_info) {
		netif_err(adapter, ifup,
			  adapter->netdev, "tx_buffer_info is not NULL");
		return -EEXIST;
	}

	size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
	node = cpu_to_node(ena_irq->cpu);

	tx_ring->tx_buffer_info = vzalloc_node(size, node);
	if (!tx_ring->tx_buffer_info) {
		tx_ring->tx_buffer_info = vzalloc(size);
		if (!tx_ring->tx_buffer_info)
			goto err_tx_buffer_info;
	}

	size = sizeof(u16) * tx_ring->ring_size;
	tx_ring->free_ids = vzalloc_node(size, node);
	if (!tx_ring->free_ids) {
		tx_ring->free_ids = vzalloc(size);
		if (!tx_ring->free_ids)
			goto err_tx_free_ids;
	}

	size = tx_ring->tx_max_header_size;
	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
	if (!tx_ring->push_buf_intermediate_buf) {
		tx_ring->push_buf_intermediate_buf = vzalloc(size);
		if (!tx_ring->push_buf_intermediate_buf)
			goto err_push_buf_intermediate_buf;
	}

	/* Req id ring for TX out of order completions */
	for (i = 0; i < tx_ring->ring_size; i++)
		tx_ring->free_ids[i] = i;

	/* Reset tx statistics */
	memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->cpu = ena_irq->cpu;

	return 0;

err_push_buf_intermediate_buf:
	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;
err_tx_free_ids:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
err_tx_buffer_info:
	return -ENOMEM;
}

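/* All three allocations above prefer the NUMA node of the queue's IRQ
 * (vzalloc_node()) and quietly fall back to any node (vzalloc()) rather
 * than failing the queue setup; the same pattern is used for the Rx rings
 * in ena_setup_rx_resources() below.
 */
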
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all transmit software resources
 */
static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
{
	struct ena_ring *tx_ring = &adapter->tx_ring[qid];

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	vfree(tx_ring->free_ids);
	tx_ring->free_ids = NULL;

	vfree(tx_ring->push_buf_intermediate_buf);
	tx_ring->push_buf_intermediate_buf = NULL;
}

static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index,
					   int count)
{
	int i, rc = 0;

	for (i = first_index; i < first_index + count; i++) {
		rc = ena_setup_tx_resources(adapter, i);
		if (rc)
			goto err_setup_tx;
	}

	return 0;

err_setup_tx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Tx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (first_index < i--)
		ena_free_tx_resources(adapter, i);
	return rc;
}

static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
						  int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		ena_free_tx_resources(adapter, i);
}

/* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
{
	ena_free_all_io_tx_resources_in_range(adapter,
					      0,
					      adapter->xdp_num_queues +
					      adapter->num_io_queues);
}

static int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
	if (likely(req_id < rx_ring->ring_size))
		return 0;

	netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
		  "Invalid rx req_id: %hu\n", req_id);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_req_id++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Trigger device reset */
	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
	return -EFAULT;
}

/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Returns 0 on success, negative on failure
 */
static int ena_setup_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
	int size, node, i;

	if (rx_ring->rx_buffer_info) {
		netif_err(adapter, ifup, adapter->netdev,
			  "rx_buffer_info is not NULL");
		return -EEXIST;
	}

	/* alloc extra element so in rx path
	 * we can always prefetch rx_info + 1
	 */
	size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
	node = cpu_to_node(ena_irq->cpu);

	rx_ring->rx_buffer_info = vzalloc_node(size, node);
	if (!rx_ring->rx_buffer_info) {
		rx_ring->rx_buffer_info = vzalloc(size);
		if (!rx_ring->rx_buffer_info)
			return -ENOMEM;
	}

	size = sizeof(u16) * rx_ring->ring_size;
	rx_ring->free_ids = vzalloc_node(size, node);
	if (!rx_ring->free_ids) {
		rx_ring->free_ids = vzalloc(size);
		if (!rx_ring->free_ids) {
			vfree(rx_ring->rx_buffer_info);
			rx_ring->rx_buffer_info = NULL;
			return -ENOMEM;
		}
	}

	/* Req id ring for receiving RX pkts out of order */
	for (i = 0; i < rx_ring->ring_size; i++)
		rx_ring->free_ids[i] = i;

	/* Reset rx statistics */
	memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->cpu = ena_irq->cpu;

	return 0;
}

/* ena_free_rx_resources - Free I/O Rx Resources
 * @adapter: network interface device structure
 * @qid: queue index
 *
 * Free all receive software resources
 */
static void ena_free_rx_resources(struct ena_adapter *adapter,
				  u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	vfree(rx_ring->free_ids);
	rx_ring->free_ids = NULL;
}

/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
{
	int i, rc = 0;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rc = ena_setup_rx_resources(adapter, i);
		if (rc)
			goto err_setup_rx;
	}

	return 0;

err_setup_rx:

	netif_err(adapter, ifup, adapter->netdev,
		  "Rx queue %d: allocation failed\n", i);

	/* rewind the index freeing the rings as we go */
	while (i--)
		ena_free_rx_resources(adapter, i);
	return rc;
}

/* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_resources(adapter, i);
}

static int ena_alloc_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info, gfp_t gfp)
{
	struct ena_com_buf *ena_buf;
	struct page *page;
	dma_addr_t dma;

	/* if previous allocated page is not used */
	if (unlikely(rx_info->page))
		return 0;

	page = alloc_page(gfp);
	if (unlikely(!page)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.page_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		return -ENOMEM;
	}

	/* To enable NIC-side port-mirroring, AKA SPAN port,
	 * we make the buffer readable from the nic as well
	 */
	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
			   DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.dma_mapping_err++;
		u64_stats_update_end(&rx_ring->syncp);

		__free_page(page);
		return -EIO;
	}
	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "alloc page %p, rx_info %p\n", page, rx_info);

	rx_info->page = page;
	rx_info->page_offset = 0;
	ena_buf = &rx_info->ena_buf;
	ena_buf->paddr = dma + rx_ring->rx_headroom;
	ena_buf->len = ENA_PAGE_SIZE - rx_ring->rx_headroom;

	return 0;
}

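/* Note that paddr is advanced by rx_headroom while len shrinks by the same
 * amount, so the device writes past the reserved headroom. The headroom is
 * XDP_PACKET_HEADROOM when an XDP program is attached and 0 otherwise (see
 * ena_xdp_exchange_program_rx_in_range() above).
 */
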
static void ena_free_rx_page(struct ena_ring *rx_ring,
			     struct ena_rx_buffer *rx_info)
{
	struct page *page = rx_info->page;
	struct ena_com_buf *ena_buf = &rx_info->ena_buf;

	if (unlikely(!page)) {
		netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
			   "Trying to free unallocated buffer\n");
		return;
	}

	dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
		       ENA_PAGE_SIZE,
		       DMA_BIDIRECTIONAL);

	__free_page(page);
	rx_info->page = NULL;
}

static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{
	u16 next_to_use, req_id;
	u32 i;
	int rc;

	next_to_use = rx_ring->next_to_use;

	for (i = 0; i < num; i++) {
		struct ena_rx_buffer *rx_info;

		req_id = rx_ring->free_ids[next_to_use];

		rx_info = &rx_ring->rx_buffer_info[req_id];

		rc = ena_alloc_rx_page(rx_ring, rx_info,
				       GFP_ATOMIC | __GFP_COMP);
		if (unlikely(rc < 0)) {
			netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
				   "failed to alloc buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
						&rx_info->ena_buf,
						req_id);
		if (unlikely(rc)) {
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "failed to add buffer for rx queue %d\n",
				   rx_ring->qid);
			break;
		}
		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
						   rx_ring->ring_size);
	}

	if (unlikely(i < num)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.refil_partial++;
		u64_stats_update_end(&rx_ring->syncp);
		netdev_warn(rx_ring->netdev,
			    "refilled rx qid %d with only %d buffers (from %d)\n",
			    rx_ring->qid, i, num);
	}

	/* ena_com_write_sq_doorbell issues a wmb() */
	if (likely(i))
		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);

	rx_ring->next_to_use = next_to_use;

	return i;
}

static void ena_free_rx_bufs(struct ena_adapter *adapter,
			     u32 qid)
{
	struct ena_ring *rx_ring = &adapter->rx_ring[qid];
	u32 i;

	for (i = 0; i < rx_ring->ring_size; i++) {
		struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];

		if (rx_info->page)
			ena_free_rx_page(rx_ring, rx_info);
	}
}

/* ena_refill_all_rx_bufs - allocate all queues Rx buffers
 * @adapter: board private structure
 */
static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *rx_ring;
	int i, rc, bufs_num;

	for (i = 0; i < adapter->num_io_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		bufs_num = rx_ring->ring_size - 1;
		rc = ena_refill_rx_bufs(rx_ring, bufs_num);

		if (unlikely(rc != bufs_num))
			netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
				   "refilling Queue %d failed. allocated %d buffers from: %d\n",
				   i, rc, bufs_num);
	}
}

static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_io_queues; i++)
		ena_free_rx_bufs(adapter, i);
}

static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
			      struct ena_tx_buffer *tx_info)
{
	struct ena_com_buf *ena_buf;
	u32 cnt;
	int i;

	ena_buf = tx_info->bufs;
	cnt = tx_info->num_of_bufs;

	if (unlikely(!cnt))
		return;

	if (tx_info->map_linear_data) {
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(ena_buf, paddr),
				 dma_unmap_len(ena_buf, len),
				 DMA_TO_DEVICE);
		ena_buf++;
		cnt--;
	}

	/* unmap remaining mapped pages */
	for (i = 0; i < cnt; i++) {
		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
		ena_buf++;
	}
}

/* ena_free_tx_bufs - Free Tx Buffers per Queue
 * @tx_ring: TX ring for which buffers are to be freed
 */
static void ena_free_tx_bufs(struct ena_ring *tx_ring)
{
	bool print_once = true;
	u32 i;

	for (i = 0; i < tx_ring->ring_size; i++) {
		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];

		if (!tx_info->skb)
			continue;

		if (print_once) {
			netdev_notice(tx_ring->netdev,
				      "free uncompleted tx skb qid %d idx 0x%x\n",
				      tx_ring->qid, i);
			print_once = false;
		} else {
			netdev_dbg(tx_ring->netdev,
				   "free uncompleted tx skb qid %d idx 0x%x\n",
				   tx_ring->qid, i);
		}

		ena_unmap_tx_buff(tx_ring, tx_info);

		dev_kfree_skb_any(tx_info->skb);
	}
	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
						  tx_ring->qid));
}

static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
{
	struct ena_ring *tx_ring;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		tx_ring = &adapter->tx_ring[i];
		ena_free_tx_bufs(tx_ring);
	}
}

static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		ena_qid = ENA_IO_TXQ_IDX(i);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
{
	u16 ena_qid;
	int i;

	for (i = 0; i < adapter->num_io_queues; i++) {
		ena_qid = ENA_IO_RXQ_IDX(i);
		cancel_work_sync(&adapter->ena_napi[i].dim.work);
		ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
	}
}

static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
{
	ena_destroy_all_tx_queues(adapter);
	ena_destroy_all_rx_queues(adapter);
}

static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
				 struct ena_tx_buffer *tx_info, bool is_xdp)
{
	if (tx_info)
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "tx_info doesn't have valid %s",
			  is_xdp ? "xdp frame" : "skb");
	else
		netif_err(ring->adapter,
			  tx_done,
			  ring->netdev,
			  "Invalid req_id: %hu\n",
			  req_id);

	u64_stats_update_begin(&ring->syncp);
	ring->tx_stats.bad_req_id++;
	u64_stats_update_end(&ring->syncp);

	/* Trigger device reset */
	ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
	set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
	return -EFAULT;
}

static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < tx_ring->ring_size)) {
		tx_info = &tx_ring->tx_buffer_info[req_id];
		if (likely(tx_info->skb))
			return 0;
	}

	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
}

static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
{
	struct ena_tx_buffer *tx_info = NULL;

	if (likely(req_id < xdp_ring->ring_size)) {
		tx_info = &xdp_ring->tx_buffer_info[req_id];
		if (likely(tx_info->xdpf))
			return 0;
	}

	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
}

static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
{
	struct netdev_queue *txq;
	bool above_thresh;
	u32 tx_bytes = 0;
	u32 total_done = 0;
	u16 next_to_clean;
	u16 req_id;
	int tx_pkts = 0;
	int rc;

	next_to_clean = tx_ring->next_to_clean;
	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct sk_buff *skb;

		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_tx_req_id(tx_ring, req_id);
		if (rc)
			break;

		tx_info = &tx_ring->tx_buffer_info[req_id];
		skb = tx_info->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		tx_info->skb = NULL;
		tx_info->last_jiffies = 0;

		ena_unmap_tx_buff(tx_ring, tx_info);

		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
			  skb);

		tx_bytes += skb->len;
		dev_kfree_skb(skb);
		tx_pkts++;
		total_done += tx_info->tx_descs;

		tx_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     tx_ring->ring_size);
	}

	tx_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

	netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);

	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  tx_ring->qid, tx_pkts);

	/* need to make the rings circular update visible to
	 * ena_start_xmit() before checking for netif_queue_stopped().
	 */
	smp_mb();

	above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						    ENA_TX_WAKEUP_THRESH);
	if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		above_thresh =
			ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
						     ENA_TX_WAKEUP_THRESH);
		if (netif_tx_queue_stopped(txq) && above_thresh &&
		    test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
			netif_tx_wake_queue(txq);
			u64_stats_update_begin(&tx_ring->syncp);
			tx_ring->tx_stats.queue_wakeup++;
			u64_stats_update_end(&tx_ring->syncp);
		}
		__netif_tx_unlock(txq);
	}

	return tx_pkts;
}

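/* netdev_tx_completed_queue() above feeds byte queue limits (BQL),
 * pairing with the netdev_tx_sent_queue() call on the transmit path, so
 * the stack can bound the amount of data queued on the ring.
 */
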
static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
	struct sk_buff *skb;

	if (frags)
		skb = napi_get_frags(rx_ring->napi);
	else
		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
						rx_ring->rx_copybreak);

	if (unlikely(!skb)) {
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.skb_alloc_fail++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Failed to allocate skb. frags: %d\n", frags);
		return NULL;
	}

	return skb;
}

static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
				  struct ena_com_rx_buf_info *ena_bufs,
				  u32 descs,
				  u16 *next_to_clean)
{
	struct sk_buff *skb;
	struct ena_rx_buffer *rx_info;
	u16 len, req_id, buf = 0;
	void *va;
	int rc;

	len = ena_bufs[buf].len;
	req_id = ena_bufs[buf].req_id;

	rc = validate_rx_req_id(rx_ring, req_id);
	if (unlikely(rc < 0))
		return NULL;

	rx_info = &rx_ring->rx_buffer_info[req_id];

	if (unlikely(!rx_info->page)) {
		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "Page is NULL\n");
		return NULL;
	}

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "rx_info %p page %p\n",
		  rx_info, rx_info->page);

	/* save virt address of first buffer */
	va = page_address(rx_info->page) + rx_info->page_offset;
	prefetch(va + NET_IP_ALIGN);

	if (len <= rx_ring->rx_copybreak) {
		skb = ena_alloc_skb(rx_ring, false);
		if (unlikely(!skb))
			return NULL;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx allocated small packet. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		/* sync this buffer for CPU use */
		dma_sync_single_for_cpu(rx_ring->dev,
					dma_unmap_addr(&rx_info->ena_buf, paddr),
					len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, va, len);
		dma_sync_single_for_device(rx_ring->dev,
					   dma_unmap_addr(&rx_info->ena_buf, paddr),
					   len,
					   DMA_FROM_DEVICE);

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
						     rx_ring->ring_size);
		return skb;
	}

	skb = ena_alloc_skb(rx_ring, true);
	if (unlikely(!skb))
		return NULL;

	do {
		dma_unmap_page(rx_ring->dev,
			       dma_unmap_addr(&rx_info->ena_buf, paddr),
			       ENA_PAGE_SIZE, DMA_BIDIRECTIONAL);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
				rx_info->page_offset, len, ENA_PAGE_SIZE);
		/* The offset is non zero only for the first buffer */
		rx_info->page_offset = 0;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx skb updated. len %d. data_len %d\n",
			  skb->len, skb->data_len);

		rx_info->page = NULL;

		rx_ring->free_ids[*next_to_clean] = req_id;
		*next_to_clean =
			ENA_RX_RING_IDX_NEXT(*next_to_clean,
					     rx_ring->ring_size);
		if (likely(--descs == 0))
			break;

		buf++;
		len = ena_bufs[buf].len;
		req_id = ena_bufs[buf].req_id;

		rc = validate_rx_req_id(rx_ring, req_id);
		if (unlikely(rc < 0))
			return NULL;

		rx_info = &rx_ring->rx_buffer_info[req_id];
	} while (1);

	return skb;
}

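/* On the copybreak path above the payload is copied into a freshly
 * allocated skb and the page is only DMA-synced, never unmapped; since
 * rx_info->page stays set, ena_alloc_rx_page() re-arms the same page on
 * the next refill. Larger packets instead donate their pages to the skb
 * as frags.
 */
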
/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: structure containing adapter specific data
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
static void ena_rx_checksum(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	/* Rx csum disabled */
	if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* For fragmented packets the checksum isn't valid */
	if (ena_rx_ctx->frag) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* if IP and error */
	if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
		     (ena_rx_ctx->l3_csum_err))) {
		/* ipv4 checksum error */
		skb->ip_summed = CHECKSUM_NONE;
		u64_stats_update_begin(&rx_ring->syncp);
		rx_ring->rx_stats.bad_csum++;
		u64_stats_update_end(&rx_ring->syncp);
		netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
			  "RX IPv4 header checksum error\n");
		return;
	}

	/* if TCP/UDP */
	if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
		   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
		if (unlikely(ena_rx_ctx->l4_csum_err)) {
			/* TCP/UDP checksum error */
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.bad_csum++;
			u64_stats_update_end(&rx_ring->syncp);
			netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
				  "RX L4 checksum error\n");
			skb->ip_summed = CHECKSUM_NONE;
			return;
		}

		if (likely(ena_rx_ctx->l4_csum_checked)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_good++;
			u64_stats_update_end(&rx_ring->syncp);
		} else {
			u64_stats_update_begin(&rx_ring->syncp);
			rx_ring->rx_stats.csum_unchecked++;
			u64_stats_update_end(&rx_ring->syncp);
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
}

static void ena_set_rx_hash(struct ena_ring *rx_ring,
			    struct ena_com_rx_ctx *ena_rx_ctx,
			    struct sk_buff *skb)
{
	enum pkt_hash_types hash_type;

	if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
		if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
			   (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
			hash_type = PKT_HASH_TYPE_L4;
		else
			hash_type = PKT_HASH_TYPE_NONE;

		/* Override hash type if the packet is fragmented */
		if (ena_rx_ctx->frag)
			hash_type = PKT_HASH_TYPE_NONE;

		skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
	}
}

static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	struct ena_rx_buffer *rx_info;
	int ret;

	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
	xdp->data = page_address(rx_info->page) +
		rx_info->page_offset + rx_ring->rx_headroom;
	xdp_set_data_meta_invalid(xdp);
	xdp->data_hard_start = page_address(rx_info->page);
	xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
	/* If for some reason we received a bigger packet than
	 * we expect, then we simply drop it
	 */
	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
		return XDP_DROP;

	ret = ena_xdp_execute(rx_ring, xdp, rx_info);

	/* The xdp program might expand the headers */
	if (ret == XDP_PASS) {
		rx_info->page_offset = xdp->data - xdp->data_hard_start;
		rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
	}

	return ret;
}

/* ena_clean_rx_irq - Cleanup RX irq
 * @rx_ring: RX ring to clean
 * @napi: napi handler
 * @budget: how many packets driver is allowed to clean
 *
 * Returns the number of cleaned buffers.
 */
static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
			    u32 budget)
{
	u16 next_to_clean = rx_ring->next_to_clean;
	struct ena_com_rx_ctx ena_rx_ctx;
	struct ena_rx_buffer *rx_info;
	struct ena_adapter *adapter;
	u32 res_budget, work_done;
	int rx_copybreak_pkt = 0;
	int refill_threshold;
	struct sk_buff *skb;
	int refill_required;
	struct xdp_buff xdp;
	int total_len = 0;
	int xdp_verdict;
	int rc = 0;
	int i;

	netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
		  "%s qid %d\n", __func__, rx_ring->qid);
	res_budget = budget;
	xdp.rxq = &rx_ring->xdp_rxq;
	xdp.frame_sz = ENA_PAGE_SIZE;

	do {
		xdp_verdict = XDP_PASS;
		skb = NULL;
		ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
		ena_rx_ctx.max_bufs = rx_ring->sgl_size;
		ena_rx_ctx.descs = 0;
		ena_rx_ctx.pkt_offset = 0;
		rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
		if (unlikely(rc))
			goto error;

		if (unlikely(ena_rx_ctx.descs == 0))
			break;

		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
		rx_info->page_offset = ena_rx_ctx.pkt_offset;

		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);

		if (ena_xdp_present_ring(rx_ring))
			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);

		/* allocate skb and fill it */
		if (xdp_verdict == XDP_PASS)
			skb = ena_rx_skb(rx_ring,
					 rx_ring->ena_bufs,
					 ena_rx_ctx.descs,
					 &next_to_clean);

		if (unlikely(!skb)) {
			if (xdp_verdict == XDP_TX)
				ena_free_rx_page(rx_ring,
						 &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
			for (i = 0; i < ena_rx_ctx.descs; i++) {
				rx_ring->free_ids[next_to_clean] =
					rx_ring->ena_bufs[i].req_id;
				next_to_clean =
					ENA_RX_RING_IDX_NEXT(next_to_clean,
							     rx_ring->ring_size);
			}
			if (xdp_verdict != XDP_PASS) {
				res_budget--;
				continue;
			}
			break;
		}

		ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);

		ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);

		skb_record_rx_queue(skb, rx_ring->qid);

		if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
			total_len += rx_ring->ena_bufs[0].len;
			rx_copybreak_pkt++;
			napi_gro_receive(napi, skb);
		} else {
			total_len += skb->len;
			napi_gro_frags(napi);
		}

		res_budget--;
	} while (likely(res_budget));

	work_done = budget - res_budget;
	rx_ring->per_napi_packets += work_done;
	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bytes += total_len;
	rx_ring->rx_stats.cnt += work_done;
	rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
	u64_stats_update_end(&rx_ring->syncp);

	rx_ring->next_to_clean = next_to_clean;

	refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
	refill_threshold =
		min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
		      ENA_RX_REFILL_THRESH_PACKET);

	/* Optimization, try to batch new rx buffers */
	if (refill_required > refill_threshold) {
		ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
		ena_refill_rx_bufs(rx_ring, refill_required);
	}

	return work_done;

error:
	adapter = netdev_priv(rx_ring->netdev);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->rx_stats.bad_desc_num++;
	u64_stats_update_end(&rx_ring->syncp);

	/* Too many desc from the device. Trigger reset */
	adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);

	return 0;
}

static void ena_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder cur_moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);

	ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
	dim->state = DIM_START_MEASURE;
}

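/* Adaptive interrupt moderation in a nutshell: net_dim() (called from
 * ena_adjust_adaptive_rx_intr_moderation() below) schedules ena_dim_work()
 * whenever it picks a new profile, and the chosen microsecond interval is
 * then programmed into the device on the next interrupt unmask in
 * ena_unmask_interrupt().
 */
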
static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
{
	struct dim_sample dim_sample;
	struct ena_ring *rx_ring = ena_napi->rx_ring;

	if (!rx_ring->per_napi_packets)
		return;

	rx_ring->non_empty_napi_events++;

	dim_update_sample(rx_ring->non_empty_napi_events,
			  rx_ring->rx_stats.cnt,
			  rx_ring->rx_stats.bytes,
			  &dim_sample);

	net_dim(&ena_napi->dim, dim_sample);

	rx_ring->per_napi_packets = 0;
}

static void ena_unmask_interrupt(struct ena_ring *tx_ring,
				 struct ena_ring *rx_ring)
{
	struct ena_eth_io_intr_reg intr_reg;
	u32 rx_interval = 0;
	/* Rx ring can be NULL for XDP tx queues which don't have an
	 * accompanying rx_ring pair.
	 */
	if (rx_ring)
		rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
			rx_ring->smoothed_interval :
			ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);

	/* Update intr register: rx intr delay,
	 * tx intr delay and interrupt unmask
	 */
	ena_com_update_intr_reg(&intr_reg,
				rx_interval,
				tx_ring->smoothed_interval,
				true);

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.unmask_interrupt++;
	u64_stats_update_end(&tx_ring->syncp);
	/* It is a shared MSI-X.
	 * Tx and Rx CQ have pointer to it.
	 * So we use one of them to reach the intr reg
	 * The Tx ring is used because the rx_ring is NULL for XDP queues
	 */
	ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
}

static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
				      struct ena_ring *rx_ring)
{
	int cpu = get_cpu();
	int numa_node;

	/* Check only one ring since the 2 rings are running on the same cpu */
	if (likely(tx_ring->cpu == cpu))
		goto out;

	numa_node = cpu_to_node(cpu);
	put_cpu();

	if (numa_node != NUMA_NO_NODE) {
		ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
		if (rx_ring)
			ena_com_update_numa_node(rx_ring->ena_com_io_cq,
						 numa_node);
	}

	tx_ring->cpu = cpu;
	if (rx_ring)
		rx_ring->cpu = cpu;

	return;
out:
	put_cpu();
}

static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
{
	u32 total_done = 0;
	u16 next_to_clean;
	u32 tx_bytes = 0;
	int tx_pkts = 0;
	u16 req_id;
	int rc;

	if (unlikely(!xdp_ring))
		return 0;
	next_to_clean = xdp_ring->next_to_clean;

	while (tx_pkts < budget) {
		struct ena_tx_buffer *tx_info;
		struct xdp_frame *xdpf;

		rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
						&req_id);
		if (rc)
			break;

		rc = validate_xdp_req_id(xdp_ring, req_id);
		if (rc)
			break;

		tx_info = &xdp_ring->tx_buffer_info[req_id];
		xdpf = tx_info->xdpf;

		tx_info->xdpf = NULL;
		tx_info->last_jiffies = 0;
		ena_unmap_tx_buff(xdp_ring, tx_info);

		netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
			  "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
			  xdpf);

		tx_bytes += xdpf->len;
		tx_pkts++;
		total_done += tx_info->tx_descs;

		__free_page(tx_info->xdp_rx_page);
		xdp_ring->free_ids[next_to_clean] = req_id;
		next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
						     xdp_ring->ring_size);
	}

	xdp_ring->next_to_clean = next_to_clean;
	ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
	ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);

	netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
		  "tx_poll: q %d done. total pkts: %d\n",
		  xdp_ring->qid, tx_pkts);

	return tx_pkts;
}

static int ena_io_poll(struct napi_struct *napi, int budget)
{
	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
	struct ena_ring *tx_ring, *rx_ring;
	int tx_work_done;
	int rx_work_done = 0;
	int tx_budget;
	int napi_comp_call = 0;
	int ret;

	tx_ring = ena_napi->tx_ring;
	rx_ring = ena_napi->rx_ring;

	tx_ring->first_interrupt = ena_napi->first_interrupt;
	rx_ring->first_interrupt = ena_napi->first_interrupt;

	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
	/* On netpoll the budget is zero and the handler should only clean the
	 * tx completions.
	 */
	if (likely(budget))
		rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

	/* If the device is about to reset or down, avoid unmasking the
	 * interrupt and return 0 so NAPI won't reschedule
	 */
	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
		napi_complete_done(napi, 0);
		ret = 0;

	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
		napi_comp_call = 1;

		/* Update numa and unmask the interrupt only when schedule
		 * from the interrupt context (vs from sk_busy_loop)
		 */
		if (napi_complete_done(napi, rx_work_done) &&
		    READ_ONCE(ena_napi->interrupts_masked)) {
			smp_rmb(); /* make sure interrupts_masked is read */
			WRITE_ONCE(ena_napi->interrupts_masked, false);
			/* We apply adaptive moderation on Rx path only.
			 * Tx uses static interrupt moderation.
			 */
			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
				ena_adjust_adaptive_rx_intr_moderation(ena_napi);

			ena_unmask_interrupt(tx_ring, rx_ring);
		}

		ena_update_ring_numa_node(tx_ring, rx_ring);

		ret = rx_work_done;
	} else {
		ret = budget;
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.napi_comp += napi_comp_call;
	tx_ring->tx_stats.tx_poll++;
	u64_stats_update_end(&tx_ring->syncp);

	return ret;
}

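/* The READ_ONCE/smp_rmb() on interrupts_masked above pairs with the
 * WRITE_ONCE/smp_wmb() in ena_intr_msix_io(): the unmask (and moderation
 * update) only happens when the poll was scheduled from hard interrupt
 * context, not when it was invoked via busy polling.
 */
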
static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	ena_com_admin_q_comp_intr_handler(adapter->ena_dev);

	/* Don't call the aenq handler before probe is done */
	if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
		ena_com_aenq_intr_handler(adapter->ena_dev, data);

	return IRQ_HANDLED;
}

/* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
 * @irq: interrupt number
 * @data: pointer to a network interface private napi device structure
 */
static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
	struct ena_napi *ena_napi = data;

	ena_napi->first_interrupt = true;

	WRITE_ONCE(ena_napi->interrupts_masked, true);
	smp_wmb(); /* write interrupts_masked before calling napi */

	napi_schedule_irqoff(&ena_napi->napi);

	return IRQ_HANDLED;
}

/* Reserve a single MSI-X vector for management (admin + aenq),
 * plus reserve one vector for each potential io queue.
 * The number of potential io queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
static int ena_enable_msix(struct ena_adapter *adapter)
{
	int msix_vecs, irq_cnt;

	if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, probe, adapter->netdev,
			  "Error, MSI-X is already enabled\n");
		return -EPERM;
	}

	/* Reserved the max msix vectors we might need */
	msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
	netif_dbg(adapter, probe, adapter->netdev,
		  "trying to enable MSI-X, vectors %d\n", msix_vecs);

	irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_cnt < 0) {
		netif_err(adapter, probe, adapter->netdev,
			  "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
		return -ENOSPC;
	}

	if (irq_cnt != msix_vecs) {
		netif_notice(adapter, probe, adapter->netdev,
			     "enable only %d MSI-X (out of %d), reduce the number of queues\n",
			     irq_cnt, msix_vecs);
		adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
	}

	if (ena_init_rx_cpu_rmap(adapter))
		netif_warn(adapter, probe, adapter->netdev,
			   "Failed to map IRQs to CPUs\n");

	adapter->msix_vecs = irq_cnt;
	set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);

	return 0;
}

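/* Vector layout after this call: the first ENA_ADMIN_MSIX_VEC vector(s)
 * serve the admin queue and AENQ, and the remaining vectors map 1:1 to
 * I/O queue pairs, which is why a short allocation above trims
 * num_io_queues to irq_cnt - ENA_ADMIN_MSIX_VEC.
 */
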
static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
{
	u32 cpu;

	snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
		 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
		 pci_name(adapter->pdev));
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
		ena_intr_msix_mgmnt;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
		pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
	cpu = cpumask_first(cpu_online_mask);
	adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
	cpumask_set_cpu(cpu,
			&adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
}

static void ena_setup_io_intr(struct ena_adapter *adapter)
{
	struct net_device *netdev;
	int irq_idx, i, cpu;
	int io_queue_count;

	netdev = adapter->netdev;
	io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;

	for (i = 0; i < io_queue_count; i++) {
		irq_idx = ENA_IO_IRQ_IDX(i);
		cpu = i % num_online_cpus();

		snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
			 "%s-Tx-Rx-%d", netdev->name, i);
		adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
		adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
		adapter->irq_tbl[irq_idx].vector =
			pci_irq_vector(adapter->pdev, irq_idx);
		adapter->irq_tbl[irq_idx].cpu = cpu;

		cpumask_set_cpu(cpu,
				&adapter->irq_tbl[irq_idx].affinity_hint_mask);
	}
}

static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
{
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	rc = request_irq(irq->vector, irq->handler, flags, irq->name,
			 irq->data);
	if (rc) {
		netif_err(adapter, probe, adapter->netdev,
			  "failed to request admin irq\n");
		return rc;
	}

	netif_dbg(adapter, probe, adapter->netdev,
		  "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
		  irq->affinity_hint_mask.bits[0], irq->vector);

	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return rc;
}

static int ena_request_io_irq(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	unsigned long flags = 0;
	struct ena_irq *irq;
	int rc = 0, i, k;

	if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
		netif_err(adapter, ifup, adapter->netdev,
			  "Failed to request I/O IRQ: MSI-X is not enabled\n");
		return -EINVAL;
	}

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
		irq = &adapter->irq_tbl[i];
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 irq->data);
		if (rc) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to request I/O IRQ. index %d rc %d\n",
				  i, rc);
			goto err;
		}

		netif_dbg(adapter, ifup, adapter->netdev,
			  "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
			  i, irq->affinity_hint_mask.bits[0], irq->vector);

		irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
	}

	return rc;

err:
	for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
		irq = &adapter->irq_tbl[k];
		free_irq(irq->vector, irq->data);
	}

	return rc;
}

static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
{
	struct ena_irq *irq;

	irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
	synchronize_irq(irq->vector);
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}

static void ena_free_io_irq(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	struct ena_irq *irq;
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (adapter->msix_vecs >= 1) {
		free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
		adapter->netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
		irq = &adapter->irq_tbl[i];
		irq_set_affinity_hint(irq->vector, NULL);
		free_irq(irq->vector, irq->data);
	}
}

static void ena_disable_msix(struct ena_adapter *adapter)
{
	if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
		pci_free_irq_vectors(adapter->pdev);
}

static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{
	u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
	int i;

	if (!netif_running(adapter->netdev))
		return;

	for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
		synchronize_irq(adapter->irq_tbl[i].vector);
}

static void ena_del_napi_in_range(struct ena_adapter *adapter,
				  int first_index,
				  int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++) {
		netif_napi_del(&adapter->ena_napi[i].napi);

		WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
			adapter->ena_napi[i].xdp_ring);
	}
}

static void ena_init_napi_in_range(struct ena_adapter *adapter,
				   int first_index, int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++) {
		struct ena_napi *napi = &adapter->ena_napi[i];

		netif_napi_add(adapter->netdev,
			       &napi->napi,
			       ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
			       ENA_NAPI_BUDGET);

		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			napi->rx_ring = &adapter->rx_ring[i];
			napi->tx_ring = &adapter->tx_ring[i];
		} else {
			napi->xdp_ring = &adapter->tx_ring[i];
		}
	}
}

static void ena_napi_disable_in_range(struct ena_adapter *adapter,
				      int first_index,
				      int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		napi_disable(&adapter->ena_napi[i].napi);
}

static void ena_napi_enable_in_range(struct ena_adapter *adapter,
				     int first_index,
				     int count)
{
	int i;

	for (i = first_index; i < first_index + count; i++)
		napi_enable(&adapter->ena_napi[i].napi);
}

/* Configure the Rx forwarding */
static int ena_rss_configure(struct ena_adapter *adapter)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	/* In case the RSS table wasn't initialized by probe */
	if (!ena_dev->rss.tbl_log_size) {
		rc = ena_rss_init_default(adapter);
		if (rc && (rc != -EOPNOTSUPP)) {
			netif_err(adapter, ifup, adapter->netdev,
				  "Failed to init RSS rc: %d\n", rc);
			return rc;
		}
	}

	/* Set indirect table */
	rc = ena_com_indirect_table_set(ena_dev);
	if (unlikely(rc && rc != -EOPNOTSUPP))
		return rc;

	/* Configure hash function (if supported) */
	rc = ena_com_set_hash_function(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	/* Configure hash inputs (if supported) */
	rc = ena_com_set_hash_ctrl(ena_dev);
	if (unlikely(rc && (rc != -EOPNOTSUPP)))
		return rc;

	return 0;
}

2286 rc = ena_rss_configure(adapter);
2290 ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
2292 ena_refill_all_rx_bufs(adapter);
2294 /* enable transmits */
2295 netif_tx_start_all_queues(adapter->netdev);
2297 ena_napi_enable_in_range(adapter,
2299 adapter->xdp_num_queues + adapter->num_io_queues);
2304 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
2306 struct ena_com_create_io_ctx ctx;
2307 struct ena_com_dev *ena_dev;
2308 struct ena_ring *tx_ring;
2313 ena_dev = adapter->ena_dev;
2315 tx_ring = &adapter->tx_ring[qid];
2316 msix_vector = ENA_IO_IRQ_IDX(qid);
2317 ena_qid = ENA_IO_TXQ_IDX(qid);
2319 memset(&ctx, 0x0, sizeof(ctx));
2321 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
2323 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
2324 ctx.msix_vector = msix_vector;
2325 ctx.queue_size = tx_ring->ring_size;
2326 ctx.numa_node = cpu_to_node(tx_ring->cpu);
2328 rc = ena_com_create_io_queue(ena_dev, &ctx);
2330 netif_err(adapter, ifup, adapter->netdev,
2331 "Failed to create I/O TX queue num %d rc: %d\n",
2336 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2337 &tx_ring->ena_com_io_sq,
2338 &tx_ring->ena_com_io_cq);
2340 netif_err(adapter, ifup, adapter->netdev,
2341 "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
2343 ena_com_destroy_io_queue(ena_dev, ena_qid);
2347 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
2351 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
2352 int first_index, int count)
2354 struct ena_com_dev *ena_dev = adapter->ena_dev;
2357 for (i = first_index; i < first_index + count; i++) {
2358 rc = ena_create_io_tx_queue(adapter, i);
2366 while (i-- > first_index)
2367 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
2372 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
2374 struct ena_com_dev *ena_dev;
2375 struct ena_com_create_io_ctx ctx;
2376 struct ena_ring *rx_ring;
2381 ena_dev = adapter->ena_dev;
2383 rx_ring = &adapter->rx_ring[qid];
2384 msix_vector = ENA_IO_IRQ_IDX(qid);
2385 ena_qid = ENA_IO_RXQ_IDX(qid);
2387 memset(&ctx, 0x0, sizeof(ctx));
2390 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
2391 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2392 ctx.msix_vector = msix_vector;
2393 ctx.queue_size = rx_ring->ring_size;
2394 ctx.numa_node = cpu_to_node(rx_ring->cpu);
2396 rc = ena_com_create_io_queue(ena_dev, &ctx);
2398 netif_err(adapter, ifup, adapter->netdev,
2399 "Failed to create I/O RX queue num %d rc: %d\n",
2404 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2405 &rx_ring->ena_com_io_sq,
2406 &rx_ring->ena_com_io_cq);
2408 netif_err(adapter, ifup, adapter->netdev,
2409 "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
2414 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
2418 ena_com_destroy_io_queue(ena_dev, ena_qid);
2422 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
2424 struct ena_com_dev *ena_dev = adapter->ena_dev;
2427 for (i = 0; i < adapter->num_io_queues; i++) {
2428 rc = ena_create_io_rx_queue(adapter, i);
2431 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
2438 cancel_work_sync(&adapter->ena_napi[i].dim.work);
2439 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
2445 static void set_io_rings_size(struct ena_adapter *adapter,
2451 for (i = 0; i < adapter->num_io_queues; i++) {
2452 adapter->tx_ring[i].ring_size = new_tx_size;
2453 adapter->rx_ring[i].ring_size = new_rx_size;
2457 /* This function allows queue allocation to backoff when the system is
2458 * low on memory. If there is not enough memory to allocate io queues
2459 * the driver will try to allocate smaller queues.
2461 * The backoff algorithm is as follows:
 * 1. Try to allocate TX and RX; if successful
 * 1.1. return success.
 * 2. Divide by 2 the size of the larger of the RX and TX queues (or both
 *    if their size is the same).
 * 3. If TX or RX is smaller than 256
 * 3.1. return failure.
 * 4. Else
 * 4.1. go back to 1.
 */
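/* Worked example (illustrative numbers only): with requested sizes
 * TX=1024 and RX=1024, each -ENOMEM retry halves both queues, giving
 * 512 and then 256; one more halving would drop below
 * ENA_MIN_RING_SIZE (256), so at that point the driver gives up
 * instead of retrying.
 */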
2472 static int create_queues_with_size_backoff(struct ena_adapter *adapter)
2474 int rc, cur_rx_ring_size, cur_tx_ring_size;
2475 int new_rx_ring_size, new_tx_ring_size;
/* Current queue sizes might be set smaller than the requested
 * ones due to past queue allocation failures.
 */
2480 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2481 adapter->requested_rx_ring_size);
2484 if (ena_xdp_present(adapter)) {
2485 rc = ena_setup_and_create_all_xdp_queues(adapter);
2490 rc = ena_setup_tx_resources_in_range(adapter,
2492 adapter->num_io_queues);
2496 rc = ena_create_io_tx_queues_in_range(adapter,
2498 adapter->num_io_queues);
2500 goto err_create_tx_queues;
2502 rc = ena_setup_all_rx_resources(adapter);
2506 rc = ena_create_all_io_rx_queues(adapter);
2508 goto err_create_rx_queues;
2512 err_create_rx_queues:
2513 ena_free_all_io_rx_resources(adapter);
2515 ena_destroy_all_tx_queues(adapter);
2516 err_create_tx_queues:
2517 ena_free_all_io_tx_resources(adapter);
2519 if (rc != -ENOMEM) {
2520 netif_err(adapter, ifup, adapter->netdev,
2521 "Queue creation failed with error code %d\n",
2526 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2527 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2529 netif_err(adapter, ifup, adapter->netdev,
2530 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2531 cur_tx_ring_size, cur_rx_ring_size);
2533 new_tx_ring_size = cur_tx_ring_size;
2534 new_rx_ring_size = cur_rx_ring_size;
2536 /* Decrease the size of the larger queue, or
2537 * decrease both if they are the same size.
2539 if (cur_rx_ring_size <= cur_tx_ring_size)
2540 new_tx_ring_size = cur_tx_ring_size / 2;
2541 if (cur_rx_ring_size >= cur_tx_ring_size)
2542 new_rx_ring_size = cur_rx_ring_size / 2;
2544 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2545 new_rx_ring_size < ENA_MIN_RING_SIZE) {
2546 netif_err(adapter, ifup, adapter->netdev,
2547 "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
2552 netif_err(adapter, ifup, adapter->netdev,
2553 "Retrying queue creation with sizes TX=%d, RX=%d\n",
2557 set_io_rings_size(adapter, new_tx_ring_size,
2562 static int ena_up(struct ena_adapter *adapter)
2564 int io_queue_count, rc, i;
2566 netdev_dbg(adapter->netdev, "%s\n", __func__);
2568 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2569 ena_setup_io_intr(adapter);
/* NAPI poll functions must be initialized before request_irq() is
 * called, to handle the rare condition where a pending interrupt fires
 * the ISR immediately while the poll function isn't set yet, which
 * would cause a NULL dereference.
 */
2576 ena_init_napi_in_range(adapter, 0, io_queue_count);
2578 rc = ena_request_io_irq(adapter);
2582 rc = create_queues_with_size_backoff(adapter);
2584 goto err_create_queues_with_backoff;
2586 rc = ena_up_complete(adapter);
2590 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2591 netif_carrier_on(adapter->netdev);
2593 u64_stats_update_begin(&adapter->syncp);
2594 adapter->dev_stats.interface_up++;
2595 u64_stats_update_end(&adapter->syncp);
2597 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
/* Enable completion queue interrupts */
2600 for (i = 0; i < adapter->num_io_queues; i++)
2601 ena_unmask_interrupt(&adapter->tx_ring[i],
2602 &adapter->rx_ring[i]);
/* Schedule NAPI in case we had pending packets
 * from the last time we disabled NAPI.
 */
2607 for (i = 0; i < io_queue_count; i++)
2608 napi_schedule(&adapter->ena_napi[i].napi);
2613 ena_destroy_all_tx_queues(adapter);
2614 ena_free_all_io_tx_resources(adapter);
2615 ena_destroy_all_rx_queues(adapter);
2616 ena_free_all_io_rx_resources(adapter);
2617 err_create_queues_with_backoff:
2618 ena_free_io_irq(adapter);
2620 ena_del_napi_in_range(adapter, 0, io_queue_count);
2625 static void ena_down(struct ena_adapter *adapter)
2627 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2629 netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
2631 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2633 u64_stats_update_begin(&adapter->syncp);
2634 adapter->dev_stats.interface_down++;
2635 u64_stats_update_end(&adapter->syncp);
2637 netif_carrier_off(adapter->netdev);
2638 netif_tx_disable(adapter->netdev);
2640 /* After this point the napi handler won't enable the tx queue */
2641 ena_napi_disable_in_range(adapter, 0, io_queue_count);
/* After the queues are destroyed there won't be any new interrupts */
2645 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
2648 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2650 dev_err(&adapter->pdev->dev, "Device reset failed\n");
2651 /* stop submitting admin commands on a device that was reset */
2652 ena_com_set_admin_running_state(adapter->ena_dev, false);
2655 ena_destroy_all_io_queues(adapter);
2657 ena_disable_io_intr_sync(adapter);
2658 ena_free_io_irq(adapter);
2659 ena_del_napi_in_range(adapter, 0, io_queue_count);
2661 ena_free_all_tx_bufs(adapter);
2662 ena_free_all_rx_bufs(adapter);
2663 ena_free_all_io_tx_resources(adapter);
2664 ena_free_all_io_rx_resources(adapter);
2667 /* ena_open - Called when a network interface is made active
2668 * @netdev: network interface device structure
2670 * Returns 0 on success, negative value on failure
2672 * The open entry point is called when a network interface is made
2673 * active by the system (IFF_UP). At this point all resources needed
2674 * for transmit and receive operations are allocated, the interrupt
2675 * handler is registered with the OS, the watchdog timer is started,
2676 * and the stack is notified that the interface is ready.
2678 static int ena_open(struct net_device *netdev)
2680 struct ena_adapter *adapter = netdev_priv(netdev);
2683 /* Notify the stack of the actual queue counts. */
2684 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
2686 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2690 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
2692 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2696 rc = ena_up(adapter);
2703 /* ena_close - Disables a network interface
2704 * @netdev: network interface device structure
2706 * Returns 0, this is not allowed to fail
2708 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
2710 * needs to be disabled. A global MAC reset is issued to stop the
2711 * hardware, and all transmit and receive resources are freed.
2713 static int ena_close(struct net_device *netdev)
2715 struct ena_adapter *adapter = netdev_priv(netdev);
2717 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2719 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2722 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
/* Check for device status and issue reset if needed */
2726 check_for_admin_com_state(adapter);
2727 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2728 netif_err(adapter, ifdown, adapter->netdev,
2729 "Destroy failure, restarting device\n");
2730 ena_dump_stats_to_dmesg(adapter);
2731 /* rtnl lock already obtained in dev_ioctl() layer */
2732 ena_destroy_device(adapter, false);
2733 ena_restore_device(adapter);
2739 int ena_update_queue_sizes(struct ena_adapter *adapter,
2745 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2746 ena_close(adapter->netdev);
2747 adapter->requested_tx_ring_size = new_tx_size;
2748 adapter->requested_rx_ring_size = new_rx_size;
2749 ena_init_io_rings(adapter,
2751 adapter->xdp_num_queues +
2752 adapter->num_io_queues);
2753 return dev_was_up ? ena_up(adapter) : 0;
2756 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
2758 struct ena_com_dev *ena_dev = adapter->ena_dev;
2759 int prev_channel_count;
2762 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2763 ena_close(adapter->netdev);
2764 prev_channel_count = adapter->num_io_queues;
2765 adapter->num_io_queues = new_channel_count;
2766 if (ena_xdp_present(adapter) &&
2767 ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
2768 adapter->xdp_first_ring = new_channel_count;
2769 adapter->xdp_num_queues = new_channel_count;
2770 if (prev_channel_count > new_channel_count)
2771 ena_xdp_exchange_program_rx_in_range(adapter,
2774 prev_channel_count);
2776 ena_xdp_exchange_program_rx_in_range(adapter,
2777 adapter->xdp_bpf_prog,
2782 /* We need to destroy the rss table so that the indirection
2783 * table will be reinitialized by ena_up()
2785 ena_com_rss_destroy(ena_dev);
2786 ena_init_io_rings(adapter,
2788 adapter->xdp_num_queues +
2789 adapter->num_io_queues);
2790 return dev_was_up ? ena_open(adapter->netdev) : 0;
2793 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx,
2794 struct sk_buff *skb,
2795 bool disable_meta_caching)
2797 u32 mss = skb_shinfo(skb)->gso_size;
2798 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
2801 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2802 ena_tx_ctx->l4_csum_enable = 1;
2804 ena_tx_ctx->tso_enable = 1;
2805 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2806 ena_tx_ctx->l4_csum_partial = 0;
2808 ena_tx_ctx->tso_enable = 0;
2809 ena_meta->l4_hdr_len = 0;
2810 ena_tx_ctx->l4_csum_partial = 1;
2813 switch (ip_hdr(skb)->version) {
2815 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2816 if (ip_hdr(skb)->frag_off & htons(IP_DF))
2819 ena_tx_ctx->l3_csum_enable = 1;
2820 l4_protocol = ip_hdr(skb)->protocol;
2823 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2824 l4_protocol = ipv6_hdr(skb)->nexthdr;
2830 if (l4_protocol == IPPROTO_TCP)
2831 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2833 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2835 ena_meta->mss = mss;
2836 ena_meta->l3_hdr_len = skb_network_header_len(skb);
2837 ena_meta->l3_hdr_offset = skb_network_offset(skb);
2838 ena_tx_ctx->meta_valid = 1;
2839 } else if (disable_meta_caching) {
2840 memset(ena_meta, 0, sizeof(*ena_meta));
2841 ena_tx_ctx->meta_valid = 1;
2843 ena_tx_ctx->meta_valid = 0;
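/* The device can take at most sgl_size buffer descriptors per packet:
 * skbs with more frags than that (or exactly sgl_size frags plus a
 * header too large to be pushed) are flattened into one linear buffer
 * by the helper below.
 */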
2847 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
2848 struct sk_buff *skb)
2850 int num_frags, header_len, rc;
2852 num_frags = skb_shinfo(skb)->nr_frags;
2853 header_len = skb_headlen(skb);
2855 if (num_frags < tx_ring->sgl_size)
2858 if ((num_frags == tx_ring->sgl_size) &&
2859 (header_len < tx_ring->tx_max_header_size))
2862 u64_stats_update_begin(&tx_ring->syncp);
2863 tx_ring->tx_stats.linearize++;
2864 u64_stats_update_end(&tx_ring->syncp);
2866 rc = skb_linearize(skb);
2868 u64_stats_update_begin(&tx_ring->syncp);
2869 tx_ring->tx_stats.linearize_failed++;
2870 u64_stats_update_end(&tx_ring->syncp);
2876 static int ena_tx_map_skb(struct ena_ring *tx_ring,
2877 struct ena_tx_buffer *tx_info,
2878 struct sk_buff *skb,
2882 struct ena_adapter *adapter = tx_ring->adapter;
2883 struct ena_com_buf *ena_buf;
2885 u32 skb_head_len, frag_len, last_frag;
2890 skb_head_len = skb_headlen(skb);
2892 ena_buf = tx_info->bufs;
2894 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
/* When the device is in LLQ mode, the driver copies
 * the header into the device memory space.
 * The ena_com layer assumes the header is in a linear
 * memory space.
 * This assumption might be wrong since part of the header
 * can be in the fragmented buffers.
 * Use skb_header_pointer to make sure the header is in a
 * linear memory space.
 */
2905 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2906 *push_hdr = skb_header_pointer(skb, 0, push_len,
2907 tx_ring->push_buf_intermediate_buf);
2908 *header_len = push_len;
2909 if (unlikely(skb->data != *push_hdr)) {
2910 u64_stats_update_begin(&tx_ring->syncp);
2911 tx_ring->tx_stats.llq_buffer_copy++;
2912 u64_stats_update_end(&tx_ring->syncp);
2914 delta = push_len - skb_head_len;
2918 *header_len = min_t(u32, skb_head_len,
2919 tx_ring->tx_max_header_size);
2922 netif_dbg(adapter, tx_queued, adapter->netdev,
2923 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2924 *push_hdr, push_len);
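/* Illustrative example (assumed sizes, not taken from the device): with
 * tx_max_header_size = 96 and skb_headlen() = 64 on a larger packet,
 * push_len = 96, so 32 header bytes live in the first frag; the whole
 * linear part is consumed by the push (64 <= 96), and delta = 32 makes
 * the frag mapping loop below skip those already-pushed bytes.
 */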
2926 if (skb_head_len > push_len) {
2927 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2928 skb_head_len - push_len, DMA_TO_DEVICE);
2929 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2930 goto error_report_dma_error;
2932 ena_buf->paddr = dma;
2933 ena_buf->len = skb_head_len - push_len;
2936 tx_info->num_of_bufs++;
2937 tx_info->map_linear_data = 1;
2939 tx_info->map_linear_data = 0;
2942 last_frag = skb_shinfo(skb)->nr_frags;
2944 for (i = 0; i < last_frag; i++) {
2945 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2947 frag_len = skb_frag_size(frag);
2949 if (unlikely(delta >= frag_len)) {
2954 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2955 frag_len - delta, DMA_TO_DEVICE);
2956 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2957 goto error_report_dma_error;
2959 ena_buf->paddr = dma;
2960 ena_buf->len = frag_len - delta;
2962 tx_info->num_of_bufs++;
2968 error_report_dma_error:
2969 u64_stats_update_begin(&tx_ring->syncp);
2970 tx_ring->tx_stats.dma_mapping_err++;
2971 u64_stats_update_end(&tx_ring->syncp);
2972 netdev_warn(adapter->netdev, "failed to map skb\n");
2974 tx_info->skb = NULL;
2976 tx_info->num_of_bufs += i;
2977 ena_unmap_tx_buff(tx_ring, tx_info);
2982 /* Called with netif_tx_lock. */
2983 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2985 struct ena_adapter *adapter = netdev_priv(dev);
2986 struct ena_tx_buffer *tx_info;
2987 struct ena_com_tx_ctx ena_tx_ctx;
2988 struct ena_ring *tx_ring;
2989 struct netdev_queue *txq;
2991 u16 next_to_use, req_id, header_len;
2994 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2995 /* Determine which tx ring we will be placed on */
2996 qid = skb_get_queue_mapping(skb);
2997 tx_ring = &adapter->tx_ring[qid];
2998 txq = netdev_get_tx_queue(dev, qid);
3000 rc = ena_check_and_linearize_skb(tx_ring, skb);
3002 goto error_drop_packet;
3004 skb_tx_timestamp(skb);
3006 next_to_use = tx_ring->next_to_use;
3007 req_id = tx_ring->free_ids[next_to_use];
3008 tx_info = &tx_ring->tx_buffer_info[req_id];
3009 tx_info->num_of_bufs = 0;
3011 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
3013 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
3015 goto error_drop_packet;
3017 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
3018 ena_tx_ctx.ena_bufs = tx_info->bufs;
3019 ena_tx_ctx.push_header = push_hdr;
3020 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
3021 ena_tx_ctx.req_id = req_id;
3022 ena_tx_ctx.header_len = header_len;
3024 /* set flags and meta data */
3025 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
3027 rc = ena_xmit_common(dev,
3034 goto error_unmap_dma;
3036 netdev_tx_sent_queue(txq, skb->len);
/* Stop the queue when no more space is available: a packet can need up
 * to sgl_size + 2 descriptors, one for the meta descriptor and one for
 * the header (if the header is larger than tx_max_header_size).
 */
3042 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3043 tx_ring->sgl_size + 2))) {
3044 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
3047 netif_tx_stop_queue(txq);
3048 u64_stats_update_begin(&tx_ring->syncp);
3049 tx_ring->tx_stats.queue_stop++;
3050 u64_stats_update_end(&tx_ring->syncp);
/* There is a rare condition where this function decides to
 * stop the queue but meanwhile clean_tx_irq updates
 * next_to_completion and terminates.
 * The queue would then remain stopped forever.
 * To solve this issue add an mb() to make sure that the
 * netif_tx_stop_queue() write is visible before checking if
 * there is additional space in the queue.
 */
smp_mb();
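/* Sketch of the race being closed (assumed interleaving):
 *   xmit CPU                        completion CPU
 *   sees the ring as full           frees descriptors
 *   netif_tx_stop_queue()           checks for a stopped queue before
 *                                   the stop is visible -> no wake
 * The barrier above plus the space re-check below keep the queue from
 * staying stopped forever.
 */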
3062 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3063 ENA_TX_WAKEUP_THRESH)) {
3064 netif_tx_wake_queue(txq);
3065 u64_stats_update_begin(&tx_ring->syncp);
3066 tx_ring->tx_stats.queue_wakeup++;
3067 u64_stats_update_end(&tx_ring->syncp);
3071 if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
/* Trigger the DMA engine; ena_com_write_sq_doorbell()
 * also includes a memory barrier.
 */
3075 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
3076 u64_stats_update_begin(&tx_ring->syncp);
3077 tx_ring->tx_stats.doorbells++;
3078 u64_stats_update_end(&tx_ring->syncp);
3081 return NETDEV_TX_OK;
3084 ena_unmap_tx_buff(tx_ring, tx_info);
3085 tx_info->skb = NULL;
3089 return NETDEV_TX_OK;
3092 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
3093 struct net_device *sb_dev)
/* We suspect this is useful for in-kernel network services that
 * want to loop an incoming skb's RX queue back to TX; with normal
 * user-generated traffic we will most probably not get here.
 */
3100 if (skb_rx_queue_recorded(skb))
3101 qid = skb_get_rx_queue(skb);
3103 qid = netdev_pick_tx(dev, skb, NULL);
3108 static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3110 struct ena_admin_host_info *host_info;
3113 /* Allocate only the host info */
3114 rc = ena_com_allocate_host_info(ena_dev);
3116 pr_err("Cannot allocate host info\n");
3120 host_info = ena_dev->host_attr.host_info;
3122 host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
3123 host_info->os_type = ENA_ADMIN_OS_LINUX;
3124 host_info->kernel_ver = LINUX_VERSION_CODE;
3125 strlcpy(host_info->kernel_ver_str, utsname()->version,
3126 sizeof(host_info->kernel_ver_str) - 1);
3127 host_info->os_dist = 0;
3128 strncpy(host_info->os_dist_str, utsname()->release,
3129 sizeof(host_info->os_dist_str) - 1);
3130 host_info->driver_version =
3131 (DRV_MODULE_GEN_MAJOR) |
3132 (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
3133 (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
3134 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
3135 host_info->num_cpus = num_online_cpus();
3137 host_info->driver_supported_features =
3138 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
3139 ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
3140 ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
3141 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
3143 rc = ena_com_set_host_attributes(ena_dev);
3145 if (rc == -EOPNOTSUPP)
3146 pr_warn("Cannot set host attributes\n");
3148 pr_err("Cannot set host attributes\n");
3156 ena_com_delete_host_info(ena_dev);
3159 static void ena_config_debug_area(struct ena_adapter *adapter)
3161 u32 debug_area_size;
3164 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
3165 if (ss_count <= 0) {
3166 netif_err(adapter, drv, adapter->netdev,
3167 "SS count is negative\n");
/* Allocate 32 bytes for each string and 64 bits for the value */
3172 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
3174 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
3176 pr_err("Cannot allocate debug area\n");
3180 rc = ena_com_set_host_attributes(adapter->ena_dev);
3182 if (rc == -EOPNOTSUPP)
3183 netif_warn(adapter, drv, adapter->netdev,
3184 "Cannot set host attributes\n");
3186 netif_err(adapter, drv, adapter->netdev,
3187 "Cannot set host attributes\n");
3193 ena_com_delete_debug_area(adapter->ena_dev);
3196 int ena_update_hw_stats(struct ena_adapter *adapter)
3200 rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
3202 dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n");
3209 static void ena_get_stats64(struct net_device *netdev,
3210 struct rtnl_link_stats64 *stats)
3212 struct ena_adapter *adapter = netdev_priv(netdev);
3213 struct ena_ring *rx_ring, *tx_ring;
3219 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3222 for (i = 0; i < adapter->num_io_queues; i++) {
3225 tx_ring = &adapter->tx_ring[i];
3228 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
3229 packets = tx_ring->tx_stats.cnt;
3230 bytes = tx_ring->tx_stats.bytes;
3231 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
3233 stats->tx_packets += packets;
3234 stats->tx_bytes += bytes;
3236 rx_ring = &adapter->rx_ring[i];
3239 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
3240 packets = rx_ring->rx_stats.cnt;
3241 bytes = rx_ring->rx_stats.bytes;
3242 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
3244 stats->rx_packets += packets;
3245 stats->rx_bytes += bytes;
3249 start = u64_stats_fetch_begin_irq(&adapter->syncp);
3250 rx_drops = adapter->dev_stats.rx_drops;
3251 tx_drops = adapter->dev_stats.tx_drops;
3252 } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
3254 stats->rx_dropped = rx_drops;
3255 stats->tx_dropped = tx_drops;
3257 stats->multicast = 0;
3258 stats->collisions = 0;
3260 stats->rx_length_errors = 0;
3261 stats->rx_crc_errors = 0;
3262 stats->rx_frame_errors = 0;
3263 stats->rx_fifo_errors = 0;
3264 stats->rx_missed_errors = 0;
3265 stats->tx_window_errors = 0;
3267 stats->rx_errors = 0;
3268 stats->tx_errors = 0;
3271 static const struct net_device_ops ena_netdev_ops = {
3272 .ndo_open = ena_open,
3273 .ndo_stop = ena_close,
3274 .ndo_start_xmit = ena_start_xmit,
3275 .ndo_select_queue = ena_select_queue,
3276 .ndo_get_stats64 = ena_get_stats64,
3277 .ndo_tx_timeout = ena_tx_timeout,
3278 .ndo_change_mtu = ena_change_mtu,
3279 .ndo_set_mac_address = NULL,
3280 .ndo_validate_addr = eth_validate_addr,
3284 static int ena_device_validate_params(struct ena_adapter *adapter,
3285 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3287 struct net_device *netdev = adapter->netdev;
3290 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
3293 netif_err(adapter, drv, netdev,
3294 "Error, mac address are different\n");
3298 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
3299 netif_err(adapter, drv, netdev,
3300 "Error, device max mtu is smaller than netdev MTU\n");
3307 static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
3309 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
3310 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
3311 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
3312 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
3313 llq_config->llq_ring_entry_size_value = 128;
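/* A sizing sketch under the defaults above (descriptor size assumed to
 * be 16 bytes): a 128B LLQ entry holding 2 descriptors before the
 * header leaves 128 - 2 * 16 = 96 bytes of packet header that can be
 * pushed inline.
 */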
3316 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3317 struct ena_com_dev *ena_dev,
3318 struct ena_admin_feature_llq_desc *llq,
3319 struct ena_llq_configurations *llq_default_configurations)
3322 u32 llq_feature_mask;
3324 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3325 if (!(ena_dev->supported_features & llq_feature_mask)) {
3327 "LLQ is not supported Fallback to host mode policy.\n");
3328 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3332 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3335 "Failed to configure the device mode. Fallback to host mode policy.\n");
3336 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3342 static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
3345 bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
3348 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
3350 "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
3351 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3357 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3358 pci_resource_start(pdev, ENA_MEM_BAR),
3359 pci_resource_len(pdev, ENA_MEM_BAR));
3361 if (!ena_dev->mem_bar)
3367 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
3368 struct ena_com_dev_get_features_ctx *get_feat_ctx,
3371 struct ena_llq_configurations llq_config;
3372 struct device *dev = &pdev->dev;
3373 bool readless_supported;
3378 rc = ena_com_mmio_reg_read_request_init(ena_dev);
3380 dev_err(dev, "failed to init mmio read less\n");
/* The PCIe configuration space revision id indicates whether
 * mmio register read is disabled.
 */
3387 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
3388 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3390 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
3392 dev_err(dev, "Can not reset device\n");
3393 goto err_mmio_read_less;
3396 rc = ena_com_validate_version(ena_dev);
3398 dev_err(dev, "device version is too low\n");
3399 goto err_mmio_read_less;
3402 dma_width = ena_com_get_dma_width(ena_dev);
3403 if (dma_width < 0) {
3404 dev_err(dev, "Invalid dma width value %d", dma_width);
3406 goto err_mmio_read_less;
3409 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
3411 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
3412 goto err_mmio_read_less;
3415 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
dev_err(dev, "pci_set_consistent_dma_mask failed 0x%x\n",
3419 goto err_mmio_read_less;
3422 /* ENA admin level init */
3423 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
3426 "Can not initialize ena admin queue with device\n");
3427 goto err_mmio_read_less;
/* To enable the MSI-X interrupts the driver needs to know the number
 * of queues, so the driver uses polling mode to retrieve this
 * information.
 */
3434 ena_com_set_admin_polling_mode(ena_dev, true);
3436 ena_config_host_info(ena_dev, pdev);
/* Get Device Attributes */
3439 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3441 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
3442 goto err_admin_init;
/* Try to turn on all the available AENQ groups */
3446 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3447 BIT(ENA_ADMIN_FATAL_ERROR) |
3448 BIT(ENA_ADMIN_WARNING) |
3449 BIT(ENA_ADMIN_NOTIFICATION) |
3450 BIT(ENA_ADMIN_KEEP_ALIVE);
3452 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3454 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3456 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
3457 goto err_admin_init;
3460 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3462 set_default_llq_configurations(&llq_config);
3464 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
3467 dev_err(&pdev->dev, "ena device init failed\n");
3468 goto err_admin_init;
3474 ena_com_delete_host_info(ena_dev);
3475 ena_com_admin_destroy(ena_dev);
3477 ena_com_mmio_reg_read_request_destroy(ena_dev);
3482 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
3484 struct ena_com_dev *ena_dev = adapter->ena_dev;
3485 struct device *dev = &adapter->pdev->dev;
3488 rc = ena_enable_msix(adapter);
3490 dev_err(dev, "Can not reserve msix vectors\n");
3494 ena_setup_mgmnt_intr(adapter);
3496 rc = ena_request_mgmnt_irq(adapter);
3498 dev_err(dev, "Can not setup management interrupts\n");
3499 goto err_disable_msix;
3502 ena_com_set_admin_polling_mode(ena_dev, false);
3504 ena_com_admin_aenq_enable(ena_dev);
3509 ena_disable_msix(adapter);
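/* Quiesce and free everything device init set up, in reverse order:
 * timer service, I/O path (via ena_down() when the interface was up),
 * AENQ/admin queue, management IRQ, MSI-X vectors and the mmio
 * read-less support; the reset reason is then restored to
 * ENA_REGS_RESET_NORMAL for the next cycle.
 */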
3514 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3516 struct net_device *netdev = adapter->netdev;
3517 struct ena_com_dev *ena_dev = adapter->ena_dev;
3520 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3523 netif_carrier_off(netdev);
3525 del_timer_sync(&adapter->timer_service);
3527 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
3528 adapter->dev_up_before_reset = dev_up;
3530 ena_com_set_admin_running_state(ena_dev, false);
3532 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
/* Stop the device from sending AENQ events (if the reset flag is set
 * and the device is up, ena_down() has already reset the device).
 */
3538 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
3539 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3541 ena_free_mgmnt_irq(adapter);
3543 ena_disable_msix(adapter);
3545 ena_com_abort_admin_commands(ena_dev);
3547 ena_com_wait_for_abort_completion(ena_dev);
3549 ena_com_admin_destroy(ena_dev);
3551 ena_com_mmio_reg_read_request_destroy(ena_dev);
3553 /* return reset reason to default value */
3554 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3556 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3557 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3560 static int ena_restore_device(struct ena_adapter *adapter)
3562 struct ena_com_dev_get_features_ctx get_feat_ctx;
3563 struct ena_com_dev *ena_dev = adapter->ena_dev;
3564 struct pci_dev *pdev = adapter->pdev;
3568 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3569 rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
3571 dev_err(&pdev->dev, "Can not initialize device\n");
3574 adapter->wd_state = wd_state;
3576 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3578 dev_err(&pdev->dev, "Validation of device parameters failed\n");
3579 goto err_device_destroy;
3582 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3584 dev_err(&pdev->dev, "Enable MSI-X failed\n");
3585 goto err_device_destroy;
/* If the interface was up before the reset, bring it up */
3588 if (adapter->dev_up_before_reset) {
3589 rc = ena_up(adapter);
3591 dev_err(&pdev->dev, "Failed to create I/O queues\n");
3592 goto err_disable_msix;
3596 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3598 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3599 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
3600 netif_carrier_on(adapter->netdev);
3602 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3603 dev_err(&pdev->dev, "Device reset completed successfully\n");
3604 adapter->last_keep_alive_jiffies = jiffies;
3608 ena_free_mgmnt_irq(adapter);
3609 ena_disable_msix(adapter);
3611 ena_com_abort_admin_commands(ena_dev);
3612 ena_com_wait_for_abort_completion(ena_dev);
3613 ena_com_admin_destroy(ena_dev);
3614 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3615 ena_com_mmio_reg_read_request_destroy(ena_dev);
3617 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3618 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3620 "Reset attempt failed. Can not reset the device\n");
3625 static void ena_fw_reset_device(struct work_struct *work)
3627 struct ena_adapter *adapter =
3628 container_of(work, struct ena_adapter, reset_task);
3632 if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3633 ena_destroy_device(adapter, false);
3634 ena_restore_device(adapter);
3640 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3641 struct ena_ring *rx_ring)
3643 if (likely(rx_ring->first_interrupt))
3646 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3649 rx_ring->no_interrupt_event_cnt++;
3651 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3652 netif_err(adapter, rx_err, adapter->netdev,
3653 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3655 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3656 smp_mb__before_atomic();
3657 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3664 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3665 struct ena_ring *tx_ring)
3667 struct ena_tx_buffer *tx_buf;
3668 unsigned long last_jiffies;
3672 for (i = 0; i < tx_ring->ring_size; i++) {
3673 tx_buf = &tx_ring->tx_buffer_info[i];
3674 last_jiffies = tx_buf->last_jiffies;
3676 if (last_jiffies == 0)
3677 /* no pending Tx at this location */
3680 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
3681 2 * adapter->missing_tx_completion_to))) {
/* If the interrupt is still not received after the grace
 * period, we schedule a reset.
 */
3685 netif_err(adapter, tx_err, adapter->netdev,
3686 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
3688 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
3689 smp_mb__before_atomic();
3690 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3694 if (unlikely(time_is_before_jiffies(last_jiffies +
3695 adapter->missing_tx_completion_to))) {
3696 if (!tx_buf->print_once)
3697 netif_notice(adapter, tx_err, adapter->netdev,
3698 "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
3701 tx_buf->print_once = 1;
3706 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
3707 netif_err(adapter, tx_err, adapter->netdev,
3708 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
3710 adapter->missing_tx_completion_threshold);
3711 adapter->reset_reason =
3712 ENA_REGS_RESET_MISS_TX_CMPL;
3713 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3717 u64_stats_update_begin(&tx_ring->syncp);
3718 tx_ring->tx_stats.missed_tx += missed_tx;
3719 u64_stats_update_end(&tx_ring->syncp);
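/* Invoked from the timer service: scan up to ENA_MONITORED_TX_QUEUES
 * rings per pass, resuming from last_monitored_tx_qid, looking for TX
 * buffers whose completion is overdue; non-XDP rings are also checked
 * for a missed RX interrupt on the same pass.
 */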
3724 static void check_for_missing_completions(struct ena_adapter *adapter)
3726 struct ena_ring *tx_ring;
3727 struct ena_ring *rx_ring;
3731 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
/* Make sure another context isn't taking the device down while we check it */
3735 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3738 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3741 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
3744 budget = ENA_MONITORED_TX_QUEUES;
3746 for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
3747 tx_ring = &adapter->tx_ring[i];
3748 rx_ring = &adapter->rx_ring[i];
3750 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
3754 rc = !ENA_IS_XDP_INDEX(adapter, i) ?
3755 check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
3764 adapter->last_monitored_tx_qid = i % io_queue_count;
3767 /* trigger napi schedule after 2 consecutive detections */
3768 #define EMPTY_RX_REFILL 2
/* For the rare case where the device runs out of Rx descriptors and the
 * napi handler failed to refill new Rx descriptors (due to a lack of
 * memory for example).
 * This case will lead to a deadlock:
 * The device won't send interrupts since all the new Rx packets will
 * be dropped.
 * The napi handler won't allocate new Rx descriptors, so the device
 * won't be able to send new packets.
 *
 * This scenario can happen when the kernel's vm.min_free_kbytes is too
 * small. It is recommended to have at least 512MB, with a minimum of
 * 128MB for constrained environments.
 *
 * When such a situation is detected - reschedule napi.
 */
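/* A tuning sketch (an assumption about the host, not enforced by the
 * driver): reserving ~512MB can be done with
 *   sysctl -w vm.min_free_kbytes=524288
 */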
3783 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
3785 struct ena_ring *rx_ring;
3786 int i, refill_required;
3788 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3791 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
3794 for (i = 0; i < adapter->num_io_queues; i++) {
3795 rx_ring = &adapter->rx_ring[i];
3797 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
3798 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
3799 rx_ring->empty_rx_queue++;
3801 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
3802 u64_stats_update_begin(&rx_ring->syncp);
3803 rx_ring->rx_stats.empty_rx_ring++;
3804 u64_stats_update_end(&rx_ring->syncp);
3806 netif_err(adapter, drv, adapter->netdev,
3807 "trigger refill for ring %d\n", i);
3809 napi_schedule(rx_ring->napi);
3810 rx_ring->empty_rx_queue = 0;
3813 rx_ring->empty_rx_queue = 0;
3818 /* Check for keep alive expiration */
3819 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
3821 unsigned long keep_alive_expired;
3823 if (!adapter->wd_state)
3826 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3829 keep_alive_expired = adapter->last_keep_alive_jiffies +
3830 adapter->keep_alive_timeout;
3831 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
3832 netif_err(adapter, drv, adapter->netdev,
3833 "Keep alive watchdog timeout.\n");
3834 u64_stats_update_begin(&adapter->syncp);
3835 adapter->dev_stats.wd_expired++;
3836 u64_stats_update_end(&adapter->syncp);
3837 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
3838 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3842 static void check_for_admin_com_state(struct ena_adapter *adapter)
3844 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
3845 netif_err(adapter, drv, adapter->netdev,
3846 "ENA admin queue is not in running state!\n");
3847 u64_stats_update_begin(&adapter->syncp);
3848 adapter->dev_stats.admin_q_pause++;
3849 u64_stats_update_end(&adapter->syncp);
3850 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
3851 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3855 static void ena_update_hints(struct ena_adapter *adapter,
3856 struct ena_admin_ena_hw_hints *hints)
3858 struct net_device *netdev = adapter->netdev;
3860 if (hints->admin_completion_tx_timeout)
3861 adapter->ena_dev->admin_queue.completion_timeout =
3862 hints->admin_completion_tx_timeout * 1000;
3864 if (hints->mmio_read_timeout)
3865 /* convert to usec */
3866 adapter->ena_dev->mmio_read.reg_read_to =
3867 hints->mmio_read_timeout * 1000;
3869 if (hints->missed_tx_completion_count_threshold_to_reset)
3870 adapter->missing_tx_completion_threshold =
3871 hints->missed_tx_completion_count_threshold_to_reset;
3873 if (hints->missing_tx_completion_timeout) {
3874 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3875 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
3877 adapter->missing_tx_completion_to =
3878 msecs_to_jiffies(hints->missing_tx_completion_timeout);
3881 if (hints->netdev_wd_timeout)
3882 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
3884 if (hints->driver_watchdog_timeout) {
3885 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
3886 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
3888 adapter->keep_alive_timeout =
3889 msecs_to_jiffies(hints->driver_watchdog_timeout);
3893 static void ena_update_host_info(struct ena_admin_host_info *host_info,
3894 struct net_device *netdev)
3896 host_info->supported_network_features[0] =
3897 netdev->features & GENMASK_ULL(31, 0);
3898 host_info->supported_network_features[1] =
3899 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
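/* netdev->features is a 64-bit mask reported to the device as two
 * 32-bit words; e.g. feature bit 37 ends up as bit 5 of
 * supported_network_features[1].
 */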
3902 static void ena_timer_service(struct timer_list *t)
3904 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
3905 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
3906 struct ena_admin_host_info *host_info =
3907 adapter->ena_dev->host_attr.host_info;
3909 check_for_missing_keep_alive(adapter);
3911 check_for_admin_com_state(adapter);
3913 check_for_missing_completions(adapter);
3915 check_for_empty_rx_ring(adapter);
3918 ena_dump_stats_to_buf(adapter, debug_area);
3921 ena_update_host_info(host_info, adapter->netdev);
3923 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3924 netif_err(adapter, drv, adapter->netdev,
3925 "Trigger reset is on\n");
3926 ena_dump_stats_to_dmesg(adapter);
3927 queue_work(ena_wq, &adapter->reset_task);
3931 /* Reset the timer */
3932 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3935 static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
3936 struct ena_com_dev *ena_dev,
3937 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3939 u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
3941 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3942 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3943 &get_feat_ctx->max_queue_ext.max_queue_ext;
3944 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
3945 max_queue_ext->max_rx_cq_num);
3947 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
3948 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
3950 struct ena_admin_queue_feature_desc *max_queues =
3951 &get_feat_ctx->max_queues;
3952 io_tx_sq_num = max_queues->max_sq_num;
3953 io_tx_cq_num = max_queues->max_cq_num;
3954 io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
3957 /* In case of LLQ use the llq fields for the tx SQ/CQ */
3958 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3959 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
3961 max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3962 max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
3963 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
3964 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
/* One IRQ for mgmnt and one IRQ for each IO queue */
3966 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
3967 if (unlikely(!max_num_io_queues)) {
3968 dev_err(&pdev->dev, "The device doesn't have io queues\n");
3972 return max_num_io_queues;
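/* Worked example (assumed device limits): with 16 online CPUs, a device
 * exposing 32 RX/TX SQs and CQs, and 9 MSI-X vectors, the result is
 * min(16, 32, 32, 32, 9 - 1) = 8 IO queues.
 */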
3975 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3976 struct net_device *netdev)
3978 netdev_features_t dev_features = 0;
3980 /* Set offload features */
3981 if (feat->offload.tx &
3982 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3983 dev_features |= NETIF_F_IP_CSUM;
3985 if (feat->offload.tx &
3986 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3987 dev_features |= NETIF_F_IPV6_CSUM;
3989 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3990 dev_features |= NETIF_F_TSO;
3992 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3993 dev_features |= NETIF_F_TSO6;
3995 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3996 dev_features |= NETIF_F_TSO_ECN;
3998 if (feat->offload.rx_supported &
3999 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
4000 dev_features |= NETIF_F_RXCSUM;
4002 if (feat->offload.rx_supported &
4003 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
4004 dev_features |= NETIF_F_RXCSUM;
4012 netdev->hw_features |= netdev->features;
4013 netdev->vlan_features |= netdev->features;
4016 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
4017 struct ena_com_dev_get_features_ctx *feat)
4019 struct net_device *netdev = adapter->netdev;
4021 /* Copy mac address */
4022 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
4023 eth_hw_addr_random(netdev);
4024 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
4026 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
4027 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
4030 /* Set offload features */
4031 ena_set_dev_offloads(feat, netdev);
4033 adapter->max_mtu = feat->dev_attr.max_mtu;
4034 netdev->max_mtu = adapter->max_mtu;
4035 netdev->min_mtu = ENA_MIN_MTU;
4038 static int ena_rss_init_default(struct ena_adapter *adapter)
4040 struct ena_com_dev *ena_dev = adapter->ena_dev;
4041 struct device *dev = &adapter->pdev->dev;
4045 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
4047 dev_err(dev, "Cannot init indirect table\n");
4051 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
4052 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
4053 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
4054 ENA_IO_RXQ_IDX(val));
4055 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4056 dev_err(dev, "Cannot fill indirect table\n");
4057 goto err_fill_indir;
4061 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
4062 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
4063 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4064 dev_err(dev, "Cannot fill hash function\n");
4065 goto err_fill_indir;
4068 rc = ena_com_set_default_hash_ctrl(ena_dev);
4069 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4070 dev_err(dev, "Cannot fill hash control\n");
4071 goto err_fill_indir;
4077 ena_com_rss_destroy(ena_dev);
4083 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
4085 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4087 pci_release_selected_regions(pdev, release_bars);
4091 static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
4093 struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
4094 struct ena_com_dev *ena_dev = ctx->ena_dev;
4095 u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
4096 u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
4097 u32 max_tx_queue_size;
4098 u32 max_rx_queue_size;
4100 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
4101 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
4102 &ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
4103 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
4104 max_queue_ext->max_rx_sq_depth);
4105 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
4107 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4108 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4109 llq->max_llq_depth);
4111 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4112 max_queue_ext->max_tx_sq_depth);
4114 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4115 max_queue_ext->max_per_packet_tx_descs);
4116 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4117 max_queue_ext->max_per_packet_rx_descs);
4119 struct ena_admin_queue_feature_desc *max_queues =
4120 &ctx->get_feat_ctx->max_queues;
4121 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
4122 max_queues->max_sq_depth);
4123 max_tx_queue_size = max_queues->max_cq_depth;
4125 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4126 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4127 llq->max_llq_depth);
4129 max_tx_queue_size = min_t(u32, max_tx_queue_size,
4130 max_queues->max_sq_depth);
4132 ctx->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4133 max_queues->max_packet_tx_descs);
4134 ctx->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
4135 max_queues->max_packet_rx_descs);
4138 max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
4139 max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
4141 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
4143 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
4146 tx_queue_size = rounddown_pow_of_two(tx_queue_size);
4147 rx_queue_size = rounddown_pow_of_two(rx_queue_size);
4149 ctx->max_tx_queue_size = max_tx_queue_size;
4150 ctx->max_rx_queue_size = max_rx_queue_size;
4151 ctx->tx_queue_size = tx_queue_size;
4152 ctx->rx_queue_size = rx_queue_size;
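/* Worked example (assumed device limits): with max SQ/CQ depths of
 * 4096, max_tx_queue_size and max_rx_queue_size round down to 4096
 * (already a power of two), while the initial tx/rx sizes stay at
 * ENA_DEFAULT_RING_SIZE after clamping.
 */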
4157 /* ena_probe - Device Initialization Routine
4158 * @pdev: PCI device information struct
4159 * @ent: entry in ena_pci_tbl
4161 * Returns 0 on success, negative on failure
4163 * ena_probe initializes an adapter identified by a pci_dev structure.
4164 * The OS initialization, configuring of the adapter private structure,
4165 * and a hardware reset occur.
4167 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4169 struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
4170 struct ena_com_dev_get_features_ctx get_feat_ctx;
4171 struct ena_com_dev *ena_dev = NULL;
4172 struct ena_adapter *adapter;
4173 struct net_device *netdev;
4174 static int adapters_found;
4175 u32 max_num_io_queues;
4176 char *queue_type_str;
4180 dev_dbg(&pdev->dev, "%s\n", __func__);
4182 rc = pci_enable_device_mem(pdev);
4184 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
4188 pci_set_master(pdev);
4190 ena_dev = vzalloc(sizeof(*ena_dev));
4193 goto err_disable_device;
4196 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4197 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
4199 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
4201 goto err_free_ena_dev;
4204 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
4205 pci_resource_start(pdev, ENA_REG_BAR),
4206 pci_resource_len(pdev, ENA_REG_BAR));
4207 if (!ena_dev->reg_bar) {
4208 dev_err(&pdev->dev, "failed to remap regs bar\n");
4210 goto err_free_region;
4213 ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
4215 ena_dev->dmadev = &pdev->dev;
4217 rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
4219 dev_err(&pdev->dev, "ena device init failed\n");
4222 goto err_free_region;
4225 rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
4227 dev_err(&pdev->dev, "ena llq bar mapping failed\n");
4228 goto err_free_ena_dev;
4231 calc_queue_ctx.ena_dev = ena_dev;
4232 calc_queue_ctx.get_feat_ctx = &get_feat_ctx;
4233 calc_queue_ctx.pdev = pdev;
4235 /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
4236 * Updated during device initialization with the real granularity
4238 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
4239 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
4240 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
4241 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
4242 rc = ena_calc_io_queue_size(&calc_queue_ctx);
4243 if (rc || !max_num_io_queues) {
4245 goto err_device_destroy;
4248 /* dev zeroed in init_etherdev */
4249 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), max_num_io_queues);
4251 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
4253 goto err_device_destroy;
4256 SET_NETDEV_DEV(netdev, &pdev->dev);
4258 adapter = netdev_priv(netdev);
4259 pci_set_drvdata(pdev, adapter);
4261 adapter->ena_dev = ena_dev;
4262 adapter->netdev = netdev;
4263 adapter->pdev = pdev;
4265 ena_set_conf_feat_params(adapter, &get_feat_ctx);
4267 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4268 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
4270 adapter->requested_tx_ring_size = calc_queue_ctx.tx_queue_size;
4271 adapter->requested_rx_ring_size = calc_queue_ctx.rx_queue_size;
4272 adapter->max_tx_ring_size = calc_queue_ctx.max_tx_queue_size;
4273 adapter->max_rx_ring_size = calc_queue_ctx.max_rx_queue_size;
4274 adapter->max_tx_sgl_size = calc_queue_ctx.max_tx_sgl_size;
4275 adapter->max_rx_sgl_size = calc_queue_ctx.max_rx_sgl_size;
4277 adapter->num_io_queues = max_num_io_queues;
4278 adapter->max_num_io_queues = max_num_io_queues;
4279 adapter->last_monitored_tx_qid = 0;
4281 adapter->xdp_first_ring = 0;
4282 adapter->xdp_num_queues = 0;
4284 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
4285 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4286 adapter->disable_meta_caching =
4287 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
4288 BIT(ENA_ADMIN_DISABLE_META_CACHING));
4290 adapter->wd_state = wd_state;
4292 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
4294 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
4297 "Failed to query interrupt moderation feature\n");
4298 goto err_netdev_destroy;
4300 ena_init_io_rings(adapter,
4302 adapter->xdp_num_queues +
4303 adapter->num_io_queues);
4305 netdev->netdev_ops = &ena_netdev_ops;
4306 netdev->watchdog_timeo = TX_TIMEOUT;
4307 ena_set_ethtool_ops(netdev);
4309 netdev->priv_flags |= IFF_UNICAST_FLT;
4311 u64_stats_init(&adapter->syncp);
4313 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
4316 "Failed to enable and set the admin interrupts\n");
4317 goto err_worker_destroy;
4319 rc = ena_rss_init_default(adapter);
4320 if (rc && (rc != -EOPNOTSUPP)) {
4321 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
4325 ena_config_debug_area(adapter);
4327 if (!ena_update_hw_stats(adapter))
4328 adapter->eni_stats_supported = true;
4330 adapter->eni_stats_supported = false;
4332 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
4334 netif_carrier_off(netdev);
4336 rc = register_netdev(netdev);
4338 dev_err(&pdev->dev, "Cannot register net device\n");
4342 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
4344 adapter->last_keep_alive_jiffies = jiffies;
4345 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
4346 adapter->missing_tx_completion_to = TX_TIMEOUT;
4347 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
4349 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
4351 timer_setup(&adapter->timer_service, ena_timer_service, 0);
4352 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
4354 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
4355 queue_type_str = "Regular";
4357 queue_type_str = "Low Latency";
4359 dev_info(&pdev->dev,
4360 "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
4361 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
4362 netdev->dev_addr, queue_type_str);
4364 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
4371 ena_com_delete_debug_area(ena_dev);
4372 ena_com_rss_destroy(ena_dev);
4374 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
4375 /* stop submitting admin commands on a device that was reset */
4376 ena_com_set_admin_running_state(ena_dev, false);
4377 ena_free_mgmnt_irq(adapter);
4378 ena_disable_msix(adapter);
4380 del_timer(&adapter->timer_service);
4382 free_netdev(netdev);
4384 ena_com_delete_host_info(ena_dev);
4385 ena_com_admin_destroy(ena_dev);
4387 ena_release_bars(ena_dev, pdev);
4391 pci_disable_device(pdev);
4395 /*****************************************************************************/
4397 /* __ena_shutoff - Helper used in both PCI remove/shutdown routines
4398 * @pdev: PCI device information struct
 * @shutdown: Is it a shutdown operation? If false, it is a removal.
 *
 * __ena_shutoff is a helper routine that does the real work on the shutdown
 * and removal paths; the difference between those paths is whether to detach
 * or unregister the netdevice.
static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
{
	struct ena_adapter *adapter = pci_get_drvdata(pdev);
	struct ena_com_dev *ena_dev;
	struct net_device *netdev;

	ena_dev = adapter->ena_dev;
	netdev = adapter->netdev;

#ifdef CONFIG_RFS_ACCEL
	if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
		free_irq_cpu_rmap(netdev->rx_cpu_rmap);
		netdev->rx_cpu_rmap = NULL;
	}
#endif /* CONFIG_RFS_ACCEL */

	/* Make sure timer and reset routine won't be called after
	 * freeing device resources.
	 */
	del_timer_sync(&adapter->timer_service);
	cancel_work_sync(&adapter->reset_task);

	rtnl_lock(); /* lock released inside the below if-else block */
	adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
	ena_destroy_device(adapter, true);
	if (shutdown) {
		netif_device_detach(netdev);
		dev_close(netdev);
		rtnl_unlock();
	} else {
		rtnl_unlock();
		unregister_netdev(netdev);
		free_netdev(netdev);
	}

	ena_com_rss_destroy(ena_dev);

	ena_com_delete_debug_area(ena_dev);

	ena_com_delete_host_info(ena_dev);

	ena_release_bars(ena_dev, pdev);

	pci_disable_device(pdev);
}

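/* Note: on the shutdown path the netdev is only detached and closed while
 * still registered; skipping unregister_netdev()/free_netdev() keeps
 * shutdown (including kexec/reboot) short, and the memory is reclaimed by
 * the system going down anyway.
 */
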
/* ena_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ena_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void ena_remove(struct pci_dev *pdev)
{
	__ena_shutoff(pdev, false);
}

/* ena_shutdown - Device Shutdown Routine
 * @pdev: PCI device information struct
 *
 * ena_shutdown is called by the PCI subsystem to alert the driver that
 * a shutdown/reboot (or kexec) is happening and the device must be disabled.
 */
static void ena_shutdown(struct pci_dev *pdev)
{
	__ena_shutoff(pdev, true);
}

/* ena_suspend - PM suspend callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct ena_adapter *adapter = pci_get_drvdata(pdev);

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.suspend++;
	u64_stats_update_end(&adapter->syncp);

	rtnl_lock();
	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
		dev_err(&pdev->dev,
			"ignoring device reset request as the device is being suspended\n");
		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
	}
	ena_destroy_device(adapter, true);
	rtnl_unlock();
	return 0;
}

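/* Note: suspend performs a full device teardown rather than a lightweight
 * quiesce; a reset request pending at that point is dropped because the
 * teardown supersedes it, and ena_resume() below is expected to bring the
 * device back up from scratch via ena_restore_device().
 */
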
/* ena_resume - PM resume callback
 * @dev_d: Device information struct
 */
static int __maybe_unused ena_resume(struct device *dev_d)
{
	struct ena_adapter *adapter = dev_get_drvdata(dev_d);
	int rc;

	u64_stats_update_begin(&adapter->syncp);
	adapter->dev_stats.resume++;
	u64_stats_update_end(&adapter->syncp);

	rtnl_lock();
	rc = ena_restore_device(adapter);
	rtnl_unlock();

	return rc;
}

static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);

static struct pci_driver ena_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ena_pci_tbl,
	.probe		= ena_probe,
	.remove		= ena_remove,
	.shutdown	= ena_shutdown,
	.driver.pm	= &ena_pm_ops,
	.sriov_configure = pci_sriov_configure_simple,
};

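/* Note: pci_sriov_configure_simple is the generic PCI core helper that
 * just enables/disables the requested number of VFs; the driver needs no
 * VF-specific bookkeeping of its own here.
 */
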
static int __init ena_init(void)
{
	ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
	if (!ena_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&ena_pci_driver);
}

static void __exit ena_cleanup(void)
{
	pci_unregister_driver(&ena_pci_driver);

	destroy_workqueue(ena_wq);
}

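/* Note: the ordering above matters; the driver is unregistered first so
 * that no further reset work can be queued on ena_wq before the workqueue
 * is destroyed.
 */
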
/******************************************************************************
 ******************************** AENQ Handlers *******************************
 *****************************************************************************/
/* ena_update_on_link_change:
 * Notify the network interface about the change in link status
 */
static void ena_update_on_link_change(void *adapter_data,
				      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_link_change_desc *aenq_desc =
		(struct ena_admin_aenq_link_change_desc *)aenq_e;
	int status = aenq_desc->flags &
		ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;

	if (status) {
		netdev_dbg(adapter->netdev, "%s\n", __func__);
		set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
			netif_carrier_on(adapter->netdev);
	} else {
		clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
		netif_carrier_off(adapter->netdev);
	}
}

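/* Note: while a reset is in flight, only ENA_FLAG_LINK_UP is recorded and
 * the carrier is left off; the restore path is expected to re-check the
 * flag and turn the carrier on once the reset completes.
 */
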
static void ena_keep_alive_wd(void *adapter_data,
			      struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_aenq_keep_alive_desc *desc;
	u64 rx_drops;
	u64 tx_drops;

	desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
	adapter->last_keep_alive_jiffies = jiffies;

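	/* The device reports each drop counter as two 32-bit halves;
	 * assemble the full 64-bit values below before publishing them.
	 */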
	rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
	tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;

	u64_stats_update_begin(&adapter->syncp);
	/* These stats are accumulated by the device, so the counters indicate
	 * all drops since the last device reset.
	 */
	adapter->dev_stats.rx_drops = rx_drops;
	adapter->dev_stats.tx_drops = tx_drops;
	u64_stats_update_end(&adapter->syncp);
}

static void ena_notification(void *adapter_data,
			     struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
	struct ena_admin_ena_hw_hints *hints;

	WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
	     "Invalid group(%x) expected %x\n",
	     aenq_e->aenq_common_desc.group,
	     ENA_ADMIN_NOTIFICATION);

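	/* Note: "syndrom" (sic) is the actual field name in the ENA admin
	 * interface definitions, so it is kept as-is here.
	 */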
	switch (aenq_e->aenq_common_desc.syndrom) {
	case ENA_ADMIN_UPDATE_HINTS:
		hints = (struct ena_admin_ena_hw_hints *)
			(&aenq_e->inline_data_w4);
		ena_update_hints(adapter, hints);
		break;
	default:
		netif_err(adapter, drv, adapter->netdev,
			  "Invalid aenq notification syndrome %d\n",
			  aenq_e->aenq_common_desc.syndrom);
	}
}

/* This handler is called for an unknown event group or for events with
 * unimplemented handlers.
 */
static void unimplemented_aenq_handler(void *data,
				       struct ena_admin_aenq_entry *aenq_e)
{
	struct ena_adapter *adapter = (struct ena_adapter *)data;

	netif_err(adapter, drv, adapter->netdev,
		  "Unknown event was received or event with unimplemented handler\n");
}

static struct ena_aenq_handlers aenq_handlers = {
	.handlers = {
		[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
		[ENA_ADMIN_NOTIFICATION] = ena_notification,
		[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

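/* Note: the AENQ dispatcher in the ena_com layer indexes this table by the
 * event's group; any group without an entry above is routed to
 * unimplemented_aenq_handler instead of being silently dropped.
 */
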
module_init(ena_init);
module_exit(ena_cleanup);