1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #ifdef CONFIG_RFS_ACCEL
9 #include <linux/cpu_rmap.h>
10 #endif /* CONFIG_RFS_ACCEL */
11 #include <linux/ethtool.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/numa.h>
15 #include <linux/pci.h>
16 #include <linux/utsname.h>
17 #include <linux/version.h>
18 #include <linux/vmalloc.h>
21 #include "ena_netdev.h"
22 #include <linux/bpf_trace.h>
23 #include "ena_pci_id_tbl.h"
25 MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
26 MODULE_DESCRIPTION(DEVICE_NAME);
27 MODULE_LICENSE("GPL");
29 /* Time in jiffies before concluding the transmitter is hung. */
30 #define TX_TIMEOUT (5 * HZ)
32 #define ENA_MAX_RINGS min_t(unsigned int, ENA_MAX_NUM_IO_QUEUES, num_possible_cpus())
34 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
35 NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
37 static struct ena_aenq_handlers aenq_handlers;
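/* Workqueue on which the driver schedules its deferred reset work */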
39 static struct workqueue_struct *ena_wq;
41 MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
43 static int ena_rss_init_default(struct ena_adapter *adapter);
44 static void check_for_admin_com_state(struct ena_adapter *adapter);
45 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
46 static int ena_restore_device(struct ena_adapter *adapter);
48 static void ena_init_io_rings(struct ena_adapter *adapter,
49 int first_index, int count);
50 static void ena_init_napi_in_range(struct ena_adapter *adapter, int first_index,
52 static void ena_del_napi_in_range(struct ena_adapter *adapter, int first_index,
54 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid);
55 static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
58 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid);
59 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid);
60 static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget);
61 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter);
62 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
63 static void ena_napi_disable_in_range(struct ena_adapter *adapter,
64 int first_index, int count);
65 static void ena_napi_enable_in_range(struct ena_adapter *adapter,
66 int first_index, int count);
67 static int ena_up(struct ena_adapter *adapter);
68 static void ena_down(struct ena_adapter *adapter);
69 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
70 struct ena_ring *rx_ring);
71 static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
72 struct ena_ring *rx_ring);
73 static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
74 struct ena_tx_buffer *tx_info);
75 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
76 int first_index, int count);
77 static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
78 int first_index, int count);
80 /* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
81 static void ena_increase_stat(u64 *statp, u64 cnt,
82 struct u64_stats_sync *syncp)
u64_stats_update_begin(syncp);
(*statp) += cnt;
u64_stats_update_end(syncp);
89 static void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
91 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
92 ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
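/* ndo_tx_timeout handler: the stack detected a stuck TX queue, so flag the
 * adapter for reset; the actual recovery is performed by the driver's reset
 * work scheduled from the watchdog path.
 */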
95 static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
97 struct ena_adapter *adapter = netdev_priv(dev);
/* Change the state of the device to trigger a reset.
 * Check that we are not already in the middle of a reset and that a reset
 * was not already triggered.
 */
103 if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
106 ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD);
107 ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
netif_err(adapter, tx_err, dev, "Transmit timed out\n");
112 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
116 for (i = 0; i < adapter->num_io_queues; i++)
117 adapter->rx_ring[i].mtu = mtu;
120 static int ena_change_mtu(struct net_device *dev, int new_mtu)
122 struct ena_adapter *adapter = netdev_priv(dev);
125 ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
127 netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
128 update_rx_ring_mtu(adapter, new_mtu);
131 netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
138 static int ena_xmit_common(struct net_device *dev,
139 struct ena_ring *ring,
140 struct ena_tx_buffer *tx_info,
141 struct ena_com_tx_ctx *ena_tx_ctx,
145 struct ena_adapter *adapter = netdev_priv(dev);
148 if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq,
150 netif_dbg(adapter, tx_queued, dev,
151 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
153 ena_ring_tx_doorbell(ring);
156 /* prepare the packet's descriptors to dma engine */
157 rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx,
/* In case there isn't enough space in the queue for the packet,
 * we simply drop it. All other failure reasons of
 * ena_com_prepare_tx() are fatal and therefore require a device reset.
 */
if (unlikely(rc)) {
165 netif_err(adapter, tx_queued, dev,
166 "Failed to prepare tx bufs\n");
167 ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
170 ena_reset_device(adapter,
171 ENA_REGS_RESET_DRIVER_INVALID_STATE);
175 u64_stats_update_begin(&ring->syncp);
176 ring->tx_stats.cnt++;
177 ring->tx_stats.bytes += bytes;
178 u64_stats_update_end(&ring->syncp);
180 tx_info->tx_descs = nb_hw_desc;
181 tx_info->last_jiffies = jiffies;
182 tx_info->print_once = 0;
184 ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
/* This is the XDP napi callback. XDP queues use a napi callback separate
 * from the one used by the regular (non-XDP) queues.
 */
192 static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
194 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
195 u32 xdp_work_done, xdp_budget;
196 struct ena_ring *xdp_ring;
197 int napi_comp_call = 0;
200 xdp_ring = ena_napi->xdp_ring;
204 if (!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags) ||
205 test_bit(ENA_FLAG_TRIGGER_RESET, &xdp_ring->adapter->flags)) {
206 napi_complete_done(napi, 0);
210 xdp_work_done = ena_clean_xdp_irq(xdp_ring, xdp_budget);
/* If the device is about to reset or is down, avoid unmasking
 * the interrupt and return 0 so NAPI won't reschedule.
 */
215 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &xdp_ring->adapter->flags))) {
216 napi_complete_done(napi, 0);
218 } else if (xdp_budget > xdp_work_done) {
220 if (napi_complete_done(napi, xdp_work_done))
221 ena_unmask_interrupt(xdp_ring, NULL);
222 ena_update_ring_numa_node(xdp_ring, NULL);
228 u64_stats_update_begin(&xdp_ring->syncp);
229 xdp_ring->tx_stats.napi_comp += napi_comp_call;
230 xdp_ring->tx_stats.tx_poll++;
231 u64_stats_update_end(&xdp_ring->syncp);
232 xdp_ring->tx_stats.last_napi_jiffies = jiffies;
237 static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
238 struct ena_tx_buffer *tx_info,
239 struct xdp_frame *xdpf,
240 struct ena_com_tx_ctx *ena_tx_ctx)
242 struct ena_adapter *adapter = xdp_ring->adapter;
243 struct ena_com_buf *ena_buf;
249 tx_info->xdpf = xdpf;
250 data = tx_info->xdpf->data;
251 size = tx_info->xdpf->len;
253 if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
254 /* Designate part of the packet for LLQ */
255 push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
257 ena_tx_ctx->push_header = data;
263 ena_tx_ctx->header_len = push_len;
266 dma = dma_map_single(xdp_ring->dev,
270 if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
271 goto error_report_dma_error;
273 tx_info->map_linear_data = 0;
275 ena_buf = tx_info->bufs;
276 ena_buf->paddr = dma;
279 ena_tx_ctx->ena_bufs = ena_buf;
280 ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
285 error_report_dma_error:
286 ena_increase_stat(&xdp_ring->tx_stats.dma_mapping_err, 1,
288 netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
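/* Transmit a single XDP frame on an XDP TX ring. The caller must hold
 * xdp_ring->xdp_tx_lock, since XDP_TX and ndo_xdp_xmit (XDP_REDIRECT
 * targets) may post to the same ring from different CPUs.
 */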
293 static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
294 struct net_device *dev,
295 struct xdp_frame *xdpf,
298 struct ena_com_tx_ctx ena_tx_ctx = {};
299 struct ena_tx_buffer *tx_info;
300 u16 next_to_use, req_id;
303 next_to_use = xdp_ring->next_to_use;
304 req_id = xdp_ring->free_ids[next_to_use];
305 tx_info = &xdp_ring->tx_buffer_info[req_id];
306 tx_info->num_of_bufs = 0;
308 rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
312 ena_tx_ctx.req_id = req_id;
314 rc = ena_xmit_common(dev,
321 goto error_unmap_dma;
323 /* trigger the dma engine. ena_ring_tx_doorbell()
324 * calls a memory barrier inside it.
326 if (flags & XDP_XMIT_FLUSH)
327 ena_ring_tx_doorbell(xdp_ring);
332 ena_unmap_tx_buff(xdp_ring, tx_info);
333 tx_info->xdpf = NULL;
337 static int ena_xdp_xmit(struct net_device *dev, int n,
338 struct xdp_frame **frames, u32 flags)
340 struct ena_adapter *adapter = netdev_priv(dev);
341 struct ena_ring *xdp_ring;
342 int qid, i, nxmit = 0;
344 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
347 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
350 /* We assume that all rings have the same XDP program */
351 if (!READ_ONCE(adapter->rx_ring->xdp_bpf_prog))
354 qid = smp_processor_id() % adapter->xdp_num_queues;
355 qid += adapter->xdp_first_ring;
356 xdp_ring = &adapter->tx_ring[qid];
/* Other CPU ids might try to send through this queue */
359 spin_lock(&xdp_ring->xdp_tx_lock);
361 for (i = 0; i < n; i++) {
362 if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
367 /* Ring doorbell to make device aware of the packets */
368 if (flags & XDP_XMIT_FLUSH)
369 ena_ring_tx_doorbell(xdp_ring);
371 spin_unlock(&xdp_ring->xdp_tx_lock);
373 /* Return number of packets sent */
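/* Run the attached XDP program on a received buffer and act on its verdict:
 * XDP_TX posts the frame on the paired XDP ring, XDP_REDIRECT hands it to
 * xdp_do_redirect(), and the ENA_XDP_* return value tells the RX path what
 * was done with the buffer.
 */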
377 static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
379 u32 verdict = ENA_XDP_PASS;
380 struct bpf_prog *xdp_prog;
381 struct ena_ring *xdp_ring;
382 struct xdp_frame *xdpf;
385 xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
390 verdict = bpf_prog_run_xdp(xdp_prog, xdp);
394 xdpf = xdp_convert_buff_to_frame(xdp);
395 if (unlikely(!xdpf)) {
396 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
397 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
398 verdict = ENA_XDP_DROP;
402 /* Find xmit queue */
403 xdp_ring = rx_ring->xdp_ring;
405 /* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
406 spin_lock(&xdp_ring->xdp_tx_lock);
408 if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
410 xdp_return_frame(xdpf);
412 spin_unlock(&xdp_ring->xdp_tx_lock);
413 xdp_stat = &rx_ring->rx_stats.xdp_tx;
414 verdict = ENA_XDP_TX;
417 if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
418 xdp_stat = &rx_ring->rx_stats.xdp_redirect;
419 verdict = ENA_XDP_REDIRECT;
422 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
423 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
424 verdict = ENA_XDP_DROP;
427 trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
428 xdp_stat = &rx_ring->rx_stats.xdp_aborted;
429 verdict = ENA_XDP_DROP;
432 xdp_stat = &rx_ring->rx_stats.xdp_drop;
433 verdict = ENA_XDP_DROP;
436 xdp_stat = &rx_ring->rx_stats.xdp_pass;
437 verdict = ENA_XDP_PASS;
440 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
441 xdp_stat = &rx_ring->rx_stats.xdp_invalid;
442 verdict = ENA_XDP_DROP;
445 ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
450 static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
452 adapter->xdp_first_ring = adapter->num_io_queues;
453 adapter->xdp_num_queues = adapter->num_io_queues;
455 ena_init_io_rings(adapter,
456 adapter->xdp_first_ring,
457 adapter->xdp_num_queues);
460 static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
462 u32 xdp_first_ring = adapter->xdp_first_ring;
463 u32 xdp_num_queues = adapter->xdp_num_queues;
466 rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
470 rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
477 ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
482 /* Provides a way for both kernel and bpf-prog to know
483 * more about the RX-queue a given XDP frame arrived on.
485 static int ena_xdp_register_rxq_info(struct ena_ring *rx_ring)
489 rc = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, rx_ring->qid, 0);
492 netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
493 "Failed to register xdp rx queue info. RX queue num %d rc: %d\n",
498 rc = xdp_rxq_info_reg_mem_model(&rx_ring->xdp_rxq, MEM_TYPE_PAGE_SHARED,
502 netif_err(rx_ring->adapter, ifup, rx_ring->netdev,
503 "Failed to register xdp rx queue info memory model. RX queue num %d rc: %d\n",
505 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
512 static void ena_xdp_unregister_rxq_info(struct ena_ring *rx_ring)
514 xdp_rxq_info_unreg_mem_model(&rx_ring->xdp_rxq);
515 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
518 static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
519 struct bpf_prog *prog,
520 int first, int count)
522 struct bpf_prog *old_bpf_prog;
523 struct ena_ring *rx_ring;
526 for (i = first; i < count; i++) {
527 rx_ring = &adapter->rx_ring[i];
528 old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
530 if (!old_bpf_prog && prog) {
531 ena_xdp_register_rxq_info(rx_ring);
532 rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
533 } else if (old_bpf_prog && !prog) {
534 ena_xdp_unregister_rxq_info(rx_ring);
535 rx_ring->rx_headroom = NET_SKB_PAD;
540 static void ena_xdp_exchange_program(struct ena_adapter *adapter,
541 struct bpf_prog *prog)
543 struct bpf_prog *old_bpf_prog = xchg(&adapter->xdp_bpf_prog, prog);
545 ena_xdp_exchange_program_rx_in_range(adapter,
548 adapter->num_io_queues);
551 bpf_prog_put(old_bpf_prog);
554 static int ena_destroy_and_free_all_xdp_queues(struct ena_adapter *adapter)
559 was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
564 adapter->xdp_first_ring = 0;
565 adapter->xdp_num_queues = 0;
566 ena_xdp_exchange_program(adapter, NULL);
568 rc = ena_up(adapter);
575 static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
577 struct ena_adapter *adapter = netdev_priv(netdev);
578 struct bpf_prog *prog = bpf->prog;
579 struct bpf_prog *old_bpf_prog;
583 is_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
584 rc = ena_xdp_allowed(adapter);
585 if (rc == ENA_XDP_ALLOWED) {
586 old_bpf_prog = adapter->xdp_bpf_prog;
589 ena_init_all_xdp_queues(adapter);
590 } else if (!old_bpf_prog) {
592 ena_init_all_xdp_queues(adapter);
594 ena_xdp_exchange_program(adapter, prog);
596 if (is_up && !old_bpf_prog) {
597 rc = ena_up(adapter);
601 xdp_features_set_redirect_target(netdev, false);
602 } else if (old_bpf_prog) {
603 xdp_features_clear_redirect_target(netdev);
604 rc = ena_destroy_and_free_all_xdp_queues(adapter);
609 prev_mtu = netdev->max_mtu;
610 netdev->max_mtu = prog ? ENA_XDP_MAX_MTU : adapter->max_mtu;
613 netif_info(adapter, drv, adapter->netdev,
614 "XDP program is set, changing the max_mtu from %d to %d",
615 prev_mtu, netdev->max_mtu);
617 } else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
618 netif_err(adapter, drv, adapter->netdev,
619 "Failed to set xdp program, the current MTU (%d) is larger than the maximum allowed MTU (%lu) while xdp is on",
620 netdev->mtu, ENA_XDP_MAX_MTU);
621 NL_SET_ERR_MSG_MOD(bpf->extack,
622 "Failed to set xdp program, the current MTU is larger than the maximum allowed MTU. Check the dmesg for more info");
624 } else if (rc == ENA_XDP_NO_ENOUGH_QUEUES) {
625 netif_err(adapter, drv, adapter->netdev,
626 "Failed to set xdp program, the Rx/Tx channel count should be at most half of the maximum allowed channel count. The current queue count (%d), the maximal queue count (%d)\n",
627 adapter->num_io_queues, adapter->max_num_io_queues);
628 NL_SET_ERR_MSG_MOD(bpf->extack,
629 "Failed to set xdp program, there is no enough space for allocating XDP queues, Check the dmesg for more info");
636 /* This is the main xdp callback, it's used by the kernel to set/unset the xdp
637 * program as well as to query the current xdp program id.
639 static int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
641 switch (bpf->command) {
643 return ena_xdp_set(netdev, bpf);
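/* Allocate and populate the CPU rmap used by accelerated RFS (aRFS), so
 * received flows can be steered to the CPU servicing each queue's IRQ.
 */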
650 static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
652 #ifdef CONFIG_RFS_ACCEL
656 adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues);
657 if (!adapter->netdev->rx_cpu_rmap)
659 for (i = 0; i < adapter->num_io_queues; i++) {
660 int irq_idx = ENA_IO_IRQ_IDX(i);
662 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
663 pci_irq_vector(adapter->pdev, irq_idx));
665 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
666 adapter->netdev->rx_cpu_rmap = NULL;
670 #endif /* CONFIG_RFS_ACCEL */
674 static void ena_init_io_rings_common(struct ena_adapter *adapter,
675 struct ena_ring *ring, u16 qid)
678 ring->pdev = adapter->pdev;
679 ring->dev = &adapter->pdev->dev;
680 ring->netdev = adapter->netdev;
681 ring->napi = &adapter->ena_napi[qid].napi;
682 ring->adapter = adapter;
683 ring->ena_dev = adapter->ena_dev;
684 ring->per_napi_packets = 0;
687 ring->no_interrupt_event_cnt = 0;
688 u64_stats_init(&ring->syncp);
691 static void ena_init_io_rings(struct ena_adapter *adapter,
692 int first_index, int count)
694 struct ena_com_dev *ena_dev;
695 struct ena_ring *txr, *rxr;
698 ena_dev = adapter->ena_dev;
700 for (i = first_index; i < first_index + count; i++) {
701 txr = &adapter->tx_ring[i];
702 rxr = &adapter->rx_ring[i];
704 /* TX common ring state */
705 ena_init_io_rings_common(adapter, txr, i);
707 /* TX specific ring state */
708 txr->ring_size = adapter->requested_tx_ring_size;
709 txr->tx_max_header_size = ena_dev->tx_max_header_size;
710 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
711 txr->sgl_size = adapter->max_tx_sgl_size;
712 txr->smoothed_interval =
713 ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
714 txr->disable_meta_caching = adapter->disable_meta_caching;
715 spin_lock_init(&txr->xdp_tx_lock);
717 /* Don't init RX queues for xdp queues */
718 if (!ENA_IS_XDP_INDEX(adapter, i)) {
719 /* RX common ring state */
720 ena_init_io_rings_common(adapter, rxr, i);
722 /* RX specific ring state */
723 rxr->ring_size = adapter->requested_rx_ring_size;
724 rxr->rx_copybreak = adapter->rx_copybreak;
725 rxr->sgl_size = adapter->max_rx_sgl_size;
726 rxr->smoothed_interval =
727 ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
728 rxr->empty_rx_queue = 0;
729 rxr->rx_headroom = NET_SKB_PAD;
730 adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
731 rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
736 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
737 * @adapter: network interface device structure
740 * Return 0 on success, negative on failure
742 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
744 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
745 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
748 if (tx_ring->tx_buffer_info) {
749 netif_err(adapter, ifup,
adapter->netdev, "tx_buffer_info is not NULL");
754 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
755 node = cpu_to_node(ena_irq->cpu);
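/* Prefer allocations on the NUMA node of the queue's IRQ CPU; fall back
 * to any node if the node-local allocation fails.
 */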
757 tx_ring->tx_buffer_info = vzalloc_node(size, node);
758 if (!tx_ring->tx_buffer_info) {
759 tx_ring->tx_buffer_info = vzalloc(size);
760 if (!tx_ring->tx_buffer_info)
761 goto err_tx_buffer_info;
764 size = sizeof(u16) * tx_ring->ring_size;
765 tx_ring->free_ids = vzalloc_node(size, node);
766 if (!tx_ring->free_ids) {
767 tx_ring->free_ids = vzalloc(size);
768 if (!tx_ring->free_ids)
769 goto err_tx_free_ids;
772 size = tx_ring->tx_max_header_size;
773 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
774 if (!tx_ring->push_buf_intermediate_buf) {
775 tx_ring->push_buf_intermediate_buf = vzalloc(size);
776 if (!tx_ring->push_buf_intermediate_buf)
777 goto err_push_buf_intermediate_buf;
780 /* Req id ring for TX out of order completions */
781 for (i = 0; i < tx_ring->ring_size; i++)
782 tx_ring->free_ids[i] = i;
784 /* Reset tx statistics */
785 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
787 tx_ring->next_to_use = 0;
788 tx_ring->next_to_clean = 0;
789 tx_ring->cpu = ena_irq->cpu;
790 tx_ring->numa_node = node;
793 err_push_buf_intermediate_buf:
794 vfree(tx_ring->free_ids);
795 tx_ring->free_ids = NULL;
797 vfree(tx_ring->tx_buffer_info);
798 tx_ring->tx_buffer_info = NULL;
803 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
804 * @adapter: network interface device structure
807 * Free all transmit software resources
809 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
811 struct ena_ring *tx_ring = &adapter->tx_ring[qid];
813 vfree(tx_ring->tx_buffer_info);
814 tx_ring->tx_buffer_info = NULL;
816 vfree(tx_ring->free_ids);
817 tx_ring->free_ids = NULL;
819 vfree(tx_ring->push_buf_intermediate_buf);
820 tx_ring->push_buf_intermediate_buf = NULL;
823 static int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
829 for (i = first_index; i < first_index + count; i++) {
830 rc = ena_setup_tx_resources(adapter, i);
839 netif_err(adapter, ifup, adapter->netdev,
840 "Tx queue %d: allocation failed\n", i);
842 /* rewind the index freeing the rings as we go */
843 while (first_index < i--)
844 ena_free_tx_resources(adapter, i);
848 static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
849 int first_index, int count)
853 for (i = first_index; i < first_index + count; i++)
854 ena_free_tx_resources(adapter, i);
857 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
858 * @adapter: board private structure
860 * Free all transmit software resources
862 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
864 ena_free_all_io_tx_resources_in_range(adapter,
866 adapter->xdp_num_queues +
867 adapter->num_io_queues);
870 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
871 * @adapter: network interface device structure
874 * Returns 0 on success, negative on failure
876 static int ena_setup_rx_resources(struct ena_adapter *adapter,
879 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
880 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
883 if (rx_ring->rx_buffer_info) {
884 netif_err(adapter, ifup, adapter->netdev,
885 "rx_buffer_info is not NULL");
889 /* alloc extra element so in rx path
890 * we can always prefetch rx_info + 1
892 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
893 node = cpu_to_node(ena_irq->cpu);
895 rx_ring->rx_buffer_info = vzalloc_node(size, node);
896 if (!rx_ring->rx_buffer_info) {
897 rx_ring->rx_buffer_info = vzalloc(size);
898 if (!rx_ring->rx_buffer_info)
902 size = sizeof(u16) * rx_ring->ring_size;
903 rx_ring->free_ids = vzalloc_node(size, node);
904 if (!rx_ring->free_ids) {
905 rx_ring->free_ids = vzalloc(size);
906 if (!rx_ring->free_ids) {
907 vfree(rx_ring->rx_buffer_info);
908 rx_ring->rx_buffer_info = NULL;
913 /* Req id ring for receiving RX pkts out of order */
914 for (i = 0; i < rx_ring->ring_size; i++)
915 rx_ring->free_ids[i] = i;
917 /* Reset rx statistics */
918 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
920 rx_ring->next_to_clean = 0;
921 rx_ring->next_to_use = 0;
922 rx_ring->cpu = ena_irq->cpu;
923 rx_ring->numa_node = node;
928 /* ena_free_rx_resources - Free I/O Rx Resources
929 * @adapter: network interface device structure
932 * Free all receive software resources
934 static void ena_free_rx_resources(struct ena_adapter *adapter,
937 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
939 vfree(rx_ring->rx_buffer_info);
940 rx_ring->rx_buffer_info = NULL;
942 vfree(rx_ring->free_ids);
943 rx_ring->free_ids = NULL;
946 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
947 * @adapter: board private structure
949 * Return 0 on success, negative on failure
951 static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
955 for (i = 0; i < adapter->num_io_queues; i++) {
956 rc = ena_setup_rx_resources(adapter, i);
965 netif_err(adapter, ifup, adapter->netdev,
966 "Rx queue %d: allocation failed\n", i);
968 /* rewind the index freeing the rings as we go */
970 ena_free_rx_resources(adapter, i);
974 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
975 * @adapter: board private structure
977 * Free all receive software resources
979 static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
983 for (i = 0; i < adapter->num_io_queues; i++)
984 ena_free_rx_resources(adapter, i);
987 static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
/* This would allocate the page on the same NUMA node the executing code
 * is running on.
 */
995 page = dev_alloc_page();
997 ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
999 return ERR_PTR(-ENOSPC);
1002 /* To enable NIC-side port-mirroring, AKA SPAN port,
1003 * we make the buffer readable from the nic as well
1005 *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
1007 if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
1008 ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
1011 return ERR_PTR(-EIO);
1017 static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
1018 struct ena_rx_buffer *rx_info)
1020 int headroom = rx_ring->rx_headroom;
1021 struct ena_com_buf *ena_buf;
1026 /* restore page offset value in case it has been changed by device */
1027 rx_info->buf_offset = headroom;
/* if the previously allocated page has not been used yet, don't allocate a new one */
1030 if (unlikely(rx_info->page))
1033 /* We handle DMA here */
1034 page = ena_alloc_map_page(rx_ring, &dma);
1035 if (unlikely(IS_ERR(page)))
1036 return PTR_ERR(page);
1038 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1039 "Allocate page %p, rx_info %p\n", page, rx_info);
1041 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1043 rx_info->page = page;
1044 rx_info->dma_addr = dma;
1045 rx_info->page_offset = 0;
1046 ena_buf = &rx_info->ena_buf;
1047 ena_buf->paddr = dma + headroom;
1048 ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;
1053 static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
1054 struct ena_rx_buffer *rx_info,
1055 unsigned long attrs)
1057 dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE,
1058 DMA_BIDIRECTIONAL, attrs);
1061 static void ena_free_rx_page(struct ena_ring *rx_ring,
1062 struct ena_rx_buffer *rx_info)
1064 struct page *page = rx_info->page;
1066 if (unlikely(!page)) {
1067 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
1068 "Trying to free unallocated buffer\n");
1072 ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0);
1075 rx_info->page = NULL;
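/* Allocate up to @num RX buffers and post them to the RX submission queue.
 * Returns the number of buffers actually posted, which may be smaller than
 * @num if an allocation fails part way through.
 */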
1078 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
1080 u16 next_to_use, req_id;
1084 next_to_use = rx_ring->next_to_use;
1086 for (i = 0; i < num; i++) {
1087 struct ena_rx_buffer *rx_info;
1089 req_id = rx_ring->free_ids[next_to_use];
1091 rx_info = &rx_ring->rx_buffer_info[req_id];
1093 rc = ena_alloc_rx_buffer(rx_ring, rx_info);
1094 if (unlikely(rc < 0)) {
1095 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
1096 "Failed to allocate buffer for rx queue %d\n",
1100 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
1104 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
1105 "Failed to add buffer for rx queue %d\n",
1109 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
1110 rx_ring->ring_size);
1113 if (unlikely(i < num)) {
1114 ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1,
1116 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
1117 "Refilled rx qid %d with only %d buffers (from %d)\n",
1118 rx_ring->qid, i, num);
1121 /* ena_com_write_sq_doorbell issues a wmb() */
1123 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
1125 rx_ring->next_to_use = next_to_use;
1130 static void ena_free_rx_bufs(struct ena_adapter *adapter,
1133 struct ena_ring *rx_ring = &adapter->rx_ring[qid];
1136 for (i = 0; i < rx_ring->ring_size; i++) {
1137 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
1140 ena_free_rx_page(rx_ring, rx_info);
1144 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers
1145 * @adapter: board private structure
1147 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
1149 struct ena_ring *rx_ring;
1150 int i, rc, bufs_num;
1152 for (i = 0; i < adapter->num_io_queues; i++) {
1153 rx_ring = &adapter->rx_ring[i];
1154 bufs_num = rx_ring->ring_size - 1;
1155 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
1157 if (unlikely(rc != bufs_num))
1158 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
1159 "Refilling Queue %d failed. allocated %d buffers from: %d\n",
1164 static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
1168 for (i = 0; i < adapter->num_io_queues; i++)
1169 ena_free_rx_bufs(adapter, i);
1172 static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
1173 struct ena_tx_buffer *tx_info)
1175 struct ena_com_buf *ena_buf;
1179 ena_buf = tx_info->bufs;
1180 cnt = tx_info->num_of_bufs;
1185 if (tx_info->map_linear_data) {
1186 dma_unmap_single(tx_ring->dev,
1187 dma_unmap_addr(ena_buf, paddr),
1188 dma_unmap_len(ena_buf, len),
1194 /* unmap remaining mapped pages */
1195 for (i = 0; i < cnt; i++) {
1196 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
1197 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
1202 /* ena_free_tx_bufs - Free Tx Buffers per Queue
* @tx_ring: TX ring whose buffers are to be freed
1205 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
1207 bool print_once = true;
1210 for (i = 0; i < tx_ring->ring_size; i++) {
1211 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
1217 netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
1218 "Free uncompleted tx skb qid %d idx 0x%x\n",
1222 netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
1223 "Free uncompleted tx skb qid %d idx 0x%x\n",
1227 ena_unmap_tx_buff(tx_ring, tx_info);
1229 dev_kfree_skb_any(tx_info->skb);
1231 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
1235 static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
1237 struct ena_ring *tx_ring;
1240 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
1241 tx_ring = &adapter->tx_ring[i];
1242 ena_free_tx_bufs(tx_ring);
1246 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
1251 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
1252 ena_qid = ENA_IO_TXQ_IDX(i);
1253 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1257 static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
1262 for (i = 0; i < adapter->num_io_queues; i++) {
1263 ena_qid = ENA_IO_RXQ_IDX(i);
1264 cancel_work_sync(&adapter->ena_napi[i].dim.work);
1265 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
1269 static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
1271 ena_destroy_all_tx_queues(adapter);
1272 ena_destroy_all_rx_queues(adapter);
1275 static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
1276 struct ena_tx_buffer *tx_info, bool is_xdp)
1279 netif_err(ring->adapter,
1282 "tx_info doesn't have valid %s. qid %u req_id %u",
1283 is_xdp ? "xdp frame" : "skb", ring->qid, req_id);
1285 netif_err(ring->adapter,
1288 "Invalid req_id %u in qid %u\n",
1291 ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
1292 ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
1297 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
1299 struct ena_tx_buffer *tx_info;
1301 tx_info = &tx_ring->tx_buffer_info[req_id];
1302 if (likely(tx_info->skb))
1305 return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
1308 static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
1310 struct ena_tx_buffer *tx_info;
1312 tx_info = &xdp_ring->tx_buffer_info[req_id];
1313 if (likely(tx_info->xdpf))
1316 return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
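/* Reclaim completed TX descriptors: unmap the DMA buffers, free the skbs,
 * acknowledge the completions to the device, and wake the netdev queue if
 * enough space has been freed.
 */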
1319 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
1321 struct netdev_queue *txq;
1330 next_to_clean = tx_ring->next_to_clean;
1331 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
1333 while (tx_pkts < budget) {
1334 struct ena_tx_buffer *tx_info;
1335 struct sk_buff *skb;
1337 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
1340 if (unlikely(rc == -EINVAL))
1341 handle_invalid_req_id(tx_ring, req_id, NULL,
1346 /* validate that the request id points to a valid skb */
1347 rc = validate_tx_req_id(tx_ring, req_id);
1351 tx_info = &tx_ring->tx_buffer_info[req_id];
/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
1355 prefetch(&skb->end);
1357 tx_info->skb = NULL;
1358 tx_info->last_jiffies = 0;
1360 ena_unmap_tx_buff(tx_ring, tx_info);
1362 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
1363 "tx_poll: q %d skb %p completed\n", tx_ring->qid,
1366 tx_bytes += skb->len;
1369 total_done += tx_info->tx_descs;
1371 tx_ring->free_ids[next_to_clean] = req_id;
1372 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
1373 tx_ring->ring_size);
1376 tx_ring->next_to_clean = next_to_clean;
1377 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
1378 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
1380 netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
1382 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
1383 "tx_poll: q %d done. total pkts: %d\n",
1384 tx_ring->qid, tx_pkts);
/* We need to make the ring's circular update visible to
 * ena_start_xmit() before checking for netif_queue_stopped().
 */
smp_mb();
1391 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1392 ENA_TX_WAKEUP_THRESH);
1393 if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
1394 __netif_tx_lock(txq, smp_processor_id());
1396 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
1397 ENA_TX_WAKEUP_THRESH);
1398 if (netif_tx_queue_stopped(txq) && above_thresh &&
1399 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
1400 netif_tx_wake_queue(txq);
1401 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
1404 __netif_tx_unlock(txq);
1410 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len)
1412 struct sk_buff *skb;
1415 skb = napi_alloc_skb(rx_ring->napi, len);
1417 skb = napi_build_skb(first_frag, len);
1419 if (unlikely(!skb)) {
1420 ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
1423 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1424 "Failed to allocate skb. first_frag %s\n",
1425 first_frag ? "provided" : "not provided");
1431 static bool ena_try_rx_buf_page_reuse(struct ena_rx_buffer *rx_info, u16 buf_len,
1432 u16 len, int pkt_offset)
1434 struct ena_com_buf *ena_buf = &rx_info->ena_buf;
1436 /* More than ENA_MIN_RX_BUF_SIZE left in the reused buffer
1437 * for data + headroom + tailroom.
1439 if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) {
1440 page_ref_inc(rx_info->page);
1441 rx_info->page_offset += buf_len;
1442 ena_buf->paddr += buf_len;
1443 ena_buf->len -= buf_len;
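/* Build an skb for a received packet. Small packets (up to rx_copybreak)
 * are copied into a freshly allocated linear skb so the page can be reused
 * immediately; larger packets use the page itself as the skb head, and any
 * additional descriptors are attached as page frags.
 */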
1450 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
1451 struct ena_com_rx_buf_info *ena_bufs,
1455 int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1456 bool is_xdp_loaded = ena_xdp_present_ring(rx_ring);
1457 struct ena_rx_buffer *rx_info;
1458 struct ena_adapter *adapter;
1459 int page_offset, pkt_offset;
1460 dma_addr_t pre_reuse_paddr;
1461 u16 len, req_id, buf = 0;
1462 bool reuse_rx_buf_page;
1463 struct sk_buff *skb;
1468 len = ena_bufs[buf].len;
1469 req_id = ena_bufs[buf].req_id;
1471 rx_info = &rx_ring->rx_buffer_info[req_id];
1473 if (unlikely(!rx_info->page)) {
1474 adapter = rx_ring->adapter;
1475 netif_err(adapter, rx_err, rx_ring->netdev,
1476 "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
1477 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
1478 ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
1482 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1483 "rx_info %p page %p\n",
1484 rx_info, rx_info->page);
1486 buf_offset = rx_info->buf_offset;
1487 pkt_offset = buf_offset - rx_ring->rx_headroom;
1488 page_offset = rx_info->page_offset;
1489 buf_addr = page_address(rx_info->page) + page_offset;
1491 if (len <= rx_ring->rx_copybreak) {
1492 skb = ena_alloc_skb(rx_ring, NULL, len);
1496 /* sync this buffer for CPU use */
1497 dma_sync_single_for_cpu(rx_ring->dev,
1498 dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
1501 skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
1502 dma_sync_single_for_device(rx_ring->dev,
1503 dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
1508 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1509 "RX allocated small packet. len %d.\n", skb->len);
1510 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1511 rx_ring->free_ids[*next_to_clean] = req_id;
1512 *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
1513 rx_ring->ring_size);
1517 buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
1519 pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
1521 /* If XDP isn't loaded try to reuse part of the RX buffer */
1522 reuse_rx_buf_page = !is_xdp_loaded &&
1523 ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
1525 dma_sync_single_for_cpu(rx_ring->dev,
1526 pre_reuse_paddr + pkt_offset,
1530 if (!reuse_rx_buf_page)
1531 ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
1533 skb = ena_alloc_skb(rx_ring, buf_addr, buf_len);
1537 /* Populate skb's linear part */
1538 skb_reserve(skb, buf_offset);
1540 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1543 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1544 "RX skb updated. len %d. data_len %d\n",
1545 skb->len, skb->data_len);
1547 if (!reuse_rx_buf_page)
1548 rx_info->page = NULL;
1550 rx_ring->free_ids[*next_to_clean] = req_id;
1552 ENA_RX_RING_IDX_NEXT(*next_to_clean,
1553 rx_ring->ring_size);
1554 if (likely(--descs == 0))
1558 len = ena_bufs[buf].len;
1559 req_id = ena_bufs[buf].req_id;
1561 rx_info = &rx_ring->rx_buffer_info[req_id];
1563 /* rx_info->buf_offset includes rx_ring->rx_headroom */
1564 buf_offset = rx_info->buf_offset;
1565 pkt_offset = buf_offset - rx_ring->rx_headroom;
1566 buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
1567 page_offset = rx_info->page_offset;
1569 pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
1571 reuse_rx_buf_page = !is_xdp_loaded &&
1572 ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
1574 dma_sync_single_for_cpu(rx_ring->dev,
1575 pre_reuse_paddr + pkt_offset,
1579 if (!reuse_rx_buf_page)
1580 ena_unmap_rx_buff_attrs(rx_ring, rx_info,
1581 DMA_ATTR_SKIP_CPU_SYNC);
1583 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
1584 page_offset + buf_offset, len, buf_len);
/* ena_rx_checksum - indicate in skb if hw indicated a good cksum
 * @rx_ring: the ring from which the packet was received
 * @ena_rx_ctx: received packet context/metadata
 * @skb: skb currently being received and modified
 */
1596 static void ena_rx_checksum(struct ena_ring *rx_ring,
1597 struct ena_com_rx_ctx *ena_rx_ctx,
1598 struct sk_buff *skb)
1600 /* Rx csum disabled */
1601 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
1602 skb->ip_summed = CHECKSUM_NONE;
1606 /* For fragmented packets the checksum isn't valid */
1607 if (ena_rx_ctx->frag) {
1608 skb->ip_summed = CHECKSUM_NONE;
/* if IPv4 and the device reported a header checksum error */
1613 if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
1614 (ena_rx_ctx->l3_csum_err))) {
1615 /* ipv4 checksum error */
1616 skb->ip_summed = CHECKSUM_NONE;
1617 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
1619 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1620 "RX IPv4 header checksum error\n");
1625 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1626 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
1627 if (unlikely(ena_rx_ctx->l4_csum_err)) {
1628 /* TCP/UDP checksum error */
1629 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1,
1631 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
1632 "RX L4 checksum error\n");
1633 skb->ip_summed = CHECKSUM_NONE;
1637 if (likely(ena_rx_ctx->l4_csum_checked)) {
1638 skb->ip_summed = CHECKSUM_UNNECESSARY;
1639 ena_increase_stat(&rx_ring->rx_stats.csum_good, 1,
1642 ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1,
1644 skb->ip_summed = CHECKSUM_NONE;
1647 skb->ip_summed = CHECKSUM_NONE;
1653 static void ena_set_rx_hash(struct ena_ring *rx_ring,
1654 struct ena_com_rx_ctx *ena_rx_ctx,
1655 struct sk_buff *skb)
1657 enum pkt_hash_types hash_type;
1659 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
1660 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1661 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
1663 hash_type = PKT_HASH_TYPE_L4;
1665 hash_type = PKT_HASH_TYPE_NONE;
1667 /* Override hash type if the packet is fragmented */
1668 if (ena_rx_ctx->frag)
1669 hash_type = PKT_HASH_TYPE_NONE;
1671 skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
1675 static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
1677 struct ena_rx_buffer *rx_info;
1680 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1681 xdp_prepare_buff(xdp, page_address(rx_info->page),
1682 rx_info->buf_offset,
1683 rx_ring->ena_bufs[0].len, false);
1684 /* If for some reason we received a bigger packet than
1685 * we expect, then we simply drop it
1687 if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
1688 return ENA_XDP_DROP;
1690 ret = ena_xdp_execute(rx_ring, xdp);
1692 /* The xdp program might expand the headers */
1693 if (ret == ENA_XDP_PASS) {
1694 rx_info->buf_offset = xdp->data - xdp->data_hard_start;
1695 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
1700 /* ena_clean_rx_irq - Cleanup RX irq
1701 * @rx_ring: RX ring to clean
1702 * @napi: napi handler
1703 * @budget: how many packets driver is allowed to clean
1705 * Returns the number of cleaned buffers.
1707 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
1710 u16 next_to_clean = rx_ring->next_to_clean;
1711 struct ena_com_rx_ctx ena_rx_ctx;
1712 struct ena_rx_buffer *rx_info;
1713 struct ena_adapter *adapter;
1714 u32 res_budget, work_done;
1715 int rx_copybreak_pkt = 0;
1716 int refill_threshold;
1717 struct sk_buff *skb;
1718 int refill_required;
1719 struct xdp_buff xdp;
1726 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1727 "%s qid %d\n", __func__, rx_ring->qid);
1728 res_budget = budget;
1729 xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
1732 xdp_verdict = ENA_XDP_PASS;
1734 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1735 ena_rx_ctx.max_bufs = rx_ring->sgl_size;
1736 ena_rx_ctx.descs = 0;
1737 ena_rx_ctx.pkt_offset = 0;
1738 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
1739 rx_ring->ena_com_io_sq,
1744 if (unlikely(ena_rx_ctx.descs == 0))
1747 /* First descriptor might have an offset set by the device */
1748 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
1749 rx_info->buf_offset += ena_rx_ctx.pkt_offset;
1751 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1752 "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
1753 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
1754 ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
1756 if (ena_xdp_present_ring(rx_ring))
1757 xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
1759 /* allocate skb and fill it */
1760 if (xdp_verdict == ENA_XDP_PASS)
1761 skb = ena_rx_skb(rx_ring,
1766 if (unlikely(!skb)) {
1767 for (i = 0; i < ena_rx_ctx.descs; i++) {
1768 int req_id = rx_ring->ena_bufs[i].req_id;
1770 rx_ring->free_ids[next_to_clean] = req_id;
1772 ENA_RX_RING_IDX_NEXT(next_to_clean,
1773 rx_ring->ring_size);
/* Packet was passed for transmission, unmap it
 * from the RX side.
 */
1778 if (xdp_verdict & ENA_XDP_FORWARDED) {
1779 ena_unmap_rx_buff_attrs(rx_ring,
1780 &rx_ring->rx_buffer_info[req_id],
1782 rx_ring->rx_buffer_info[req_id].page = NULL;
1785 if (xdp_verdict != ENA_XDP_PASS) {
1786 xdp_flags |= xdp_verdict;
1787 total_len += ena_rx_ctx.ena_bufs[0].len;
1794 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
1796 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
1798 skb_record_rx_queue(skb, rx_ring->qid);
1800 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak)
1803 total_len += skb->len;
1805 napi_gro_receive(napi, skb);
1808 } while (likely(res_budget));
1810 work_done = budget - res_budget;
1811 rx_ring->per_napi_packets += work_done;
1812 u64_stats_update_begin(&rx_ring->syncp);
1813 rx_ring->rx_stats.bytes += total_len;
1814 rx_ring->rx_stats.cnt += work_done;
1815 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
1816 u64_stats_update_end(&rx_ring->syncp);
1818 rx_ring->next_to_clean = next_to_clean;
1820 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
1822 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
1823 ENA_RX_REFILL_THRESH_PACKET);
1825 /* Optimization, try to batch new rx buffers */
1826 if (refill_required > refill_threshold) {
1827 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
1828 ena_refill_rx_bufs(rx_ring, refill_required);
1831 if (xdp_flags & ENA_XDP_REDIRECT)
1837 if (xdp_flags & ENA_XDP_REDIRECT)
1840 adapter = netdev_priv(rx_ring->netdev);
1842 if (rc == -ENOSPC) {
1843 ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
1845 ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
1847 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
1849 ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
1854 static void ena_dim_work(struct work_struct *w)
1856 struct dim *dim = container_of(w, struct dim, work);
1857 struct dim_cq_moder cur_moder =
1858 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
1859 struct ena_napi *ena_napi = container_of(dim, struct ena_napi, dim);
1861 ena_napi->rx_ring->smoothed_interval = cur_moder.usec;
1862 dim->state = DIM_START_MEASURE;
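/* Feed the per-queue packet and byte counters into net_dim(). When DIM
 * decides the profile should change it schedules ena_dim_work(), which
 * updates the ring's smoothed_interval used when unmasking the interrupt.
 */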
1865 static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
1867 struct dim_sample dim_sample;
1868 struct ena_ring *rx_ring = ena_napi->rx_ring;
1870 if (!rx_ring->per_napi_packets)
1873 rx_ring->non_empty_napi_events++;
1875 dim_update_sample(rx_ring->non_empty_napi_events,
1876 rx_ring->rx_stats.cnt,
1877 rx_ring->rx_stats.bytes,
1880 net_dim(&ena_napi->dim, dim_sample);
1882 rx_ring->per_napi_packets = 0;
1885 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
1886 struct ena_ring *rx_ring)
1888 u32 rx_interval = tx_ring->smoothed_interval;
1889 struct ena_eth_io_intr_reg intr_reg;
/* Rx ring can be NULL for XDP tx queues, which don't have an
 * accompanying rx_ring pair.
 */
if (rx_ring)
1895 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ?
1896 rx_ring->smoothed_interval :
1897 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev);
1899 /* Update intr register: rx intr delay,
1900 * tx intr delay and interrupt unmask
1902 ena_com_update_intr_reg(&intr_reg,
1904 tx_ring->smoothed_interval,
1907 ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1,
/* It is a shared MSI-X.
 * Both the Tx and Rx CQs hold a pointer to it,
 * so we use one of them to reach the intr reg.
 * The Tx ring is used because rx_ring is NULL for XDP queues.
 */
1915 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg);
1918 static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
1919 struct ena_ring *rx_ring)
1921 int cpu = get_cpu();
1924 /* Check only one ring since the 2 rings are running on the same cpu */
1925 if (likely(tx_ring->cpu == cpu))
1932 numa_node = cpu_to_node(cpu);
1934 if (likely(tx_ring->numa_node == numa_node))
1939 if (numa_node != NUMA_NO_NODE) {
1940 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
1941 tx_ring->numa_node = numa_node;
1943 rx_ring->numa_node = numa_node;
1944 ena_com_update_numa_node(rx_ring->ena_com_io_cq,
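/* XDP analogue of ena_clean_tx_irq(): reclaim completed XDP TX descriptors,
 * unmap the frames and return them to the XDP memory allocator via
 * xdp_return_frame().
 */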
1954 static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
1962 if (unlikely(!xdp_ring))
1964 next_to_clean = xdp_ring->next_to_clean;
1966 while (tx_pkts < budget) {
1967 struct ena_tx_buffer *tx_info;
1968 struct xdp_frame *xdpf;
1970 rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
1973 if (unlikely(rc == -EINVAL))
1974 handle_invalid_req_id(xdp_ring, req_id, NULL,
1979 /* validate that the request id points to a valid xdp_frame */
1980 rc = validate_xdp_req_id(xdp_ring, req_id);
1984 tx_info = &xdp_ring->tx_buffer_info[req_id];
1985 xdpf = tx_info->xdpf;
1987 tx_info->xdpf = NULL;
1988 tx_info->last_jiffies = 0;
1989 ena_unmap_tx_buff(xdp_ring, tx_info);
1991 netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
1992 "tx_poll: q %d skb %p completed\n", xdp_ring->qid,
1996 total_done += tx_info->tx_descs;
1998 xdp_return_frame(xdpf);
1999 xdp_ring->free_ids[next_to_clean] = req_id;
2000 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
2001 xdp_ring->ring_size);
2004 xdp_ring->next_to_clean = next_to_clean;
2005 ena_com_comp_ack(xdp_ring->ena_com_io_sq, total_done);
2006 ena_com_update_dev_comp_head(xdp_ring->ena_com_io_cq);
2008 netif_dbg(xdp_ring->adapter, tx_done, xdp_ring->netdev,
2009 "tx_poll: q %d done. total pkts: %d\n",
2010 xdp_ring->qid, tx_pkts);
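/* NAPI poll handler for the regular (non-XDP) IO queue pairs: clean TX
 * completions first, then RX, and re-arm the interrupt only when all work
 * fit within the budget and NAPI actually completed.
 */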
2015 static int ena_io_poll(struct napi_struct *napi, int budget)
2017 struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
2018 struct ena_ring *tx_ring, *rx_ring;
2020 int rx_work_done = 0;
2022 int napi_comp_call = 0;
2025 tx_ring = ena_napi->tx_ring;
2026 rx_ring = ena_napi->rx_ring;
2028 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
2030 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
2031 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
2032 napi_complete_done(napi, 0);
2036 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
/* On netpoll the budget is zero and the handler should only clean the
 * tx completions.
 */
if (likely(budget))
2041 rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
/* If the device is about to reset or is down, avoid unmasking
 * the interrupt and return 0 so NAPI won't reschedule.
 */
2046 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
2047 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
2048 napi_complete_done(napi, 0);
2051 } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
/* Update numa and unmask the interrupt only when scheduled
 * from the interrupt context (as opposed to from sk_busy_loop).
 */
2057 if (napi_complete_done(napi, rx_work_done) &&
2058 READ_ONCE(ena_napi->interrupts_masked)) {
2059 smp_rmb(); /* make sure interrupts_masked is read */
2060 WRITE_ONCE(ena_napi->interrupts_masked, false);
2061 /* We apply adaptive moderation on Rx path only.
2062 * Tx uses static interrupt moderation.
2064 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
2065 ena_adjust_adaptive_rx_intr_moderation(ena_napi);
2067 ena_update_ring_numa_node(tx_ring, rx_ring);
2068 ena_unmask_interrupt(tx_ring, rx_ring);
2076 u64_stats_update_begin(&tx_ring->syncp);
2077 tx_ring->tx_stats.napi_comp += napi_comp_call;
2078 tx_ring->tx_stats.tx_poll++;
2079 u64_stats_update_end(&tx_ring->syncp);
2081 tx_ring->tx_stats.last_napi_jiffies = jiffies;
2086 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
2088 struct ena_adapter *adapter = (struct ena_adapter *)data;
2090 ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
2092 /* Don't call the aenq handler before probe is done */
2093 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
2094 ena_com_aenq_intr_handler(adapter->ena_dev, data);
2099 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
2100 * @irq: interrupt number
2101 * @data: pointer to a network interface private napi device structure
2103 static irqreturn_t ena_intr_msix_io(int irq, void *data)
2105 struct ena_napi *ena_napi = data;
2107 /* Used to check HW health */
2108 WRITE_ONCE(ena_napi->first_interrupt, true);
2110 WRITE_ONCE(ena_napi->interrupts_masked, true);
2111 smp_wmb(); /* write interrupts_masked before calling napi */
2113 napi_schedule_irqoff(&ena_napi->napi);
/* Reserve a single MSI-X vector for management (admin + aenq),
 * plus one vector for each potential io queue.
 * The number of potential io queues is the minimum of what the device
 * supports and the number of vCPUs.
 */
2123 static int ena_enable_msix(struct ena_adapter *adapter)
2125 int msix_vecs, irq_cnt;
2127 if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
2128 netif_err(adapter, probe, adapter->netdev,
2129 "Error, MSI-X is already enabled\n");
/* Reserve the max MSI-X vectors we might need */
2134 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
2135 netif_dbg(adapter, probe, adapter->netdev,
2136 "Trying to enable MSI-X, vectors %d\n", msix_vecs);
2138 irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
2139 msix_vecs, PCI_IRQ_MSIX);
2142 netif_err(adapter, probe, adapter->netdev,
2143 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
2147 if (irq_cnt != msix_vecs) {
2148 netif_notice(adapter, probe, adapter->netdev,
2149 "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
2150 irq_cnt, msix_vecs);
2151 adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
2154 if (ena_init_rx_cpu_rmap(adapter))
2155 netif_warn(adapter, probe, adapter->netdev,
2156 "Failed to map IRQs to CPUs\n");
2158 adapter->msix_vecs = irq_cnt;
2159 set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
2164 static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
2168 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
2169 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
2170 pci_name(adapter->pdev));
2171 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
2172 ena_intr_msix_mgmnt;
2173 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
2174 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
2175 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
2176 cpu = cpumask_first(cpu_online_mask);
2177 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
2178 cpumask_set_cpu(cpu,
2179 &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
2182 static void ena_setup_io_intr(struct ena_adapter *adapter)
2184 struct net_device *netdev;
2185 int irq_idx, i, cpu;
2188 netdev = adapter->netdev;
2189 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2191 for (i = 0; i < io_queue_count; i++) {
2192 irq_idx = ENA_IO_IRQ_IDX(i);
2193 cpu = i % num_online_cpus();
2195 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
2196 "%s-Tx-Rx-%d", netdev->name, i);
2197 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
2198 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
2199 adapter->irq_tbl[irq_idx].vector =
2200 pci_irq_vector(adapter->pdev, irq_idx);
2201 adapter->irq_tbl[irq_idx].cpu = cpu;
2203 cpumask_set_cpu(cpu,
2204 &adapter->irq_tbl[irq_idx].affinity_hint_mask);
2208 static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
2210 unsigned long flags = 0;
2211 struct ena_irq *irq;
2214 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2215 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2218 netif_err(adapter, probe, adapter->netdev,
2219 "Failed to request admin irq\n");
2223 netif_dbg(adapter, probe, adapter->netdev,
2224 "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
2225 irq->affinity_hint_mask.bits[0], irq->vector);
2227 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2232 static int ena_request_io_irq(struct ena_adapter *adapter)
2234 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2235 unsigned long flags = 0;
2236 struct ena_irq *irq;
2239 if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
2240 netif_err(adapter, ifup, adapter->netdev,
2241 "Failed to request I/O IRQ: MSI-X is not enabled\n");
2245 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
2246 irq = &adapter->irq_tbl[i];
2247 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
2250 netif_err(adapter, ifup, adapter->netdev,
2251 "Failed to request I/O IRQ. index %d rc %d\n",
2256 netif_dbg(adapter, ifup, adapter->netdev,
2257 "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
2258 i, irq->affinity_hint_mask.bits[0], irq->vector);
2260 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
2266 for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
2267 irq = &adapter->irq_tbl[k];
2268 free_irq(irq->vector, irq->data);
2274 static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
2276 struct ena_irq *irq;
2278 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
2279 synchronize_irq(irq->vector);
2280 irq_set_affinity_hint(irq->vector, NULL);
2281 free_irq(irq->vector, irq->data);
2284 static void ena_free_io_irq(struct ena_adapter *adapter)
2286 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2287 struct ena_irq *irq;
2290 #ifdef CONFIG_RFS_ACCEL
2291 if (adapter->msix_vecs >= 1) {
2292 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
2293 adapter->netdev->rx_cpu_rmap = NULL;
2295 #endif /* CONFIG_RFS_ACCEL */
2297 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++) {
2298 irq = &adapter->irq_tbl[i];
2299 irq_set_affinity_hint(irq->vector, NULL);
2300 free_irq(irq->vector, irq->data);
2304 static void ena_disable_msix(struct ena_adapter *adapter)
2306 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
2307 pci_free_irq_vectors(adapter->pdev);
2310 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
2312 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2315 if (!netif_running(adapter->netdev))
2318 for (i = ENA_IO_IRQ_FIRST_IDX; i < ENA_MAX_MSIX_VEC(io_queue_count); i++)
2319 synchronize_irq(adapter->irq_tbl[i].vector);
2322 static void ena_del_napi_in_range(struct ena_adapter *adapter,
2328 for (i = first_index; i < first_index + count; i++) {
2329 netif_napi_del(&adapter->ena_napi[i].napi);
2331 WARN_ON(!ENA_IS_XDP_INDEX(adapter, i) &&
2332 adapter->ena_napi[i].xdp_ring);
2336 static void ena_init_napi_in_range(struct ena_adapter *adapter,
2337 int first_index, int count)
2341 for (i = first_index; i < first_index + count; i++) {
2342 struct ena_napi *napi = &adapter->ena_napi[i];
2344 netif_napi_add(adapter->netdev, &napi->napi,
2345 ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);
2347 if (!ENA_IS_XDP_INDEX(adapter, i)) {
2348 napi->rx_ring = &adapter->rx_ring[i];
2349 napi->tx_ring = &adapter->tx_ring[i];
2351 napi->xdp_ring = &adapter->tx_ring[i];
2357 static void ena_napi_disable_in_range(struct ena_adapter *adapter,
2363 for (i = first_index; i < first_index + count; i++)
2364 napi_disable(&adapter->ena_napi[i].napi);
2367 static void ena_napi_enable_in_range(struct ena_adapter *adapter,
2373 for (i = first_index; i < first_index + count; i++)
2374 napi_enable(&adapter->ena_napi[i].napi);
2377 /* Configure the Rx forwarding */
2378 static int ena_rss_configure(struct ena_adapter *adapter)
2380 struct ena_com_dev *ena_dev = adapter->ena_dev;
2383 /* In case the RSS table wasn't initialized by probe */
2384 if (!ena_dev->rss.tbl_log_size) {
2385 rc = ena_rss_init_default(adapter);
2386 if (rc && (rc != -EOPNOTSUPP)) {
2387 netif_err(adapter, ifup, adapter->netdev,
2388 "Failed to init RSS rc: %d\n", rc);
2393 /* Set indirect table */
2394 rc = ena_com_indirect_table_set(ena_dev);
2395 if (unlikely(rc && rc != -EOPNOTSUPP))
2398 /* Configure hash function (if supported) */
2399 rc = ena_com_set_hash_function(ena_dev);
2400 if (unlikely(rc && (rc != -EOPNOTSUPP)))
2403 /* Configure hash inputs (if supported) */
2404 rc = ena_com_set_hash_ctrl(ena_dev);
2405 if (unlikely(rc && (rc != -EOPNOTSUPP)))
2411 static int ena_up_complete(struct ena_adapter *adapter)
2415 rc = ena_rss_configure(adapter);
2419 ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
2421 ena_refill_all_rx_bufs(adapter);
2423 /* enable transmits */
2424 netif_tx_start_all_queues(adapter->netdev);
2426 ena_napi_enable_in_range(adapter,
2428 adapter->xdp_num_queues + adapter->num_io_queues);
2433 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
2435 struct ena_com_create_io_ctx ctx;
2436 struct ena_com_dev *ena_dev;
2437 struct ena_ring *tx_ring;
2442 ena_dev = adapter->ena_dev;
2444 tx_ring = &adapter->tx_ring[qid];
2445 msix_vector = ENA_IO_IRQ_IDX(qid);
2446 ena_qid = ENA_IO_TXQ_IDX(qid);
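/* Describe the TX SQ/CQ pair and create it on the device via the admin queue */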
2448 memset(&ctx, 0x0, sizeof(ctx));
2450 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
2452 ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
2453 ctx.msix_vector = msix_vector;
2454 ctx.queue_size = tx_ring->ring_size;
2455 ctx.numa_node = tx_ring->numa_node;
2457 rc = ena_com_create_io_queue(ena_dev, &ctx);
2459 netif_err(adapter, ifup, adapter->netdev,
2460 "Failed to create I/O TX queue num %d rc: %d\n",
2465 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2466 &tx_ring->ena_com_io_sq,
2467 &tx_ring->ena_com_io_cq);
2469 netif_err(adapter, ifup, adapter->netdev,
2470 "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
2472 ena_com_destroy_io_queue(ena_dev, ena_qid);
2476 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
2480 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
2481 int first_index, int count)
2483 struct ena_com_dev *ena_dev = adapter->ena_dev;
2486 for (i = first_index; i < first_index + count; i++) {
2487 rc = ena_create_io_tx_queue(adapter, i);
2495 while (i-- > first_index)
2496 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
2501 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
2503 struct ena_com_dev *ena_dev;
2504 struct ena_com_create_io_ctx ctx;
2505 struct ena_ring *rx_ring;
2510 ena_dev = adapter->ena_dev;
2512 rx_ring = &adapter->rx_ring[qid];
2513 msix_vector = ENA_IO_IRQ_IDX(qid);
2514 ena_qid = ENA_IO_RXQ_IDX(qid);
2516 memset(&ctx, 0x0, sizeof(ctx));
2519 ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
2520 ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2521 ctx.msix_vector = msix_vector;
2522 ctx.queue_size = rx_ring->ring_size;
2523 ctx.numa_node = rx_ring->numa_node;
2525 rc = ena_com_create_io_queue(ena_dev, &ctx);
2527 netif_err(adapter, ifup, adapter->netdev,
2528 "Failed to create I/O RX queue num %d rc: %d\n",
2533 rc = ena_com_get_io_handlers(ena_dev, ena_qid,
2534 &rx_ring->ena_com_io_sq,
2535 &rx_ring->ena_com_io_cq);
2537 netif_err(adapter, ifup, adapter->netdev,
2538 "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
2543 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
2547 ena_com_destroy_io_queue(ena_dev, ena_qid);
2551 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
2553 struct ena_com_dev *ena_dev = adapter->ena_dev;
2556 for (i = 0; i < adapter->num_io_queues; i++) {
2557 rc = ena_create_io_rx_queue(adapter, i);
2560 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work);
2567 cancel_work_sync(&adapter->ena_napi[i].dim.work);
2568 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
2574 static void set_io_rings_size(struct ena_adapter *adapter,
2580 for (i = 0; i < adapter->num_io_queues; i++) {
2581 adapter->tx_ring[i].ring_size = new_tx_size;
2582 adapter->rx_ring[i].ring_size = new_rx_size;
2586 /* This function allows queue allocation to back off when the system is
2587 * low on memory. If there is not enough memory to allocate io queues
2588 * the driver will try to allocate smaller queues.
2590 * The backoff algorithm is as follows:
2591 * 1. Try to allocate TX and RX; if successful,
2592 * 1.1. return success.
2594 * 2. Divide the size of the larger of the RX and TX queues by 2 (or both if they are the same size).
2596 * 3. If TX or RX is smaller than 256,
2597 * 3.1. return failure;
2599 * 4. otherwise, go back to 1.
2601 static int create_queues_with_size_backoff(struct ena_adapter *adapter)
2603 int rc, cur_rx_ring_size, cur_tx_ring_size;
2604 int new_rx_ring_size, new_tx_ring_size;
2606 /* current queue sizes might be set to smaller than the requested
2607 * ones due to past queue allocation failures.
2609 set_io_rings_size(adapter, adapter->requested_tx_ring_size,
2610 adapter->requested_rx_ring_size);
2613 if (ena_xdp_present(adapter)) {
2614 rc = ena_setup_and_create_all_xdp_queues(adapter);
2619 rc = ena_setup_tx_resources_in_range(adapter,
2621 adapter->num_io_queues);
2625 rc = ena_create_io_tx_queues_in_range(adapter,
2627 adapter->num_io_queues);
2629 goto err_create_tx_queues;
2631 rc = ena_setup_all_rx_resources(adapter);
2635 rc = ena_create_all_io_rx_queues(adapter);
2637 goto err_create_rx_queues;
2641 err_create_rx_queues:
2642 ena_free_all_io_rx_resources(adapter);
2644 ena_destroy_all_tx_queues(adapter);
2645 err_create_tx_queues:
2646 ena_free_all_io_tx_resources(adapter);
2648 if (rc != -ENOMEM) {
2649 netif_err(adapter, ifup, adapter->netdev,
2650 "Queue creation failed with error code %d\n",
2655 cur_tx_ring_size = adapter->tx_ring[0].ring_size;
2656 cur_rx_ring_size = adapter->rx_ring[0].ring_size;
2658 netif_err(adapter, ifup, adapter->netdev,
2659 "Not enough memory to create queues with sizes TX=%d, RX=%d\n",
2660 cur_tx_ring_size, cur_rx_ring_size);
2662 new_tx_ring_size = cur_tx_ring_size;
2663 new_rx_ring_size = cur_rx_ring_size;
2665 /* Decrease the size of the larger queue, or
2666 * decrease both if they are the same size.
2668 if (cur_rx_ring_size <= cur_tx_ring_size)
2669 new_tx_ring_size = cur_tx_ring_size / 2;
2670 if (cur_rx_ring_size >= cur_tx_ring_size)
2671 new_rx_ring_size = cur_rx_ring_size / 2;
2673 if (new_tx_ring_size < ENA_MIN_RING_SIZE ||
2674 new_rx_ring_size < ENA_MIN_RING_SIZE) {
2675 netif_err(adapter, ifup, adapter->netdev,
2676 "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n",
2681 netif_err(adapter, ifup, adapter->netdev,
2682 "Retrying queue creation with sizes TX=%d, RX=%d\n",
2686 set_io_rings_size(adapter, new_tx_ring_size,
2691 static int ena_up(struct ena_adapter *adapter)
2693 int io_queue_count, rc, i;
2695 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
2697 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2698 ena_setup_io_intr(adapter);
2700 /* napi poll functions should be initialized before running
2701 * request_irq(), to handle a rare condition where there is a pending
2702 * interrupt, causing the ISR to fire immediately while the poll
2703 * function wasn't set yet, causing a null dereference
2705 ena_init_napi_in_range(adapter, 0, io_queue_count);
2707 rc = ena_request_io_irq(adapter);
2711 rc = create_queues_with_size_backoff(adapter);
2713 goto err_create_queues_with_backoff;
2715 rc = ena_up_complete(adapter);
2719 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2720 netif_carrier_on(adapter->netdev);
2722 ena_increase_stat(&adapter->dev_stats.interface_up, 1,
2725 set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2727 /* Enable completion queues interrupt */
2728 for (i = 0; i < adapter->num_io_queues; i++)
2729 ena_unmask_interrupt(&adapter->tx_ring[i],
2730 &adapter->rx_ring[i]);
2732 /* schedule napi in case we had pending packets
2733 * from the last time we disabled napi
2735 for (i = 0; i < io_queue_count; i++)
2736 napi_schedule(&adapter->ena_napi[i].napi);
2741 ena_destroy_all_tx_queues(adapter);
2742 ena_free_all_io_tx_resources(adapter);
2743 ena_destroy_all_rx_queues(adapter);
2744 ena_free_all_io_rx_resources(adapter);
2745 err_create_queues_with_backoff:
2746 ena_free_io_irq(adapter);
2748 ena_del_napi_in_range(adapter, 0, io_queue_count);
2753 static void ena_down(struct ena_adapter *adapter)
2755 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
2757 netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
2759 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2761 ena_increase_stat(&adapter->dev_stats.interface_down, 1,
2764 netif_carrier_off(adapter->netdev);
2765 netif_tx_disable(adapter->netdev);
2767 /* After this point the napi handler won't enable the tx queue */
2768 ena_napi_disable_in_range(adapter, 0, io_queue_count);
2770 /* After destroy the queue there won't be any new interrupts */
2772 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
2775 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2777 netif_err(adapter, ifdown, adapter->netdev,
2778 "Device reset failed\n");
2779 /* stop submitting admin commands on a device that was reset */
2780 ena_com_set_admin_running_state(adapter->ena_dev, false);
2783 ena_destroy_all_io_queues(adapter);
2785 ena_disable_io_intr_sync(adapter);
2786 ena_free_io_irq(adapter);
2787 ena_del_napi_in_range(adapter, 0, io_queue_count);
2789 ena_free_all_tx_bufs(adapter);
2790 ena_free_all_rx_bufs(adapter);
2791 ena_free_all_io_tx_resources(adapter);
2792 ena_free_all_io_rx_resources(adapter);
2795 /* ena_open - Called when a network interface is made active
2796 * @netdev: network interface device structure
2798 * Returns 0 on success, negative value on failure
2800 * The open entry point is called when a network interface is made
2801 * active by the system (IFF_UP). At this point all resources needed
2802 * for transmit and receive operations are allocated, the interrupt
2803 * handler is registered with the OS, the watchdog timer is started,
2804 * and the stack is notified that the interface is ready.
2806 static int ena_open(struct net_device *netdev)
2808 struct ena_adapter *adapter = netdev_priv(netdev);
2811 /* Notify the stack of the actual queue counts. */
2812 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues);
2814 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
2818 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues);
2820 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
2824 rc = ena_up(adapter);
2831 /* ena_close - Disables a network interface
2832 * @netdev: network interface device structure
2834 * Returns 0, this is not allowed to fail
2836 * The close entry point is called when an interface is de-activated
2837 * by the OS. The hardware is still under the driver's control, but
2838 * needs to be disabled. A global MAC reset is issued to stop the
2839 * hardware, and all transmit and receive resources are freed.
2841 static int ena_close(struct net_device *netdev)
2843 struct ena_adapter *adapter = netdev_priv(netdev);
2845 netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
2847 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2850 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2853 /* Check the device status and issue a reset if needed */
2854 check_for_admin_com_state(adapter);
2855 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2856 netif_err(adapter, ifdown, adapter->netdev,
2857 "Destroy failure, restarting device\n");
2858 ena_dump_stats_to_dmesg(adapter);
2859 /* rtnl lock already obtained in dev_ioctl() layer */
2860 ena_destroy_device(adapter, false);
2861 ena_restore_device(adapter);
2867 int ena_update_queue_params(struct ena_adapter *adapter,
2870 u32 new_llq_header_len)
2872 bool dev_was_up, large_llq_changed = false;
2875 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2876 ena_close(adapter->netdev);
2877 adapter->requested_tx_ring_size = new_tx_size;
2878 adapter->requested_rx_ring_size = new_rx_size;
2879 ena_init_io_rings(adapter,
2881 adapter->xdp_num_queues +
2882 adapter->num_io_queues);
2884 large_llq_changed = adapter->ena_dev->tx_mem_queue_type ==
2885 ENA_ADMIN_PLACEMENT_POLICY_DEV;
2886 large_llq_changed &=
2887 new_llq_header_len != adapter->ena_dev->tx_max_header_size;
2889 /* A check that the configuration is valid is done by the caller */
2890 if (large_llq_changed) {
2891 adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled;
2893 ena_destroy_device(adapter, false);
2894 rc = ena_restore_device(adapter);
2897 return dev_was_up && !rc ? ena_up(adapter) : rc;
2900 int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)
2902 struct ena_ring *rx_ring;
2905 if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE))
2908 adapter->rx_copybreak = rx_copybreak;
2910 for (i = 0; i < adapter->num_io_queues; i++) {
2911 rx_ring = &adapter->rx_ring[i];
2912 rx_ring->rx_copybreak = rx_copybreak;
2918 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
2920 struct ena_com_dev *ena_dev = adapter->ena_dev;
2921 int prev_channel_count;
2924 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2925 ena_close(adapter->netdev);
2926 prev_channel_count = adapter->num_io_queues;
2927 adapter->num_io_queues = new_channel_count;
2928 if (ena_xdp_present(adapter) &&
2929 ena_xdp_allowed(adapter) == ENA_XDP_ALLOWED) {
2930 adapter->xdp_first_ring = new_channel_count;
2931 adapter->xdp_num_queues = new_channel_count;
2932 if (prev_channel_count > new_channel_count)
2933 ena_xdp_exchange_program_rx_in_range(adapter,
2936 prev_channel_count);
2938 ena_xdp_exchange_program_rx_in_range(adapter,
2939 adapter->xdp_bpf_prog,
2944 /* We need to destroy the rss table so that the indirection
2945 * table will be reinitialized by ena_up()
2947 ena_com_rss_destroy(ena_dev);
2948 ena_init_io_rings(adapter,
2950 adapter->xdp_num_queues +
2951 adapter->num_io_queues);
2952 return dev_was_up ? ena_open(adapter->netdev) : 0;
2955 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx,
2956 struct sk_buff *skb,
2957 bool disable_meta_caching)
2959 u32 mss = skb_shinfo(skb)->gso_size;
2960 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
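/* TX checksum/TSO metadata is only needed when the stack requests checksum offload or when GSO is in use */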
2963 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
2964 ena_tx_ctx->l4_csum_enable = 1;
2966 ena_tx_ctx->tso_enable = 1;
2967 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
2968 ena_tx_ctx->l4_csum_partial = 0;
2970 ena_tx_ctx->tso_enable = 0;
2971 ena_meta->l4_hdr_len = 0;
2972 ena_tx_ctx->l4_csum_partial = 1;
2975 switch (ip_hdr(skb)->version) {
2977 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
2978 if (ip_hdr(skb)->frag_off & htons(IP_DF))
2981 ena_tx_ctx->l3_csum_enable = 1;
2982 l4_protocol = ip_hdr(skb)->protocol;
2985 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
2986 l4_protocol = ipv6_hdr(skb)->nexthdr;
2992 if (l4_protocol == IPPROTO_TCP)
2993 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
2995 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
2997 ena_meta->mss = mss;
2998 ena_meta->l3_hdr_len = skb_network_header_len(skb);
2999 ena_meta->l3_hdr_offset = skb_network_offset(skb);
3000 ena_tx_ctx->meta_valid = 1;
3001 } else if (disable_meta_caching) {
3002 memset(ena_meta, 0, sizeof(*ena_meta));
3003 ena_tx_ctx->meta_valid = 1;
3005 ena_tx_ctx->meta_valid = 0;
3009 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
3010 struct sk_buff *skb)
3012 int num_frags, header_len, rc;
3014 num_frags = skb_shinfo(skb)->nr_frags;
3015 header_len = skb_headlen(skb);
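/* A packet needs one descriptor per frag plus one for the linear head.
 * Linearize the skb when that would exceed the device's sgl_size, unless
 * the linear head is small enough to be sent as the pushed header.
 */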
3017 if (num_frags < tx_ring->sgl_size)
3020 if ((num_frags == tx_ring->sgl_size) &&
3021 (header_len < tx_ring->tx_max_header_size))
3024 ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp);
3026 rc = skb_linearize(skb);
3028 ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1,
3035 static int ena_tx_map_skb(struct ena_ring *tx_ring,
3036 struct ena_tx_buffer *tx_info,
3037 struct sk_buff *skb,
3041 struct ena_adapter *adapter = tx_ring->adapter;
3042 struct ena_com_buf *ena_buf;
3044 u32 skb_head_len, frag_len, last_frag;
3049 skb_head_len = skb_headlen(skb);
3051 ena_buf = tx_info->bufs;
3053 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
3054 /* When the device is LLQ mode, the driver will copy
3055 * the header into the device memory space.
3056 * the ena_com layer assume the header is in a linear
3058 * This assumption might be wrong since part of the header
3059 * can be in the fragmented buffers.
3060 * Use skb_header_pointer to make sure the header is in a
3061 * linear memory space.
3064 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
3065 *push_hdr = skb_header_pointer(skb, 0, push_len,
3066 tx_ring->push_buf_intermediate_buf);
3067 *header_len = push_len;
3068 if (unlikely(skb->data != *push_hdr)) {
3069 ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1,
3072 delta = push_len - skb_head_len;
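/* delta counts pushed-header bytes taken from the frags rather than the linear head; skip that many bytes when mapping the frags below */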
3076 *header_len = min_t(u32, skb_head_len,
3077 tx_ring->tx_max_header_size);
3080 netif_dbg(adapter, tx_queued, adapter->netdev,
3081 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
3082 *push_hdr, push_len);
3084 if (skb_head_len > push_len) {
3085 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
3086 skb_head_len - push_len, DMA_TO_DEVICE);
3087 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
3088 goto error_report_dma_error;
3090 ena_buf->paddr = dma;
3091 ena_buf->len = skb_head_len - push_len;
3094 tx_info->num_of_bufs++;
3095 tx_info->map_linear_data = 1;
3097 tx_info->map_linear_data = 0;
3100 last_frag = skb_shinfo(skb)->nr_frags;
3102 for (i = 0; i < last_frag; i++) {
3103 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3105 frag_len = skb_frag_size(frag);
3107 if (unlikely(delta >= frag_len)) {
3112 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
3113 frag_len - delta, DMA_TO_DEVICE);
3114 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
3115 goto error_report_dma_error;
3117 ena_buf->paddr = dma;
3118 ena_buf->len = frag_len - delta;
3120 tx_info->num_of_bufs++;
3126 error_report_dma_error:
3127 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1,
3129 netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
3131 tx_info->skb = NULL;
3133 tx_info->num_of_bufs += i;
3134 ena_unmap_tx_buff(tx_ring, tx_info);
3139 /* Called with netif_tx_lock. */
3140 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
3142 struct ena_adapter *adapter = netdev_priv(dev);
3143 struct ena_tx_buffer *tx_info;
3144 struct ena_com_tx_ctx ena_tx_ctx;
3145 struct ena_ring *tx_ring;
3146 struct netdev_queue *txq;
3148 u16 next_to_use, req_id, header_len;
3151 netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
3152 /* Determine which tx ring we will be placed on */
3153 qid = skb_get_queue_mapping(skb);
3154 tx_ring = &adapter->tx_ring[qid];
3155 txq = netdev_get_tx_queue(dev, qid);
3157 rc = ena_check_and_linearize_skb(tx_ring, skb);
3159 goto error_drop_packet;
3161 skb_tx_timestamp(skb);
3163 next_to_use = tx_ring->next_to_use;
3164 req_id = tx_ring->free_ids[next_to_use];
3165 tx_info = &tx_ring->tx_buffer_info[req_id];
3166 tx_info->num_of_bufs = 0;
3168 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
3170 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
3172 goto error_drop_packet;
3174 memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
3175 ena_tx_ctx.ena_bufs = tx_info->bufs;
3176 ena_tx_ctx.push_header = push_hdr;
3177 ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
3178 ena_tx_ctx.req_id = req_id;
3179 ena_tx_ctx.header_len = header_len;
3181 /* set flags and meta data */
3182 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
3184 rc = ena_xmit_common(dev,
3191 goto error_unmap_dma;
3193 netdev_tx_sent_queue(txq, skb->len);
3195 /* Stop the queue when no more space is available; a packet can need up
3196 * to sgl_size + 2 descriptors: one for the meta descriptor and one for the header
3197 * (if the header is larger than tx_max_header_size).
3199 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3200 tx_ring->sgl_size + 2))) {
3201 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
3204 netif_tx_stop_queue(txq);
3205 ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1,
3208 /* There is a rare condition where this function decides to
3209 * stop the queue, but meanwhile clean_tx_irq updates
3210 * next_to_completion and terminates.
3211 * The queue will then remain stopped forever.
3212 * To solve this issue, add an mb() to make sure that the
3213 * netif_tx_stop_queue() write is visible before checking whether
3214 * there is additional space in the queue.
3218 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
3219 ENA_TX_WAKEUP_THRESH)) {
3220 netif_tx_wake_queue(txq);
3221 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1,
3226 if (netif_xmit_stopped(txq) || !netdev_xmit_more())
3227 /* trigger the dma engine. ena_ring_tx_doorbell()
3228 * calls a memory barrier inside it.
3230 ena_ring_tx_doorbell(tx_ring);
3232 return NETDEV_TX_OK;
3235 ena_unmap_tx_buff(tx_ring, tx_info);
3236 tx_info->skb = NULL;
3240 return NETDEV_TX_OK;
3243 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
3244 struct net_device *sb_dev)
3247 /* We suspect that this is good for in-kernel network services that
3248 * want to loop an incoming skb's rx queue back to tx; with normal
3249 * user-generated traffic we will most probably not get here.
3251 if (skb_rx_queue_recorded(skb))
3252 qid = skb_get_rx_queue(skb);
3254 qid = netdev_pick_tx(dev, skb, NULL);
3259 static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3261 struct device *dev = &pdev->dev;
3262 struct ena_admin_host_info *host_info;
3265 /* Allocate only the host info */
3266 rc = ena_com_allocate_host_info(ena_dev);
3268 dev_err(dev, "Cannot allocate host info\n");
3272 host_info = ena_dev->host_attr.host_info;
3274 host_info->bdf = pci_dev_id(pdev);
3275 host_info->os_type = ENA_ADMIN_OS_LINUX;
3276 host_info->kernel_ver = LINUX_VERSION_CODE;
3277 strscpy(host_info->kernel_ver_str, utsname()->version,
3278 sizeof(host_info->kernel_ver_str) - 1);
3279 host_info->os_dist = 0;
3280 strncpy(host_info->os_dist_str, utsname()->release,
3281 sizeof(host_info->os_dist_str) - 1);
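/* Pack the driver version; 'K' fills the module-type field (ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT) */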
3282 host_info->driver_version =
3283 (DRV_MODULE_GEN_MAJOR) |
3284 (DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
3285 (DRV_MODULE_GEN_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
3286 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
3287 host_info->num_cpus = num_online_cpus();
3289 host_info->driver_supported_features =
3290 ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
3291 ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
3292 ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
3293 ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
3294 ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
3296 rc = ena_com_set_host_attributes(ena_dev);
3298 if (rc == -EOPNOTSUPP)
3299 dev_warn(dev, "Cannot set host attributes\n");
3301 dev_err(dev, "Cannot set host attributes\n");
3309 ena_com_delete_host_info(ena_dev);
3312 static void ena_config_debug_area(struct ena_adapter *adapter)
3314 u32 debug_area_size;
3317 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
3318 if (ss_count <= 0) {
3319 netif_err(adapter, drv, adapter->netdev,
3320 "SS count is negative\n");
3324 /* Allocate 32 bytes (ETH_GSTRING_LEN) for each string and 64 bits for each value */
3325 debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
3327 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
3329 netif_err(adapter, drv, adapter->netdev,
3330 "Cannot allocate debug area\n");
3334 rc = ena_com_set_host_attributes(adapter->ena_dev);
3336 if (rc == -EOPNOTSUPP)
3337 netif_warn(adapter, drv, adapter->netdev,
3338 "Cannot set host attributes\n");
3340 netif_err(adapter, drv, adapter->netdev,
3341 "Cannot set host attributes\n");
3347 ena_com_delete_debug_area(adapter->ena_dev);
3350 int ena_update_hw_stats(struct ena_adapter *adapter)
3354 rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
3356 netdev_err(adapter->netdev, "Failed to get ENI stats\n");
3363 static void ena_get_stats64(struct net_device *netdev,
3364 struct rtnl_link_stats64 *stats)
3366 struct ena_adapter *adapter = netdev_priv(netdev);
3367 struct ena_ring *rx_ring, *tx_ring;
3373 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3376 for (i = 0; i < adapter->num_io_queues; i++) {
3379 tx_ring = &adapter->tx_ring[i];
3382 start = u64_stats_fetch_begin(&tx_ring->syncp);
3383 packets = tx_ring->tx_stats.cnt;
3384 bytes = tx_ring->tx_stats.bytes;
3385 } while (u64_stats_fetch_retry(&tx_ring->syncp, start));
3387 stats->tx_packets += packets;
3388 stats->tx_bytes += bytes;
3390 rx_ring = &adapter->rx_ring[i];
3393 start = u64_stats_fetch_begin(&rx_ring->syncp);
3394 packets = rx_ring->rx_stats.cnt;
3395 bytes = rx_ring->rx_stats.bytes;
3396 } while (u64_stats_fetch_retry(&rx_ring->syncp, start));
3398 stats->rx_packets += packets;
3399 stats->rx_bytes += bytes;
3403 start = u64_stats_fetch_begin(&adapter->syncp);
3404 rx_drops = adapter->dev_stats.rx_drops;
3405 tx_drops = adapter->dev_stats.tx_drops;
3406 } while (u64_stats_fetch_retry(&adapter->syncp, start));
3408 stats->rx_dropped = rx_drops;
3409 stats->tx_dropped = tx_drops;
3411 stats->multicast = 0;
3412 stats->collisions = 0;
3414 stats->rx_length_errors = 0;
3415 stats->rx_crc_errors = 0;
3416 stats->rx_frame_errors = 0;
3417 stats->rx_fifo_errors = 0;
3418 stats->rx_missed_errors = 0;
3419 stats->tx_window_errors = 0;
3421 stats->rx_errors = 0;
3422 stats->tx_errors = 0;
3425 static const struct net_device_ops ena_netdev_ops = {
3426 .ndo_open = ena_open,
3427 .ndo_stop = ena_close,
3428 .ndo_start_xmit = ena_start_xmit,
3429 .ndo_select_queue = ena_select_queue,
3430 .ndo_get_stats64 = ena_get_stats64,
3431 .ndo_tx_timeout = ena_tx_timeout,
3432 .ndo_change_mtu = ena_change_mtu,
3433 .ndo_set_mac_address = NULL,
3434 .ndo_validate_addr = eth_validate_addr,
3436 .ndo_xdp_xmit = ena_xdp_xmit,
3439 static void ena_calc_io_queue_size(struct ena_adapter *adapter,
3440 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3442 struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq;
3443 struct ena_com_dev *ena_dev = adapter->ena_dev;
3444 u32 tx_queue_size = ENA_DEFAULT_RING_SIZE;
3445 u32 rx_queue_size = ENA_DEFAULT_RING_SIZE;
3446 u32 max_tx_queue_size;
3447 u32 max_rx_queue_size;
3449 /* If this function is called after driver load, the ring sizes have already
3450 * been configured. Take them into account when recalculating the ring sizes.
3452 if (adapter->tx_ring->ring_size)
3453 tx_queue_size = adapter->tx_ring->ring_size;
3455 if (adapter->rx_ring->ring_size)
3456 rx_queue_size = adapter->rx_ring->ring_size;
3458 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
3459 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
3460 &get_feat_ctx->max_queue_ext.max_queue_ext;
3461 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
3462 max_queue_ext->max_rx_sq_depth);
3463 max_tx_queue_size = max_queue_ext->max_tx_cq_depth;
3465 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3466 max_tx_queue_size = min_t(u32, max_tx_queue_size,
3467 llq->max_llq_depth);
3469 max_tx_queue_size = min_t(u32, max_tx_queue_size,
3470 max_queue_ext->max_tx_sq_depth);
3472 adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3473 max_queue_ext->max_per_packet_tx_descs);
3474 adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3475 max_queue_ext->max_per_packet_rx_descs);
3477 struct ena_admin_queue_feature_desc *max_queues =
3478 &get_feat_ctx->max_queues;
3479 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth,
3480 max_queues->max_sq_depth);
3481 max_tx_queue_size = max_queues->max_cq_depth;
3483 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3484 max_tx_queue_size = min_t(u32, max_tx_queue_size,
3485 llq->max_llq_depth);
3487 max_tx_queue_size = min_t(u32, max_tx_queue_size,
3488 max_queues->max_sq_depth);
3490 adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3491 max_queues->max_packet_tx_descs);
3492 adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3493 max_queues->max_packet_rx_descs);
3496 max_tx_queue_size = rounddown_pow_of_two(max_tx_queue_size);
3497 max_rx_queue_size = rounddown_pow_of_two(max_rx_queue_size);
3499 /* When forcing large headers, we multiply the entry size by 2, and therefore divide
3500 * the queue size by 2, leaving the amount of memory used by the queues unchanged.
3502 if (adapter->large_llq_header_enabled) {
3503 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
3504 ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
3505 max_tx_queue_size /= 2;
3506 dev_info(&adapter->pdev->dev,
3507 "Forcing large headers and decreasing maximum TX queue size to %d\n",
3510 dev_err(&adapter->pdev->dev,
3511 "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
3513 adapter->large_llq_header_enabled = false;
3517 tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
3519 rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
3522 tx_queue_size = rounddown_pow_of_two(tx_queue_size);
3523 rx_queue_size = rounddown_pow_of_two(rx_queue_size);
3525 adapter->max_tx_ring_size = max_tx_queue_size;
3526 adapter->max_rx_ring_size = max_rx_queue_size;
3527 adapter->requested_tx_ring_size = tx_queue_size;
3528 adapter->requested_rx_ring_size = rx_queue_size;
3531 static int ena_device_validate_params(struct ena_adapter *adapter,
3532 struct ena_com_dev_get_features_ctx *get_feat_ctx)
3534 struct net_device *netdev = adapter->netdev;
3537 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
3540 netif_err(adapter, drv, netdev,
3541 "Error, mac address are different\n");
3545 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
3546 netif_err(adapter, drv, netdev,
3547 "Error, device max mtu is smaller than netdev MTU\n");
3554 static void set_default_llq_configurations(struct ena_adapter *adapter,
3555 struct ena_llq_configurations *llq_config,
3556 struct ena_admin_feature_llq_desc *llq)
3558 struct ena_com_dev *ena_dev = adapter->ena_dev;
3560 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
3561 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
3562 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
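/* Large (256B) LLQ entries are only usable when the device supports LLQ and advertises the 256B entry size */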
3564 adapter->large_llq_header_supported =
3565 !!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ));
3566 adapter->large_llq_header_supported &=
3567 !!(llq->entry_size_ctrl_supported &
3568 ENA_ADMIN_LIST_ENTRY_SIZE_256B);
3570 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) &&
3571 adapter->large_llq_header_enabled) {
3572 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
3573 llq_config->llq_ring_entry_size_value = 256;
3575 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
3576 llq_config->llq_ring_entry_size_value = 128;
3580 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3581 struct ena_com_dev *ena_dev,
3582 struct ena_admin_feature_llq_desc *llq,
3583 struct ena_llq_configurations *llq_default_configurations)
3586 u32 llq_feature_mask;
3588 llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3589 if (!(ena_dev->supported_features & llq_feature_mask)) {
3590 dev_warn(&pdev->dev,
3591 "LLQ is not supported Fallback to host mode policy.\n");
3592 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3596 if (!ena_dev->mem_bar) {
3597 netdev_err(ena_dev->net_device,
3598 "LLQ is advertised as supported but device doesn't expose mem bar\n");
3599 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3603 rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3606 "Failed to configure the device mode. Fallback to host mode policy.\n");
3607 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3613 static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
3616 bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
3621 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3622 pci_resource_start(pdev, ENA_MEM_BAR),
3623 pci_resource_len(pdev, ENA_MEM_BAR));
3625 if (!ena_dev->mem_bar)
3631 static int ena_device_init(struct ena_adapter *adapter, struct pci_dev *pdev,
3632 struct ena_com_dev_get_features_ctx *get_feat_ctx,
3635 struct ena_com_dev *ena_dev = adapter->ena_dev;
3636 struct ena_llq_configurations llq_config;
3637 struct device *dev = &pdev->dev;
3638 bool readless_supported;
3643 rc = ena_com_mmio_reg_read_request_init(ena_dev);
3645 dev_err(dev, "Failed to init mmio read less\n");
3649 /* The PCIe configuration space revision id indicates whether mmio register (readless) read is disabled */
3652 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
3653 ena_com_set_mmio_read_mode(ena_dev, readless_supported);
3655 rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
3657 dev_err(dev, "Can not reset device\n");
3658 goto err_mmio_read_less;
3661 rc = ena_com_validate_version(ena_dev);
3663 dev_err(dev, "Device version is too low\n");
3664 goto err_mmio_read_less;
3667 dma_width = ena_com_get_dma_width(ena_dev);
3668 if (dma_width < 0) {
3669 dev_err(dev, "Invalid dma width value %d", dma_width);
3671 goto err_mmio_read_less;
3674 rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_width));
3676 dev_err(dev, "dma_set_mask_and_coherent failed %d\n", rc);
3677 goto err_mmio_read_less;
3680 /* ENA admin level init */
3681 rc = ena_com_admin_init(ena_dev, &aenq_handlers);
3684 "Can not initialize ena admin queue with device\n");
3685 goto err_mmio_read_less;
3688 /* To enable the msix interrupts the driver needs to know the number
3689 * of queues, so the driver uses polling mode to retrieve this information.
3692 ena_com_set_admin_polling_mode(ena_dev, true);
3694 ena_config_host_info(ena_dev, pdev);
3696 /* Get Device Attributes*/
3697 rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
3699 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
3700 goto err_admin_init;
3703 /* Try to turn on all the available aenq groups */
3704 aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
3705 BIT(ENA_ADMIN_FATAL_ERROR) |
3706 BIT(ENA_ADMIN_WARNING) |
3707 BIT(ENA_ADMIN_NOTIFICATION) |
3708 BIT(ENA_ADMIN_KEEP_ALIVE);
3710 aenq_groups &= get_feat_ctx->aenq.supported_groups;
3712 rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
3714 dev_err(dev, "Cannot configure aenq groups rc= %d\n", rc);
3715 goto err_admin_init;
3718 *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
3720 set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq);
3722 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
3725 dev_err(dev, "ENA device init failed\n");
3726 goto err_admin_init;
3729 ena_calc_io_queue_size(adapter, get_feat_ctx);
3734 ena_com_delete_host_info(ena_dev);
3735 ena_com_admin_destroy(ena_dev);
3737 ena_com_mmio_reg_read_request_destroy(ena_dev);
3742 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
3744 struct ena_com_dev *ena_dev = adapter->ena_dev;
3745 struct device *dev = &adapter->pdev->dev;
3748 rc = ena_enable_msix(adapter);
3750 dev_err(dev, "Can not reserve msix vectors\n");
3754 ena_setup_mgmnt_intr(adapter);
3756 rc = ena_request_mgmnt_irq(adapter);
3758 dev_err(dev, "Can not setup management interrupts\n");
3759 goto err_disable_msix;
3762 ena_com_set_admin_polling_mode(ena_dev, false);
3764 ena_com_admin_aenq_enable(ena_dev);
3769 ena_disable_msix(adapter);
3774 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
3776 struct net_device *netdev = adapter->netdev;
3777 struct ena_com_dev *ena_dev = adapter->ena_dev;
3780 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
3783 netif_carrier_off(netdev);
3785 del_timer_sync(&adapter->timer_service);
3787 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
3788 adapter->dev_up_before_reset = dev_up;
3790 ena_com_set_admin_running_state(ena_dev, false);
3792 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
3795 /* Stop the device from sending AENQ events (if the reset flag is set
3796 * and the device is up, ena_down() has already reset the device).
3798 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
3799 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
3801 ena_free_mgmnt_irq(adapter);
3803 ena_disable_msix(adapter);
3805 ena_com_abort_admin_commands(ena_dev);
3807 ena_com_wait_for_abort_completion(ena_dev);
3809 ena_com_admin_destroy(ena_dev);
3811 ena_com_mmio_reg_read_request_destroy(ena_dev);
3813 /* return reset reason to default value */
3814 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3816 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3817 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3820 static int ena_restore_device(struct ena_adapter *adapter)
3822 struct ena_com_dev_get_features_ctx get_feat_ctx;
3823 struct ena_com_dev *ena_dev = adapter->ena_dev;
3824 struct pci_dev *pdev = adapter->pdev;
3825 struct ena_ring *txr;
3829 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3830 rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state);
3832 dev_err(&pdev->dev, "Can not initialize device\n");
3835 adapter->wd_state = wd_state;
3837 count = adapter->xdp_num_queues + adapter->num_io_queues;
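/* The placement policy and max header size may change across a reset; refresh them on every TX/XDP ring */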
3838 for (i = 0 ; i < count; i++) {
3839 txr = &adapter->tx_ring[i];
3840 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
3841 txr->tx_max_header_size = ena_dev->tx_max_header_size;
3844 rc = ena_device_validate_params(adapter, &get_feat_ctx);
3846 dev_err(&pdev->dev, "Validation of device parameters failed\n");
3847 goto err_device_destroy;
3850 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
3852 dev_err(&pdev->dev, "Enable MSI-X failed\n");
3853 goto err_device_destroy;
3855 /* If the interface was up before the reset, bring it up */
3856 if (adapter->dev_up_before_reset) {
3857 rc = ena_up(adapter);
3859 dev_err(&pdev->dev, "Failed to create I/O queues\n");
3860 goto err_disable_msix;
3864 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3866 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3867 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
3868 netif_carrier_on(adapter->netdev);
3870 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3871 adapter->last_keep_alive_jiffies = jiffies;
3875 ena_free_mgmnt_irq(adapter);
3876 ena_disable_msix(adapter);
3878 ena_com_abort_admin_commands(ena_dev);
3879 ena_com_wait_for_abort_completion(ena_dev);
3880 ena_com_admin_destroy(ena_dev);
3881 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
3882 ena_com_mmio_reg_read_request_destroy(ena_dev);
3884 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3885 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
3887 "Reset attempt failed. Can not reset the device\n");
3892 static void ena_fw_reset_device(struct work_struct *work)
3894 struct ena_adapter *adapter =
3895 container_of(work, struct ena_adapter, reset_task);
3899 if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3900 ena_destroy_device(adapter, false);
3901 ena_restore_device(adapter);
3903 dev_err(&adapter->pdev->dev, "Device reset completed successfully\n");
3909 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
3910 struct ena_ring *rx_ring)
3912 struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi);
3914 if (likely(READ_ONCE(ena_napi->first_interrupt)))
3917 if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
3920 rx_ring->no_interrupt_event_cnt++;
3922 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
3923 netif_err(adapter, rx_err, adapter->netdev,
3924 "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
3927 ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3934 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
3935 struct ena_ring *tx_ring)
3937 struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
3938 unsigned int time_since_last_napi;
3939 unsigned int missing_tx_comp_to;
3940 bool is_tx_comp_time_expired;
3941 struct ena_tx_buffer *tx_buf;
3942 unsigned long last_jiffies;
3946 for (i = 0; i < tx_ring->ring_size; i++) {
3947 tx_buf = &tx_ring->tx_buffer_info[i];
3948 last_jiffies = tx_buf->last_jiffies;
3950 if (last_jiffies == 0)
3951 /* no pending Tx at this location */
3954 is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
3955 2 * adapter->missing_tx_completion_to);
3957 if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) {
3958 /* If no interrupt has been received after the grace period,
3959 * schedule a reset
3961 netif_err(adapter, tx_err, adapter->netdev,
3962 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
3964 ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
3968 is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
3969 adapter->missing_tx_completion_to);
3971 if (unlikely(is_tx_comp_time_expired)) {
3972 if (!tx_buf->print_once) {
3973 time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
3974 missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
3975 netif_notice(adapter, tx_err, adapter->netdev,
3976 "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n",
3977 tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to);
3980 tx_buf->print_once = 1;
3985 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
3986 netif_err(adapter, tx_err, adapter->netdev,
3987 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
3989 adapter->missing_tx_completion_threshold);
3990 ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
3994 ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx,
4000 static void check_for_missing_completions(struct ena_adapter *adapter)
4002 struct ena_ring *tx_ring;
4003 struct ena_ring *rx_ring;
4007 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues;
4008 /* Make sure the device isn't being turned off by another context in the meantime */
4011 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
4014 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
4017 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
4020 budget = ENA_MONITORED_TX_QUEUES;
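/* Inspect at most ENA_MONITORED_TX_QUEUES rings per timer tick, resuming from last_monitored_tx_qid on the next run */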
4022 for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) {
4023 tx_ring = &adapter->tx_ring[i];
4024 rx_ring = &adapter->rx_ring[i];
4026 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
4030 rc = !ENA_IS_XDP_INDEX(adapter, i) ?
4031 check_for_rx_interrupt_queue(adapter, rx_ring) : 0;
4040 adapter->last_monitored_tx_qid = i % io_queue_count;
4043 /* trigger napi schedule after 2 consecutive detections */
4044 #define EMPTY_RX_REFILL 2
4045 /* For the rare case where the device runs out of Rx descriptors and the
4046 * napi handler failed to refill new Rx descriptors (due to a lack of memory, for example).
4048 * This case will lead to a deadlock:
4049 * the device won't send interrupts since all the new Rx packets will be dropped,
4050 * and the napi handler won't allocate new Rx descriptors, so the device won't be
4051 * able to receive new packets.
4053 * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
4054 * It is recommended to have at least 512MB, with a minimum of 128MB for
4055 * constrained environments.
4057 * When such a situation is detected, reschedule napi.
4059 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
4061 struct ena_ring *rx_ring;
4062 int i, refill_required;
4064 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
4067 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
4070 for (i = 0; i < adapter->num_io_queues; i++) {
4071 rx_ring = &adapter->rx_ring[i];
4073 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq);
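/* ring_size - 1 free entries means no Rx buffers are posted, i.e. the ring is completely empty */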
4074 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
4075 rx_ring->empty_rx_queue++;
4077 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
4078 ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1,
4081 netif_err(adapter, drv, adapter->netdev,
4082 "Trigger refill for ring %d\n", i);
4084 napi_schedule(rx_ring->napi);
4085 rx_ring->empty_rx_queue = 0;
4088 rx_ring->empty_rx_queue = 0;
4093 /* Check for keep alive expiration */
4094 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
4096 unsigned long keep_alive_expired;
4098 if (!adapter->wd_state)
4101 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
4104 keep_alive_expired = adapter->last_keep_alive_jiffies +
4105 adapter->keep_alive_timeout;
4106 if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
4107 netif_err(adapter, drv, adapter->netdev,
4108 "Keep alive watchdog timeout.\n");
4109 ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
4111 ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
4115 static void check_for_admin_com_state(struct ena_adapter *adapter)
4117 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
4118 netif_err(adapter, drv, adapter->netdev,
4119 "ENA admin queue is not in running state!\n");
4120 ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
4122 ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
4126 static void ena_update_hints(struct ena_adapter *adapter,
4127 struct ena_admin_ena_hw_hints *hints)
4129 struct net_device *netdev = adapter->netdev;
4131 if (hints->admin_completion_tx_timeout)
4132 adapter->ena_dev->admin_queue.completion_timeout =
4133 hints->admin_completion_tx_timeout * 1000;
4135 if (hints->mmio_read_timeout)
4136 /* convert to usec */
4137 adapter->ena_dev->mmio_read.reg_read_to =
4138 hints->mmio_read_timeout * 1000;
4140 if (hints->missed_tx_completion_count_threshold_to_reset)
4141 adapter->missing_tx_completion_threshold =
4142 hints->missed_tx_completion_count_threshold_to_reset;
4144 if (hints->missing_tx_completion_timeout) {
4145 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
4146 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
4148 adapter->missing_tx_completion_to =
4149 msecs_to_jiffies(hints->missing_tx_completion_timeout);
4152 if (hints->netdev_wd_timeout)
4153 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
4155 if (hints->driver_watchdog_timeout) {
4156 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
4157 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
4159 adapter->keep_alive_timeout =
4160 msecs_to_jiffies(hints->driver_watchdog_timeout);
4164 static void ena_update_host_info(struct ena_admin_host_info *host_info,
4165 struct net_device *netdev)
4167 host_info->supported_network_features[0] =
4168 netdev->features & GENMASK_ULL(31, 0);
4169 host_info->supported_network_features[1] =
4170 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
4173 static void ena_timer_service(struct timer_list *t)
4175 struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
4176 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
4177 struct ena_admin_host_info *host_info =
4178 adapter->ena_dev->host_attr.host_info;
4180 check_for_missing_keep_alive(adapter);
4182 check_for_admin_com_state(adapter);
4184 check_for_missing_completions(adapter);
4186 check_for_empty_rx_ring(adapter);
4189 ena_dump_stats_to_buf(adapter, debug_area);
4192 ena_update_host_info(host_info, adapter->netdev);
4194 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
4195 netif_err(adapter, drv, adapter->netdev,
4196 "Trigger reset is on\n");
4197 ena_dump_stats_to_dmesg(adapter);
4198 queue_work(ena_wq, &adapter->reset_task);
4202 /* Reset the timer */
4203 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
4206 static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
4207 struct ena_com_dev *ena_dev,
4208 struct ena_com_dev_get_features_ctx *get_feat_ctx)
4210 u32 io_tx_sq_num, io_tx_cq_num, io_rx_num, max_num_io_queues;
4212 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
4213 struct ena_admin_queue_ext_feature_fields *max_queue_ext =
4214 &get_feat_ctx->max_queue_ext.max_queue_ext;
4215 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num,
4216 max_queue_ext->max_rx_cq_num);
4218 io_tx_sq_num = max_queue_ext->max_tx_sq_num;
4219 io_tx_cq_num = max_queue_ext->max_tx_cq_num;
4221 struct ena_admin_queue_feature_desc *max_queues =
4222 &get_feat_ctx->max_queues;
4223 io_tx_sq_num = max_queues->max_sq_num;
4224 io_tx_cq_num = max_queues->max_cq_num;
4225 io_rx_num = min_t(u32, io_tx_sq_num, io_tx_cq_num);
4228 /* In case of LLQ use the llq fields for the tx SQ/CQ */
4229 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4230 io_tx_sq_num = get_feat_ctx->llq.max_llq_num;
4232 max_num_io_queues = min_t(u32, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
4233 max_num_io_queues = min_t(u32, max_num_io_queues, io_rx_num);
4234 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_sq_num);
4235 max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
4236 /* 1 IRQ for mgmnt and 1 IRQ for each TX/RX queue pair */
4237 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
4239 return max_num_io_queues;
4242 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
4243 struct net_device *netdev)
4245 netdev_features_t dev_features = 0;
4247 /* Set offload features */
4248 if (feat->offload.tx &
4249 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
4250 dev_features |= NETIF_F_IP_CSUM;
4252 if (feat->offload.tx &
4253 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
4254 dev_features |= NETIF_F_IPV6_CSUM;
4256 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
4257 dev_features |= NETIF_F_TSO;
4259 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
4260 dev_features |= NETIF_F_TSO6;
4262 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
4263 dev_features |= NETIF_F_TSO_ECN;
4265 if (feat->offload.rx_supported &
4266 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
4267 dev_features |= NETIF_F_RXCSUM;
4269 if (feat->offload.rx_supported &
4270 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
4271 dev_features |= NETIF_F_RXCSUM;
4279 netdev->hw_features |= netdev->features;
4280 netdev->vlan_features |= netdev->features;
4283 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
4284 struct ena_com_dev_get_features_ctx *feat)
4286 struct net_device *netdev = adapter->netdev;
4288 /* Copy mac address */
4289 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
4290 eth_hw_addr_random(netdev);
4291 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
4293 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
4294 eth_hw_addr_set(netdev, adapter->mac_addr);
4297 /* Set offload features */
4298 ena_set_dev_offloads(feat, netdev);
4300 adapter->max_mtu = feat->dev_attr.max_mtu;
4301 netdev->max_mtu = adapter->max_mtu;
4302 netdev->min_mtu = ENA_MIN_MTU;
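/* Set up a default RSS configuration: fill the indirection table with a
 * round-robin spread over the IO queues and configure the Toeplitz hash
 * function with the default hash control settings.
 */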
4305 static int ena_rss_init_default(struct ena_adapter *adapter)
4307 struct ena_com_dev *ena_dev = adapter->ena_dev;
4308 struct device *dev = &adapter->pdev->dev;
4312 rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
4314 dev_err(dev, "Cannot init indirect table\n");
4318 for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
4319 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues);
4320 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
4321 ENA_IO_RXQ_IDX(val));
4323 dev_err(dev, "Cannot fill indirect table\n");
4324 goto err_fill_indir;
4328 rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
4329 ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
4330 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4331 dev_err(dev, "Cannot fill hash function\n");
4332 goto err_fill_indir;
4335 rc = ena_com_set_default_hash_ctrl(ena_dev);
4336 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
4337 dev_err(dev, "Cannot fill hash control\n");
4338 goto err_fill_indir;
4344 ena_com_rss_destroy(ena_dev);
4350 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
4352 int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4354 pci_release_selected_regions(pdev, release_bars);
4357 /* ena_probe - Device Initialization Routine
4358 * @pdev: PCI device information struct
4359 * @ent: entry in ena_pci_tbl
4361 * Returns 0 on success, negative on failure
4363 * ena_probe initializes an adapter identified by a pci_dev structure.
4364 * The OS initialization, configuring of the adapter private structure,
4365 * and a hardware reset occur.
4367 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4369 struct ena_com_dev_get_features_ctx get_feat_ctx;
4370 struct ena_com_dev *ena_dev = NULL;
4371 struct ena_adapter *adapter;
4372 struct net_device *netdev;
4373 static int adapters_found;
4374 u32 max_num_io_queues;
4378 dev_dbg(&pdev->dev, "%s\n", __func__);
4380 rc = pci_enable_device_mem(pdev);
4382 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
4386 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS));
4388 dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc);
4389 goto err_disable_device;
4392 pci_set_master(pdev);
4394 ena_dev = vzalloc(sizeof(*ena_dev));
4397 goto err_disable_device;
4400 bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
4401 rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
4403 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
4405 goto err_free_ena_dev;
4408 ena_dev->reg_bar = devm_ioremap(&pdev->dev,
4409 pci_resource_start(pdev, ENA_REG_BAR),
4410 pci_resource_len(pdev, ENA_REG_BAR));
4411 if (!ena_dev->reg_bar) {
4412 dev_err(&pdev->dev, "Failed to remap regs bar\n");
4414 goto err_free_region;
4417 ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
4419 ena_dev->dmadev = &pdev->dev;
4421 netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), ENA_MAX_RINGS);
4423 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
4425 goto err_free_region;
4428 SET_NETDEV_DEV(netdev, &pdev->dev);
4429 adapter = netdev_priv(netdev);
4430 adapter->ena_dev = ena_dev;
4431 adapter->netdev = netdev;
4432 adapter->pdev = pdev;
4433 adapter->msg_enable = DEFAULT_MSG_ENABLE;
4435 ena_dev->net_device = netdev;
4437 pci_set_drvdata(pdev, adapter);
4439 rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
4441 dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n");
4442 goto err_netdev_destroy;
4445 rc = ena_device_init(adapter, pdev, &get_feat_ctx, &wd_state);
4447 dev_err(&pdev->dev, "ENA device init failed\n");
4450 goto err_netdev_destroy;
4453 /* Initial TX and RX interrupt delay. Assumes 1 usec granularity.
4454 * Updated during device initialization with the real granularity
4456 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
4457 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
4458 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
4459 max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev, &get_feat_ctx);
4460 if (unlikely(!max_num_io_queues)) {
4462 goto err_device_destroy;
4465 ena_set_conf_feat_params(adapter, &get_feat_ctx);
4467 adapter->reset_reason = ENA_REGS_RESET_NORMAL;
4469 adapter->num_io_queues = max_num_io_queues;
4470 adapter->max_num_io_queues = max_num_io_queues;
4471 adapter->last_monitored_tx_qid = 0;
4473 adapter->xdp_first_ring = 0;
4474 adapter->xdp_num_queues = 0;
4476 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
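	/* With the device-memory (LLQ) TX placement policy the device may
	 * report, via the accel-mode capability bits, that TX meta descriptor
	 * caching has to be disabled; latch that request here.
	 */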
4477 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
4478 adapter->disable_meta_caching =
4479 !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
4480 BIT(ENA_ADMIN_DISABLE_META_CACHING));
4482 adapter->wd_state = wd_state;
4484 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
4486 rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
4489 "Failed to query interrupt moderation feature\n");
4490 goto err_device_destroy;
4493 ena_init_io_rings(adapter,
4495 adapter->xdp_num_queues +
4496 adapter->num_io_queues);
4498 netdev->netdev_ops = &ena_netdev_ops;
4499 netdev->watchdog_timeo = TX_TIMEOUT;
4500 ena_set_ethtool_ops(netdev);
4502 netdev->priv_flags |= IFF_UNICAST_FLT;
4504 u64_stats_init(&adapter->syncp);
4506 rc = ena_enable_msix_and_set_admin_interrupts(adapter);
4509 "Failed to enable and set the admin interrupts\n");
4510 goto err_worker_destroy;
4512 rc = ena_rss_init_default(adapter);
4513 if (rc && (rc != -EOPNOTSUPP)) {
4514 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
4518 ena_config_debug_area(adapter);
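	/* XDP support is only advertised when the current queue count leaves
	 * enough spare IO queues to back the per-channel XDP TX rings
	 * (ena_xdp_legal_queue_count()).
	 */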
4520 if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
4521 netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
4522 NETDEV_XDP_ACT_REDIRECT;
4524 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
4526 netif_carrier_off(netdev);
4528 rc = register_netdev(netdev);
4530 dev_err(&pdev->dev, "Cannot register net device\n");
4534 INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
4536 adapter->last_keep_alive_jiffies = jiffies;
4537 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
4538 adapter->missing_tx_completion_to = TX_TIMEOUT;
4539 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
4541 ena_update_hints(adapter, &get_feat_ctx.hw_hints);
4543 timer_setup(&adapter->timer_service, ena_timer_service, 0);
4544 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
4546 dev_info(&pdev->dev,
4547 "%s found at mem %lx, mac addr %pM\n",
4548 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
4551 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
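	/* Error unwind: the labels below undo the setup above in reverse
	 * order: debug area and RSS, device reset and admin queue, MSI-X and
	 * management IRQ, timer, host info, netdev, BARs and finally the PCI
	 * device itself.
	 */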
4558 ena_com_delete_debug_area(ena_dev);
4559 ena_com_rss_destroy(ena_dev);
4561 ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
4562 /* stop submitting admin commands on a device that was reset */
4563 ena_com_set_admin_running_state(ena_dev, false);
4564 ena_free_mgmnt_irq(adapter);
4565 ena_disable_msix(adapter);
4567 del_timer(&adapter->timer_service);
4569 ena_com_delete_host_info(ena_dev);
4570 ena_com_admin_destroy(ena_dev);
4572 free_netdev(netdev);
4574 ena_release_bars(ena_dev, pdev);
4578 pci_disable_device(pdev);
4582 /*****************************************************************************/
4584 /* __ena_shutoff - Helper used in both PCI remove/shutdown routines
4585 * @pdev: PCI device information struct
4586 * @shutdown: true for a shutdown operation, false for a device removal
4588 * __ena_shutoff is a helper routine that does the real work on the shutdown and
4589 * removal paths; the only difference between them is whether the netdevice is
4590 * detached or unregistered.
4592 static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
4594 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4595 struct ena_com_dev *ena_dev;
4596 struct net_device *netdev;
4598 ena_dev = adapter->ena_dev;
4599 netdev = adapter->netdev;
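	/* The aRFS CPU reverse-map holds references to the IO IRQs; release it
	 * before those IRQs are freed as part of ena_destroy_device().
	 */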
4601 #ifdef CONFIG_RFS_ACCEL
4602 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
4603 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
4604 netdev->rx_cpu_rmap = NULL;
4606 #endif /* CONFIG_RFS_ACCEL */
4608 /* Make sure timer and reset routine won't be called after
4609 * freeing device resources.
4611 del_timer_sync(&adapter->timer_service);
4612 cancel_work_sync(&adapter->reset_task);
4614 rtnl_lock(); /* lock is released inside the if-else block below */
4615 adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN;
4616 ena_destroy_device(adapter, true);
4619 netif_device_detach(netdev);
4624 unregister_netdev(netdev);
4625 free_netdev(netdev);
4628 ena_com_rss_destroy(ena_dev);
4630 ena_com_delete_debug_area(ena_dev);
4632 ena_com_delete_host_info(ena_dev);
4634 ena_release_bars(ena_dev, pdev);
4636 pci_disable_device(pdev);
4641 /* ena_remove - Device Removal Routine
4642 * @pdev: PCI device information struct
4644 * ena_remove is called by the PCI subsystem to alert the driver
4645 * that it should release a PCI device.
4648 static void ena_remove(struct pci_dev *pdev)
4650 __ena_shutoff(pdev, false);
4653 /* ena_shutdown - Device Shutdown Routine
4654 * @pdev: PCI device information struct
4656 * ena_shutdown is called by the PCI subsystem to alert the driver that
4657 * a shutdown/reboot (or kexec) is happening and the device must be disabled.
4660 static void ena_shutdown(struct pci_dev *pdev)
4662 __ena_shutoff(pdev, true);
4665 /* ena_suspend - PM suspend callback
4666 * @dev_d: Device information struct
4668 static int __maybe_unused ena_suspend(struct device *dev_d)
4670 struct pci_dev *pdev = to_pci_dev(dev_d);
4671 struct ena_adapter *adapter = pci_get_drvdata(pdev);
4673 ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp);
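	/* A reset that was requested but not yet serviced is dropped here:
	 * the device is destroyed below anyway and will be fully
	 * re-initialized by ena_resume().
	 */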
4676 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
4678 "Ignoring device reset request as the device is being suspended\n");
4679 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
4681 ena_destroy_device(adapter, true);
4686 /* ena_resume - PM resume callback
4687 * @dev_d: Device information struct
4689 static int __maybe_unused ena_resume(struct device *dev_d)
4691 struct ena_adapter *adapter = dev_get_drvdata(dev_d);
4694 ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp);
4697 rc = ena_restore_device(adapter);
4702 static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);
4704 static struct pci_driver ena_pci_driver = {
4705 .name = DRV_MODULE_NAME,
4706 .id_table = ena_pci_tbl,
4708 .remove = ena_remove,
4709 .shutdown = ena_shutdown,
4710 .driver.pm = &ena_pm_ops,
4711 .sriov_configure = pci_sriov_configure_simple,
4714 static int __init ena_init(void)
4718 ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
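	/* ena_wq backs adapter->reset_task, so it must exist before
	 * pci_register_driver() can probe any device; it is destroyed only
	 * after the driver is unregistered in ena_cleanup().
	 */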
4720 pr_err("Failed to create workqueue\n");
4724 ret = pci_register_driver(&ena_pci_driver);
4726 destroy_workqueue(ena_wq);
4731 static void __exit ena_cleanup(void)
4733 pci_unregister_driver(&ena_pci_driver);
4736 destroy_workqueue(ena_wq);
4741 /******************************************************************************
4742 ******************************** AENQ Handlers *******************************
4743 *****************************************************************************/
4744 /* ena_update_on_link_change:
4745 * Notify the network interface about the change in link status
4747 static void ena_update_on_link_change(void *adapter_data,
4748 struct ena_admin_aenq_entry *aenq_e)
4750 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4751 struct ena_admin_aenq_link_change_desc *aenq_desc =
4752 (struct ena_admin_aenq_link_change_desc *)aenq_e;
4753 int status = aenq_desc->flags &
4754 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
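	/* While a reset is in progress the carrier is intentionally left down
	 * even on a link-up event; it is raised again once the reset
	 * completes and ENA_FLAG_ONGOING_RESET is cleared.
	 */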
4757 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
4758 set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
4759 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
4760 netif_carrier_on(adapter->netdev);
4762 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
4763 netif_carrier_off(adapter->netdev);
4767 static void ena_keep_alive_wd(void *adapter_data,
4768 struct ena_admin_aenq_entry *aenq_e)
4770 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4771 struct ena_admin_aenq_keep_alive_desc *desc;
4775 desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
4776 adapter->last_keep_alive_jiffies = jiffies;
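	/* This timestamp is what the keep-alive watchdog in the timer service
	 * compares against keep_alive_timeout to decide whether the device is
	 * still responsive.
	 */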
4778 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
4779 tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low;
4781 u64_stats_update_begin(&adapter->syncp);
4782 /* These stats are accumulated by the device, so the counters indicate
4783 * all drops since last reset.
4785 adapter->dev_stats.rx_drops = rx_drops;
4786 adapter->dev_stats.tx_drops = tx_drops;
4787 u64_stats_update_end(&adapter->syncp);
4790 static void ena_notification(void *adapter_data,
4791 struct ena_admin_aenq_entry *aenq_e)
4793 struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
4794 struct ena_admin_ena_hw_hints *hints;
4796 WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
4797 "Invalid group(%x) expected %x\n",
4798 aenq_e->aenq_common_desc.group,
4799 ENA_ADMIN_NOTIFICATION);
4801 switch (aenq_e->aenq_common_desc.syndrome) {
4802 case ENA_ADMIN_UPDATE_HINTS:
4803 hints = (struct ena_admin_ena_hw_hints *)
4804 (&aenq_e->inline_data_w4);
4805 ena_update_hints(adapter, hints);
4808 netif_err(adapter, drv, adapter->netdev,
4809 "Invalid aenq notification link state %d\n",
4810 aenq_e->aenq_common_desc.syndrome);
4814 /* This handler is called for an unknown event group or an unimplemented handler */
4815 static void unimplemented_aenq_handler(void *data,
4816 struct ena_admin_aenq_entry *aenq_e)
4818 struct ena_adapter *adapter = (struct ena_adapter *)data;
4820 netif_err(adapter, drv, adapter->netdev,
4821 "Unknown event was received or event with unimplemented handler\n");
4824 static struct ena_aenq_handlers aenq_handlers = {
4826 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
4827 [ENA_ADMIN_NOTIFICATION] = ena_notification,
4828 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
4830 .unimplemented_handler = unimplemented_aenq_handler
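/* ena_com's AENQ interrupt handler dispatches each event by looking up
 * aenq_common_desc.group in the handlers array above, falling back to
 * unimplemented_handler for any group without a registered callback.
 */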
4833 module_init(ena_init);
4834 module_exit(ena_cleanup);