drivers/net/ethernet/amazon/ena/ena_netdev.c
1 /*
2  * Copyright 2015 Amazon.com, Inc. or its affiliates.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
35 #ifdef CONFIG_RFS_ACCEL
36 #include <linux/cpu_rmap.h>
37 #endif /* CONFIG_RFS_ACCEL */
38 #include <linux/ethtool.h>
39 #include <linux/if_vlan.h>
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/numa.h>
43 #include <linux/pci.h>
44 #include <linux/utsname.h>
45 #include <linux/version.h>
46 #include <linux/vmalloc.h>
47 #include <net/ip.h>
48
49 #include "ena_netdev.h"
50 #include "ena_pci_id_tbl.h"
51
52 static char version[] = DEVICE_NAME " v" DRV_MODULE_VERSION "\n";
53
54 MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
55 MODULE_DESCRIPTION(DEVICE_NAME);
56 MODULE_LICENSE("GPL");
57 MODULE_VERSION(DRV_MODULE_VERSION);
58
59 /* Time in jiffies before concluding the transmitter is hung. */
60 #define TX_TIMEOUT  (5 * HZ)
61
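/* NAPI poll budget for this driver; 64 matches the kernel's conventional
 * NAPI weight (NAPI_POLL_WEIGHT).
 */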
62 #define ENA_NAPI_BUDGET 64
63
64 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
65                 NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
66 static int debug = -1;
67 module_param(debug, int, 0);
68 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69
70 static struct ena_aenq_handlers aenq_handlers;
71
72 static struct workqueue_struct *ena_wq;
73
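/* ena_pci_tbl itself comes from ena_pci_id_tbl.h, included above. */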
74 MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
75
76 static int ena_rss_init_default(struct ena_adapter *adapter);
77 static void check_for_admin_com_state(struct ena_adapter *adapter);
78 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
79 static int ena_restore_device(struct ena_adapter *adapter);
80
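/* ena_tx_timeout - netdev watchdog callback (ndo_tx_timeout), invoked when
 * a Tx queue has been stopped for longer than TX_TIMEOUT. Recovery happens
 * in the reset path; here the adapter is only flagged for reset.
 */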
81 static void ena_tx_timeout(struct net_device *dev)
82 {
83         struct ena_adapter *adapter = netdev_priv(dev);
84
85         /* Change the state of the device to trigger the reset.
86          * Check that a reset is not already in progress.
87          */
88
89         if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
90                 return;
91
92         adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
93         u64_stats_update_begin(&adapter->syncp);
94         adapter->dev_stats.tx_timeout++;
95         u64_stats_update_end(&adapter->syncp);
96
97         netif_err(adapter, tx_err, dev, "Transmit timeout\n");
98 }
99
100 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
101 {
102         int i;
103
104         for (i = 0; i < adapter->num_queues; i++)
105                 adapter->rx_ring[i].mtu = mtu;
106 }
107
108 static int ena_change_mtu(struct net_device *dev, int new_mtu)
109 {
110         struct ena_adapter *adapter = netdev_priv(dev);
111         int ret;
112
113         ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
114         if (!ret) {
115                 netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
116                 update_rx_ring_mtu(adapter, new_mtu);
117                 dev->mtu = new_mtu;
118         } else {
119                 netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
120                           new_mtu);
121         }
122
123         return ret;
124 }
125
126 static int ena_init_rx_cpu_rmap(struct ena_adapter *adapter)
127 {
128 #ifdef CONFIG_RFS_ACCEL
129         u32 i;
130         int rc;
131
132         adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_queues);
133         if (!adapter->netdev->rx_cpu_rmap)
134                 return -ENOMEM;
135         for (i = 0; i < adapter->num_queues; i++) {
136                 int irq_idx = ENA_IO_IRQ_IDX(i);
137
138                 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap,
139                                       pci_irq_vector(adapter->pdev, irq_idx));
140                 if (rc) {
141                         free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
142                         adapter->netdev->rx_cpu_rmap = NULL;
143                         return rc;
144                 }
145         }
146 #endif /* CONFIG_RFS_ACCEL */
147         return 0;
148 }
149
150 static void ena_init_io_rings_common(struct ena_adapter *adapter,
151                                      struct ena_ring *ring, u16 qid)
152 {
153         ring->qid = qid;
154         ring->pdev = adapter->pdev;
155         ring->dev = &adapter->pdev->dev;
156         ring->netdev = adapter->netdev;
157         ring->napi = &adapter->ena_napi[qid].napi;
158         ring->adapter = adapter;
159         ring->ena_dev = adapter->ena_dev;
160         ring->per_napi_packets = 0;
161         ring->per_napi_bytes = 0;
162         ring->cpu = 0;
163         ring->first_interrupt = false;
164         ring->no_interrupt_event_cnt = 0;
165         u64_stats_init(&ring->syncp);
166 }
167
168 static void ena_init_io_rings(struct ena_adapter *adapter)
169 {
170         struct ena_com_dev *ena_dev;
171         struct ena_ring *txr, *rxr;
172         int i;
173
174         ena_dev = adapter->ena_dev;
175
176         for (i = 0; i < adapter->num_queues; i++) {
177                 txr = &adapter->tx_ring[i];
178                 rxr = &adapter->rx_ring[i];
179
180                 /* TX/RX common ring state */
181                 ena_init_io_rings_common(adapter, txr, i);
182                 ena_init_io_rings_common(adapter, rxr, i);
183
184                 /* TX specific ring state */
185                 txr->ring_size = adapter->tx_ring_size;
186                 txr->tx_max_header_size = ena_dev->tx_max_header_size;
187                 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type;
188                 txr->sgl_size = adapter->max_tx_sgl_size;
189                 txr->smoothed_interval =
190                         ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
191
192                 /* RX specific ring state */
193                 rxr->ring_size = adapter->rx_ring_size;
194                 rxr->rx_copybreak = adapter->rx_copybreak;
195                 rxr->sgl_size = adapter->max_rx_sgl_size;
196                 rxr->smoothed_interval =
197                         ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
198                 rxr->empty_rx_queue = 0;
199         }
200 }
201
202 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
203  * @adapter: network interface device structure
204  * @qid: queue index
205  *
206  * Return 0 on success, negative on failure
207  */
208 static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
209 {
210         struct ena_ring *tx_ring = &adapter->tx_ring[qid];
211         struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
212         int size, i, node;
213
214         if (tx_ring->tx_buffer_info) {
215                 netif_err(adapter, ifup,
216                           adapter->netdev, "tx_buffer_info is not NULL");
217                 return -EEXIST;
218         }
219
220         size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size;
221         node = cpu_to_node(ena_irq->cpu);
222
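        /* Prefer allocating on the NUMA node that services this queue's
         * IRQ; fall back to any node rather than fail, since locality
         * here is an optimization, not a requirement.
         */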
223         tx_ring->tx_buffer_info = vzalloc_node(size, node);
224         if (!tx_ring->tx_buffer_info) {
225                 tx_ring->tx_buffer_info = vzalloc(size);
226                 if (!tx_ring->tx_buffer_info)
227                         return -ENOMEM;
228         }
229
230         size = sizeof(u16) * tx_ring->ring_size;
231         tx_ring->free_tx_ids = vzalloc_node(size, node);
232         if (!tx_ring->free_tx_ids) {
233                 tx_ring->free_tx_ids = vzalloc(size);
234                 if (!tx_ring->free_tx_ids) {
235                         vfree(tx_ring->tx_buffer_info);
236                         return -ENOMEM;
237                 }
238         }
239
240         size = tx_ring->tx_max_header_size;
241         tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
242         if (!tx_ring->push_buf_intermediate_buf) {
243                 tx_ring->push_buf_intermediate_buf = vzalloc(size);
244                 if (!tx_ring->push_buf_intermediate_buf) {
245                         vfree(tx_ring->tx_buffer_info);
246                         vfree(tx_ring->free_tx_ids);
247                         return -ENOMEM;
248                 }
249         }
250
251         /* Req id ring for TX out of order completions */
252         for (i = 0; i < tx_ring->ring_size; i++)
253                 tx_ring->free_tx_ids[i] = i;
254
255         /* Reset tx statistics */
256         memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats));
257
258         tx_ring->next_to_use = 0;
259         tx_ring->next_to_clean = 0;
260         tx_ring->cpu = ena_irq->cpu;
261         return 0;
262 }
263
264 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
265  * @adapter: network interface device structure
266  * @qid: queue index
267  *
268  * Free all transmit software resources
269  */
270 static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
271 {
272         struct ena_ring *tx_ring = &adapter->tx_ring[qid];
273
274         vfree(tx_ring->tx_buffer_info);
275         tx_ring->tx_buffer_info = NULL;
276
277         vfree(tx_ring->free_tx_ids);
278         tx_ring->free_tx_ids = NULL;
279
280         vfree(tx_ring->push_buf_intermediate_buf);
281         tx_ring->push_buf_intermediate_buf = NULL;
282 }
283
284 /* ena_setup_all_tx_resources - allocate I/O Tx resources for all queues
285  * @adapter: private structure
286  *
287  * Return 0 on success, negative on failure
288  */
289 static int ena_setup_all_tx_resources(struct ena_adapter *adapter)
290 {
291         int i, rc = 0;
292
293         for (i = 0; i < adapter->num_queues; i++) {
294                 rc = ena_setup_tx_resources(adapter, i);
295                 if (rc)
296                         goto err_setup_tx;
297         }
298
299         return 0;
300
301 err_setup_tx:
302
303         netif_err(adapter, ifup, adapter->netdev,
304                   "Tx queue %d: allocation failed\n", i);
305
306         /* rewind the index freeing the rings as we go */
307         while (i--)
308                 ena_free_tx_resources(adapter, i);
309         return rc;
310 }
311
312 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
313  * @adapter: board private structure
314  *
315  * Free all transmit software resources
316  */
317 static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
318 {
319         int i;
320
321         for (i = 0; i < adapter->num_queues; i++)
322                 ena_free_tx_resources(adapter, i);
323 }
324
325 static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
326 {
327         if (likely(req_id < rx_ring->ring_size))
328                 return 0;
329
330         netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
331                   "Invalid rx req_id: %hu\n", req_id);
332
333         u64_stats_update_begin(&rx_ring->syncp);
334         rx_ring->rx_stats.bad_req_id++;
335         u64_stats_update_end(&rx_ring->syncp);
336
337         /* Trigger device reset */
338         rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
339         set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);
340         return -EFAULT;
341 }
342
343 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
344  * @adapter: network interface device structure
345  * @qid: queue index
346  *
347  * Returns 0 on success, negative on failure
348  */
349 static int ena_setup_rx_resources(struct ena_adapter *adapter,
350                                   u32 qid)
351 {
352         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
353         struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
354         int size, node, i;
355
356         if (rx_ring->rx_buffer_info) {
357                 netif_err(adapter, ifup, adapter->netdev,
358                           "rx_buffer_info is not NULL");
359                 return -EEXIST;
360         }
361
362         /* Allocate one extra element so that on the rx path
363          * we can always safely prefetch rx_info + 1
364          */
365         size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1);
366         node = cpu_to_node(ena_irq->cpu);
367
368         rx_ring->rx_buffer_info = vzalloc_node(size, node);
369         if (!rx_ring->rx_buffer_info) {
370                 rx_ring->rx_buffer_info = vzalloc(size);
371                 if (!rx_ring->rx_buffer_info)
372                         return -ENOMEM;
373         }
374
375         size = sizeof(u16) * rx_ring->ring_size;
376         rx_ring->free_rx_ids = vzalloc_node(size, node);
377         if (!rx_ring->free_rx_ids) {
378                 rx_ring->free_rx_ids = vzalloc(size);
379                 if (!rx_ring->free_rx_ids) {
380                         vfree(rx_ring->rx_buffer_info);
381                         return -ENOMEM;
382                 }
383         }
384
385         /* Req id ring for receiving RX pkts out of order */
386         for (i = 0; i < rx_ring->ring_size; i++)
387                 rx_ring->free_rx_ids[i] = i;
388
389         /* Reset rx statistics */
390         memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
391
392         rx_ring->next_to_clean = 0;
393         rx_ring->next_to_use = 0;
394         rx_ring->cpu = ena_irq->cpu;
395
396         return 0;
397 }
398
399 /* ena_free_rx_resources - Free I/O Rx Resources
400  * @adapter: network interface device structure
401  * @qid: queue index
402  *
403  * Free all receive software resources
404  */
405 static void ena_free_rx_resources(struct ena_adapter *adapter,
406                                   u32 qid)
407 {
408         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
409
410         vfree(rx_ring->rx_buffer_info);
411         rx_ring->rx_buffer_info = NULL;
412
413         vfree(rx_ring->free_rx_ids);
414         rx_ring->free_rx_ids = NULL;
415 }
416
417 /* ena_setup_all_rx_resources - allocate I/O Rx resources for all queues
418  * @adapter: board private structure
419  *
420  * Return 0 on success, negative on failure
421  */
422 static int ena_setup_all_rx_resources(struct ena_adapter *adapter)
423 {
424         int i, rc = 0;
425
426         for (i = 0; i < adapter->num_queues; i++) {
427                 rc = ena_setup_rx_resources(adapter, i);
428                 if (rc)
429                         goto err_setup_rx;
430         }
431
432         return 0;
433
434 err_setup_rx:
435
436         netif_err(adapter, ifup, adapter->netdev,
437                   "Rx queue %d: allocation failed\n", i);
438
439         /* rewind the index freeing the rings as we go */
440         while (i--)
441                 ena_free_rx_resources(adapter, i);
442         return rc;
443 }
444
445 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
446  * @adapter: board private structure
447  *
448  * Free all receive software resources
449  */
450 static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
451 {
452         int i;
453
454         for (i = 0; i < adapter->num_queues; i++)
455                 ena_free_rx_resources(adapter, i);
456 }
457
458 static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
459                                     struct ena_rx_buffer *rx_info, gfp_t gfp)
460 {
461         struct ena_com_buf *ena_buf;
462         struct page *page;
463         dma_addr_t dma;
464
465         /* the previously allocated page was not yet consumed; reuse it */
466         if (unlikely(rx_info->page))
467                 return 0;
468
469         page = alloc_page(gfp);
470         if (unlikely(!page)) {
471                 u64_stats_update_begin(&rx_ring->syncp);
472                 rx_ring->rx_stats.page_alloc_fail++;
473                 u64_stats_update_end(&rx_ring->syncp);
474                 return -ENOMEM;
475         }
476
477         dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
478                            DMA_FROM_DEVICE);
479         if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
480                 u64_stats_update_begin(&rx_ring->syncp);
481                 rx_ring->rx_stats.dma_mapping_err++;
482                 u64_stats_update_end(&rx_ring->syncp);
483
484                 __free_page(page);
485                 return -EIO;
486         }
487         netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
488                   "alloc page %p, rx_info %p\n", page, rx_info);
489
490         rx_info->page = page;
491         rx_info->page_offset = 0;
492         ena_buf = &rx_info->ena_buf;
493         ena_buf->paddr = dma;
494         ena_buf->len = ENA_PAGE_SIZE;
495
496         return 0;
497 }
498
499 static void ena_free_rx_page(struct ena_ring *rx_ring,
500                              struct ena_rx_buffer *rx_info)
501 {
502         struct page *page = rx_info->page;
503         struct ena_com_buf *ena_buf = &rx_info->ena_buf;
504
505         if (unlikely(!page)) {
506                 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
507                            "Trying to free unallocated buffer\n");
508                 return;
509         }
510
511         dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
512                        DMA_FROM_DEVICE);
513
514         __free_page(page);
515         rx_info->page = NULL;
516 }
517
518 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
519 {
520         u16 next_to_use, req_id;
521         u32 i;
522         int rc;
523
524         next_to_use = rx_ring->next_to_use;
525
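        /* Buffers are posted with explicit req ids, so the device may
         * complete them out of order. Pages are allocated GFP_ATOMIC
         * because this can run from NAPI (softirq) context.
         */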
526         for (i = 0; i < num; i++) {
527                 struct ena_rx_buffer *rx_info;
528
529                 req_id = rx_ring->free_rx_ids[next_to_use];
530                 rc = validate_rx_req_id(rx_ring, req_id);
531                 if (unlikely(rc < 0))
532                         break;
533
534                 rx_info = &rx_ring->rx_buffer_info[req_id];
535
536
537                 rc = ena_alloc_rx_page(rx_ring, rx_info,
538                                        GFP_ATOMIC | __GFP_COMP);
539                 if (unlikely(rc < 0)) {
540                         netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
541                                    "failed to alloc buffer for rx queue %d\n",
542                                    rx_ring->qid);
543                         break;
544                 }
545                 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
546                                                 &rx_info->ena_buf,
547                                                 req_id);
548                 if (unlikely(rc)) {
549                         netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
550                                    "failed to add buffer for rx queue %d\n",
551                                    rx_ring->qid);
552                         break;
553                 }
554                 next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use,
555                                                    rx_ring->ring_size);
556         }
557
558         if (unlikely(i < num)) {
559                 u64_stats_update_begin(&rx_ring->syncp);
560                 rx_ring->rx_stats.refil_partial++;
561                 u64_stats_update_end(&rx_ring->syncp);
562                 netdev_warn(rx_ring->netdev,
563                             "refilled rx qid %d with only %d buffers (from %d)\n",
564                             rx_ring->qid, i, num);
565         }
566
567         /* ena_com_write_sq_doorbell issues a wmb() */
568         if (likely(i))
569                 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
570
571         rx_ring->next_to_use = next_to_use;
572
573         return i;
574 }
575
576 static void ena_free_rx_bufs(struct ena_adapter *adapter,
577                              u32 qid)
578 {
579         struct ena_ring *rx_ring = &adapter->rx_ring[qid];
580         u32 i;
581
582         for (i = 0; i < rx_ring->ring_size; i++) {
583                 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i];
584
585                 if (rx_info->page)
586                         ena_free_rx_page(rx_ring, rx_info);
587         }
588 }
589
590 /* ena_refill_all_rx_bufs - allocate Rx buffers for all queues
591  * @adapter: board private structure
592  */
594 static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
595 {
596         struct ena_ring *rx_ring;
597         int i, rc, bufs_num;
598
599         for (i = 0; i < adapter->num_queues; i++) {
600                 rx_ring = &adapter->rx_ring[i];
601                 bufs_num = rx_ring->ring_size - 1;
602                 rc = ena_refill_rx_bufs(rx_ring, bufs_num);
603
604                 if (unlikely(rc != bufs_num))
605                         netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
606                                    "Refilling queue %d failed. Allocated %d buffers out of %d\n",
607                                    i, rc, bufs_num);
608         }
609 }
610
611 static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
612 {
613         int i;
614
615         for (i = 0; i < adapter->num_queues; i++)
616                 ena_free_rx_bufs(adapter, i);
617 }
618
619 static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring,
620                                     struct ena_tx_buffer *tx_info)
621 {
622         struct ena_com_buf *ena_buf;
623         u32 cnt;
624         int i;
625
626         ena_buf = tx_info->bufs;
627         cnt = tx_info->num_of_bufs;
628
629         if (unlikely(!cnt))
630                 return;
631
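        /* When map_linear_data is set, the first buffer holds the skb's
         * linear data (mapped with dma_map_single); any buffers after it
         * are page fragments.
         */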
632         if (tx_info->map_linear_data) {
633                 dma_unmap_single(tx_ring->dev,
634                                  dma_unmap_addr(ena_buf, paddr),
635                                  dma_unmap_len(ena_buf, len),
636                                  DMA_TO_DEVICE);
637                 ena_buf++;
638                 cnt--;
639         }
640
641         /* unmap remaining mapped pages */
642         for (i = 0; i < cnt; i++) {
643                 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
644                                dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
645                 ena_buf++;
646         }
647 }
648
649 /* ena_free_tx_bufs - Free Tx Buffers per Queue
650  * @tx_ring: TX ring whose buffers are to be freed
651  */
652 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
653 {
654         bool print_once = true;
655         u32 i;
656
657         for (i = 0; i < tx_ring->ring_size; i++) {
658                 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
659
660                 if (!tx_info->skb)
661                         continue;
662
663                 if (print_once) {
664                         netdev_notice(tx_ring->netdev,
665                                       "free uncompleted tx skb qid %d idx 0x%x\n",
666                                       tx_ring->qid, i);
667                         print_once = false;
668                 } else {
669                         netdev_dbg(tx_ring->netdev,
670                                    "free uncompleted tx skb qid %d idx 0x%x\n",
671                                    tx_ring->qid, i);
672                 }
673
674                 ena_unmap_tx_skb(tx_ring, tx_info);
675
676                 dev_kfree_skb_any(tx_info->skb);
677         }
678         netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
679                                                   tx_ring->qid));
680 }
681
682 static void ena_free_all_tx_bufs(struct ena_adapter *adapter)
683 {
684         struct ena_ring *tx_ring;
685         int i;
686
687         for (i = 0; i < adapter->num_queues; i++) {
688                 tx_ring = &adapter->tx_ring[i];
689                 ena_free_tx_bufs(tx_ring);
690         }
691 }
692
693 static void ena_destroy_all_tx_queues(struct ena_adapter *adapter)
694 {
695         u16 ena_qid;
696         int i;
697
698         for (i = 0; i < adapter->num_queues; i++) {
699                 ena_qid = ENA_IO_TXQ_IDX(i);
700                 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
701         }
702 }
703
704 static void ena_destroy_all_rx_queues(struct ena_adapter *adapter)
705 {
706         u16 ena_qid;
707         int i;
708
709         for (i = 0; i < adapter->num_queues; i++) {
710                 ena_qid = ENA_IO_RXQ_IDX(i);
711                 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid);
712         }
713 }
714
715 static void ena_destroy_all_io_queues(struct ena_adapter *adapter)
716 {
717         ena_destroy_all_tx_queues(adapter);
718         ena_destroy_all_rx_queues(adapter);
719 }
720
721 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
722 {
723         struct ena_tx_buffer *tx_info = NULL;
724
725         if (likely(req_id < tx_ring->ring_size)) {
726                 tx_info = &tx_ring->tx_buffer_info[req_id];
727                 if (likely(tx_info->skb))
728                         return 0;
729         }
730
731         if (tx_info)
732                 netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
733                           "tx_info doesn't have valid skb\n");
734         else
735                 netif_err(tx_ring->adapter, tx_done, tx_ring->netdev,
736                           "Invalid req_id: %hu\n", req_id);
737
738         u64_stats_update_begin(&tx_ring->syncp);
739         tx_ring->tx_stats.bad_req_id++;
740         u64_stats_update_end(&tx_ring->syncp);
741
742         /* Trigger device reset */
743         tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
744         set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
745         return -EFAULT;
746 }
747
748 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
749 {
750         struct netdev_queue *txq;
751         bool above_thresh;
752         u32 tx_bytes = 0;
753         u32 total_done = 0;
754         u16 next_to_clean;
755         u16 req_id;
756         int tx_pkts = 0;
757         int rc;
758
759         next_to_clean = tx_ring->next_to_clean;
760         txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid);
761
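        /* Reap completed Tx descriptors, up to the budget. Completions
         * may arrive in a different order than submission, hence the
         * req id recycling through free_tx_ids below.
         */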
762         while (tx_pkts < budget) {
763                 struct ena_tx_buffer *tx_info;
764                 struct sk_buff *skb;
765
766                 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
767                                                 &req_id);
768                 if (rc)
769                         break;
770
771                 rc = validate_tx_req_id(tx_ring, req_id);
772                 if (rc)
773                         break;
774
775                 tx_info = &tx_ring->tx_buffer_info[req_id];
776                 skb = tx_info->skb;
777
778                 /* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
779                 prefetch(&skb->end);
780
781                 tx_info->skb = NULL;
782                 tx_info->last_jiffies = 0;
783
784                 ena_unmap_tx_skb(tx_ring, tx_info);
785
786                 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
787                           "tx_poll: q %d skb %p completed\n", tx_ring->qid,
788                           skb);
789
790                 tx_bytes += skb->len;
791                 dev_kfree_skb(skb);
792                 tx_pkts++;
793                 total_done += tx_info->tx_descs;
794
795                 tx_ring->free_tx_ids[next_to_clean] = req_id;
796                 next_to_clean = ENA_TX_RING_IDX_NEXT(next_to_clean,
797                                                      tx_ring->ring_size);
798         }
799
800         tx_ring->next_to_clean = next_to_clean;
801         ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
802         ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
803
804         netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
805
806         netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
807                   "tx_poll: q %d done. total pkts: %d\n",
808                   tx_ring->qid, tx_pkts);
809
810         /* Make the ring's circular-index update visible to
811          * ena_start_xmit() before checking netif_queue_stopped().
812          */
813         smp_mb();
814
815         above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
816                                                     ENA_TX_WAKEUP_THRESH);
817         if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
818                 __netif_tx_lock(txq, smp_processor_id());
819                 above_thresh =
820                         ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
821                                                      ENA_TX_WAKEUP_THRESH);
822                 if (netif_tx_queue_stopped(txq) && above_thresh) {
823                         netif_tx_wake_queue(txq);
824                         u64_stats_update_begin(&tx_ring->syncp);
825                         tx_ring->tx_stats.queue_wakeup++;
826                         u64_stats_update_end(&tx_ring->syncp);
827                 }
828                 __netif_tx_unlock(txq);
829         }
830
831         tx_ring->per_napi_bytes += tx_bytes;
832         tx_ring->per_napi_packets += tx_pkts;
833
834         return tx_pkts;
835 }
836
837 static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
838 {
839         struct sk_buff *skb;
840
841         if (frags)
842                 skb = napi_get_frags(rx_ring->napi);
843         else
844                 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
845                                                 rx_ring->rx_copybreak);
846
847         if (unlikely(!skb)) {
848                 u64_stats_update_begin(&rx_ring->syncp);
849                 rx_ring->rx_stats.skb_alloc_fail++;
850                 u64_stats_update_end(&rx_ring->syncp);
851                 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
852                           "Failed to allocate skb. frags: %d\n", frags);
853                 return NULL;
854         }
855
856         return skb;
857 }
858
859 static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
860                                   struct ena_com_rx_buf_info *ena_bufs,
861                                   u32 descs,
862                                   u16 *next_to_clean)
863 {
864         struct sk_buff *skb;
865         struct ena_rx_buffer *rx_info;
866         u16 len, req_id, buf = 0;
867         void *va;
868
869         len = ena_bufs[buf].len;
870         req_id = ena_bufs[buf].req_id;
871         rx_info = &rx_ring->rx_buffer_info[req_id];
872
873         if (unlikely(!rx_info->page)) {
874                 netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
875                           "Page is NULL\n");
876                 return NULL;
877         }
878
879         netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
880                   "rx_info %p page %p\n",
881                   rx_info, rx_info->page);
882
883         /* save virt address of first buffer */
884         va = page_address(rx_info->page) + rx_info->page_offset;
885         prefetch(va + NET_IP_ALIGN);
886
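        /* Small-packet fast path: copy the payload into a fresh skb and
         * leave the page in place for reuse; for short packets this is
         * cheaper than attaching the page to the skb as a frag.
         */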
887         if (len <= rx_ring->rx_copybreak) {
888                 skb = ena_alloc_skb(rx_ring, false);
889                 if (unlikely(!skb))
890                         return NULL;
891
892                 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
893                           "rx allocated small packet. len %d. data_len %d\n",
894                           skb->len, skb->data_len);
895
896                 /* sync this buffer for CPU use */
897                 dma_sync_single_for_cpu(rx_ring->dev,
898                                         dma_unmap_addr(&rx_info->ena_buf, paddr),
899                                         len,
900                                         DMA_FROM_DEVICE);
901                 skb_copy_to_linear_data(skb, va, len);
902                 dma_sync_single_for_device(rx_ring->dev,
903                                            dma_unmap_addr(&rx_info->ena_buf, paddr),
904                                            len,
905                                            DMA_FROM_DEVICE);
906
907                 skb_put(skb, len);
908                 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
909                 rx_ring->free_rx_ids[*next_to_clean] = req_id;
910                 *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
911                                                      rx_ring->ring_size);
912                 return skb;
913         }
914
915         skb = ena_alloc_skb(rx_ring, true);
916         if (unlikely(!skb))
917                 return NULL;
918
919         do {
920                 dma_unmap_page(rx_ring->dev,
921                                dma_unmap_addr(&rx_info->ena_buf, paddr),
922                                ENA_PAGE_SIZE, DMA_FROM_DEVICE);
923
924                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
925                                 rx_info->page_offset, len, ENA_PAGE_SIZE);
926
927                 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
928                           "rx skb updated. len %d. data_len %d\n",
929                           skb->len, skb->data_len);
930
931                 rx_info->page = NULL;
932
933                 rx_ring->free_rx_ids[*next_to_clean] = req_id;
934                 *next_to_clean =
935                         ENA_RX_RING_IDX_NEXT(*next_to_clean,
936                                              rx_ring->ring_size);
937                 if (likely(--descs == 0))
938                         break;
939
940                 buf++;
941                 len = ena_bufs[buf].len;
942                 req_id = ena_bufs[buf].req_id;
943                 rx_info = &rx_ring->rx_buffer_info[req_id];
944         } while (1);
945
946         return skb;
947 }
948
949 /* ena_rx_checksum - set the skb checksum status from the HW indication
950  * @rx_ring: the RX ring on which the packet was received
951  * @ena_rx_ctx: received packet context/metadata
952  * @skb: skb currently being received and modified
953  */
954 static inline void ena_rx_checksum(struct ena_ring *rx_ring,
955                                    struct ena_com_rx_ctx *ena_rx_ctx,
956                                    struct sk_buff *skb)
957 {
958         /* Rx csum disabled */
959         if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) {
960                 skb->ip_summed = CHECKSUM_NONE;
961                 return;
962         }
963
964         /* For fragmented packets the checksum isn't valid */
965         if (ena_rx_ctx->frag) {
966                 skb->ip_summed = CHECKSUM_NONE;
967                 return;
968         }
969
970         /* if IP and error */
971         if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) &&
972                      (ena_rx_ctx->l3_csum_err))) {
973                 /* ipv4 checksum error */
974                 skb->ip_summed = CHECKSUM_NONE;
975                 u64_stats_update_begin(&rx_ring->syncp);
976                 rx_ring->rx_stats.bad_csum++;
977                 u64_stats_update_end(&rx_ring->syncp);
978                 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
979                           "RX IPv4 header checksum error\n");
980                 return;
981         }
982
983         /* if TCP/UDP */
984         if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
985                    (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) {
986                 if (unlikely(ena_rx_ctx->l4_csum_err)) {
987                         /* TCP/UDP checksum error */
988                         u64_stats_update_begin(&rx_ring->syncp);
989                         rx_ring->rx_stats.bad_csum++;
990                         u64_stats_update_end(&rx_ring->syncp);
991                         netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
992                                   "RX L4 checksum error\n");
993                         skb->ip_summed = CHECKSUM_NONE;
994                         return;
995                 }
996
997                 if (likely(ena_rx_ctx->l4_csum_checked)) {
998                         skb->ip_summed = CHECKSUM_UNNECESSARY;
999                 } else {
1000                         u64_stats_update_begin(&rx_ring->syncp);
1001                         rx_ring->rx_stats.csum_unchecked++;
1002                         u64_stats_update_end(&rx_ring->syncp);
1003                         skb->ip_summed = CHECKSUM_NONE;
1004                 }
1005         } else {
1006                 skb->ip_summed = CHECKSUM_NONE;
1007                 return;
1008         }
1009
1010 }
1011
1012 static void ena_set_rx_hash(struct ena_ring *rx_ring,
1013                             struct ena_com_rx_ctx *ena_rx_ctx,
1014                             struct sk_buff *skb)
1015 {
1016         enum pkt_hash_types hash_type;
1017
1018         if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) {
1019                 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) ||
1020                            (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)))
1021
1022                         hash_type = PKT_HASH_TYPE_L4;
1023                 else
1024                         hash_type = PKT_HASH_TYPE_NONE;
1025
1026                 /* Override hash type if the packet is fragmented */
1027                 if (ena_rx_ctx->frag)
1028                         hash_type = PKT_HASH_TYPE_NONE;
1029
1030                 skb_set_hash(skb, ena_rx_ctx->hash, hash_type);
1031         }
1032 }
1033
1034 /* ena_clean_rx_irq - Cleanup RX irq
1035  * @rx_ring: RX ring to clean
1036  * @napi: napi handler
1037  * @budget: how many packets the driver is allowed to clean
1038  *
1039  * Returns the number of cleaned buffers.
1040  */
1041 static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
1042                             u32 budget)
1043 {
1044         u16 next_to_clean = rx_ring->next_to_clean;
1045         u32 res_budget, work_done;
1046
1047         struct ena_com_rx_ctx ena_rx_ctx;
1048         struct ena_adapter *adapter;
1049         struct sk_buff *skb;
1050         int refill_required;
1051         int refill_threshold;
1052         int rc = 0;
1053         int total_len = 0;
1054         int rx_copybreak_pkt = 0;
1055         int i;
1056
1057         netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1058                   "%s qid %d\n", __func__, rx_ring->qid);
1059         res_budget = budget;
1060
1061         do {
1062                 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
1063                 ena_rx_ctx.max_bufs = rx_ring->sgl_size;
1064                 ena_rx_ctx.descs = 0;
1065                 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq,
1066                                     rx_ring->ena_com_io_sq,
1067                                     &ena_rx_ctx);
1068                 if (unlikely(rc))
1069                         goto error;
1070
1071                 if (unlikely(ena_rx_ctx.descs == 0))
1072                         break;
1073
1074                 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
1075                           "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
1076                           rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
1077                           ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
1078
1079                 /* allocate skb and fill it */
1080                 skb = ena_rx_skb(rx_ring, rx_ring->ena_bufs, ena_rx_ctx.descs,
1081                                  &next_to_clean);
1082
1083                 /* exit if we failed to retrieve a buffer */
1084                 if (unlikely(!skb)) {
1085                         for (i = 0; i < ena_rx_ctx.descs; i++) {
1086                                 rx_ring->free_rx_ids[next_to_clean] =
1087                                         rx_ring->ena_bufs[i].req_id;
1088                                 next_to_clean =
1089                                         ENA_RX_RING_IDX_NEXT(next_to_clean,
1090                                                              rx_ring->ring_size);
1091                         }
1092                         break;
1093                 }
1094
1095                 ena_rx_checksum(rx_ring, &ena_rx_ctx, skb);
1096
1097                 ena_set_rx_hash(rx_ring, &ena_rx_ctx, skb);
1098
1099                 skb_record_rx_queue(skb, rx_ring->qid);
1100
1101                 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
1102                         total_len += rx_ring->ena_bufs[0].len;
1103                         rx_copybreak_pkt++;
1104                         napi_gro_receive(napi, skb);
1105                 } else {
1106                         total_len += skb->len;
1107                         napi_gro_frags(napi);
1108                 }
1109
1110                 res_budget--;
1111         } while (likely(res_budget));
1112
1113         work_done = budget - res_budget;
1114         rx_ring->per_napi_bytes += total_len;
1115         rx_ring->per_napi_packets += work_done;
1116         u64_stats_update_begin(&rx_ring->syncp);
1117         rx_ring->rx_stats.bytes += total_len;
1118         rx_ring->rx_stats.cnt += work_done;
1119         rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt;
1120         u64_stats_update_end(&rx_ring->syncp);
1121
1122         rx_ring->next_to_clean = next_to_clean;
1123
1124         refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
1125         refill_threshold =
1126                 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER,
1127                       ENA_RX_REFILL_THRESH_PACKET);
1128
1129         /* Optimization, try to batch new rx buffers */
1130         if (refill_required > refill_threshold) {
1131                 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
1132                 ena_refill_rx_bufs(rx_ring, refill_required);
1133         }
1134
1135         return work_done;
1136
1137 error:
1138         adapter = netdev_priv(rx_ring->netdev);
1139
1140         u64_stats_update_begin(&rx_ring->syncp);
1141         rx_ring->rx_stats.bad_desc_num++;
1142         u64_stats_update_end(&rx_ring->syncp);
1143
1144         /* Too many descriptors from the device. Trigger a reset */
1145         adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
1146         set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
1147
1148         return 0;
1149 }
1150
1151 inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
1152                                        struct ena_ring *tx_ring)
1153 {
1154         /* We apply adaptive moderation on Rx path only.
1155          * Tx uses static interrupt moderation.
1156          */
1157         ena_com_calculate_interrupt_delay(rx_ring->ena_dev,
1158                                           rx_ring->per_napi_packets,
1159                                           rx_ring->per_napi_bytes,
1160                                           &rx_ring->smoothed_interval,
1161                                           &rx_ring->moder_tbl_idx);
1162
1163         /* Reset per napi packets/bytes */
1164         tx_ring->per_napi_packets = 0;
1165         tx_ring->per_napi_bytes = 0;
1166         rx_ring->per_napi_packets = 0;
1167         rx_ring->per_napi_bytes = 0;
1168 }
1169
1170 static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
1171                                         struct ena_ring *rx_ring)
1172 {
1173         struct ena_eth_io_intr_reg intr_reg;
1174
1175         /* Update intr register: rx intr delay,
1176          * tx intr delay and interrupt unmask
1177          */
1178         ena_com_update_intr_reg(&intr_reg,
1179                                 rx_ring->smoothed_interval,
1180                                 tx_ring->smoothed_interval,
1181                                 true);
1182
1183         /* The MSI-X vector is shared: both the Tx and Rx CQs hold a
1184          * pointer to it, so either one can be used to reach the
1185          * interrupt register.
1186          */
1187         ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
1188 }
1189
1190 static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
1191                                              struct ena_ring *rx_ring)
1192 {
1193         int cpu = get_cpu();
1194         int numa_node;
1195
1196         /* Check only one ring since both rings run on the same CPU */
1197         if (likely(tx_ring->cpu == cpu))
1198                 goto out;
1199
1200         numa_node = cpu_to_node(cpu);
1201         put_cpu();
1202
1203         if (numa_node != NUMA_NO_NODE) {
1204                 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
1205                 ena_com_update_numa_node(rx_ring->ena_com_io_cq, numa_node);
1206         }
1207
1208         tx_ring->cpu = cpu;
1209         rx_ring->cpu = cpu;
1210
1211         return;
1212 out:
1213         put_cpu();
1214 }
1215
1216 static int ena_io_poll(struct napi_struct *napi, int budget)
1217 {
1218         struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
1219         struct ena_ring *tx_ring, *rx_ring;
1220
1221         u32 tx_work_done;
1222         u32 rx_work_done;
1223         int tx_budget;
1224         int napi_comp_call = 0;
1225         int ret;
1226
1227         tx_ring = ena_napi->tx_ring;
1228         rx_ring = ena_napi->rx_ring;
1229
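        /* Tx and Rx of a queue pair share one NAPI context and one MSI-X
         * vector. The Tx budget is derived from the ring size, while the
         * caller-supplied budget bounds the Rx work.
         */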
1230         tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
1231
1232         if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1233             test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
1234                 napi_complete_done(napi, 0);
1235                 return 0;
1236         }
1237
1238         tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
1239         rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
1240
1241         /* If the device is about to reset or is down, avoid unmasking
1242          * the interrupt and return 0 so NAPI won't reschedule
1243          */
1244         if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
1245                      test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
1246                 napi_complete_done(napi, 0);
1247                 ret = 0;
1248
1249         } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
1250                 napi_comp_call = 1;
1251
1252                 /* Update the NUMA node and unmask the interrupt only when
1253                  * scheduled from interrupt context (vs from sk_busy_loop)
1254                  */
1255                 if (napi_complete_done(napi, rx_work_done)) {
1256                         /* Tx and Rx share the same interrupt vector */
1257                         if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
1258                                 ena_adjust_intr_moderation(rx_ring, tx_ring);
1259
1260                         ena_unmask_interrupt(tx_ring, rx_ring);
1261                 }
1262
1263                 ena_update_ring_numa_node(tx_ring, rx_ring);
1264
1265                 ret = rx_work_done;
1266         } else {
1267                 ret = budget;
1268         }
1269
1270         u64_stats_update_begin(&tx_ring->syncp);
1271         tx_ring->tx_stats.napi_comp += napi_comp_call;
1272         tx_ring->tx_stats.tx_poll++;
1273         u64_stats_update_end(&tx_ring->syncp);
1274
1275         return ret;
1276 }
1277
1278 static irqreturn_t ena_intr_msix_mgmnt(int irq, void *data)
1279 {
1280         struct ena_adapter *adapter = (struct ena_adapter *)data;
1281
1282         ena_com_admin_q_comp_intr_handler(adapter->ena_dev);
1283
1284         /* Don't call the aenq handler before probe is done */
1285         if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)))
1286                 ena_com_aenq_intr_handler(adapter->ena_dev, data);
1287
1288         return IRQ_HANDLED;
1289 }
1290
1291 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1292  * @irq: interrupt number
1293  * @data: pointer to a network interface private napi device structure
1294  */
1295 static irqreturn_t ena_intr_msix_io(int irq, void *data)
1296 {
1297         struct ena_napi *ena_napi = data;
1298
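        /* Record that this vector fired at least once; the driver's
         * watchdog uses this to tell a missed interrupt apart from a
         * queue that simply had no work.
         */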
1299         ena_napi->tx_ring->first_interrupt = true;
1300         ena_napi->rx_ring->first_interrupt = true;
1301
1302         napi_schedule_irqoff(&ena_napi->napi);
1303
1304         return IRQ_HANDLED;
1305 }
1306
1307 /* Reserve a single MSI-X vector for management (admin + aenq),
1308  * plus one vector for each potential IO queue.
1309  * The number of potential IO queues is the minimum of what the device
1310  * supports and the number of vCPUs.
1311  */
1312 static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
1313 {
1314         int msix_vecs, irq_cnt;
1315
1316         if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1317                 netif_err(adapter, probe, adapter->netdev,
1318                           "Error, MSI-X is already enabled\n");
1319                 return -EPERM;
1320         }
1321
1322         /* Reserve the maximum number of MSI-X vectors we might need */
1323         msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
1324         netif_dbg(adapter, probe, adapter->netdev,
1325                   "trying to enable MSI-X, vectors %d\n", msix_vecs);
1326
1327         irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
1328                                         msix_vecs, PCI_IRQ_MSIX);
1329
1330         if (irq_cnt < 0) {
1331                 netif_err(adapter, probe, adapter->netdev,
1332                           "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
1333                 return -ENOSPC;
1334         }
1335
1336         if (irq_cnt != msix_vecs) {
1337                 netif_notice(adapter, probe, adapter->netdev,
1338                              "enabled only %d MSI-X vectors (out of %d), reducing the number of queues\n",
1339                              irq_cnt, msix_vecs);
1340                 adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
1341         }
1342
1343         if (ena_init_rx_cpu_rmap(adapter))
1344                 netif_warn(adapter, probe, adapter->netdev,
1345                            "Failed to map IRQs to CPUs\n");
1346
1347         adapter->msix_vecs = irq_cnt;
1348         set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
1349
1350         return 0;
1351 }
1352
1353 static void ena_setup_mgmnt_intr(struct ena_adapter *adapter)
1354 {
1355         u32 cpu;
1356
1357         snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name,
1358                  ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s",
1359                  pci_name(adapter->pdev));
1360         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler =
1361                 ena_intr_msix_mgmnt;
1362         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter;
1363         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector =
1364                 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX);
1365         cpu = cpumask_first(cpu_online_mask);
1366         adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu;
1367         cpumask_set_cpu(cpu,
1368                         &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask);
1369 }
1370
1371 static void ena_setup_io_intr(struct ena_adapter *adapter)
1372 {
1373         struct net_device *netdev;
1374         int irq_idx, i, cpu;
1375
1376         netdev = adapter->netdev;
1377
1378         for (i = 0; i < adapter->num_queues; i++) {
1379                 irq_idx = ENA_IO_IRQ_IDX(i);
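                /* Spread the queues round-robin across the online CPUs;
                 * this only seeds the affinity hint, actual placement is
                 * left to irqbalance or the administrator.
                 */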
1380                 cpu = i % num_online_cpus();
1381
1382                 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE,
1383                          "%s-Tx-Rx-%d", netdev->name, i);
1384                 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io;
1385                 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i];
1386                 adapter->irq_tbl[irq_idx].vector =
1387                         pci_irq_vector(adapter->pdev, irq_idx);
1388                 adapter->irq_tbl[irq_idx].cpu = cpu;
1389
1390                 cpumask_set_cpu(cpu,
1391                                 &adapter->irq_tbl[irq_idx].affinity_hint_mask);
1392         }
1393 }
1394
1395 static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
1396 {
1397         unsigned long flags = 0;
1398         struct ena_irq *irq;
1399         int rc;
1400
1401         irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1402         rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1403                          irq->data);
1404         if (rc) {
1405                 netif_err(adapter, probe, adapter->netdev,
1406                           "failed to request admin irq\n");
1407                 return rc;
1408         }
1409
1410         netif_dbg(adapter, probe, adapter->netdev,
1411                   "set affinity hint of mgmnt irq to 0x%lx (irq vector: %d)\n",
1412                   irq->affinity_hint_mask.bits[0], irq->vector);
1413
1414         irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1415
1416         return rc;
1417 }
1418
1419 static int ena_request_io_irq(struct ena_adapter *adapter)
1420 {
1421         unsigned long flags = 0;
1422         struct ena_irq *irq;
1423         int rc = 0, i, k;
1424
1425         if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
1426                 netif_err(adapter, ifup, adapter->netdev,
1427                           "Failed to request I/O IRQ: MSI-X is not enabled\n");
1428                 return -EINVAL;
1429         }
1430
1431         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1432                 irq = &adapter->irq_tbl[i];
1433                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
1434                                  irq->data);
1435                 if (rc) {
1436                         netif_err(adapter, ifup, adapter->netdev,
1437                                   "Failed to request I/O IRQ. index %d rc %d\n",
1438                                    i, rc);
1439                         goto err;
1440                 }
1441
1442                 netif_dbg(adapter, ifup, adapter->netdev,
1443                           "set affinity hint of irq, index %d, to 0x%lx (irq vector: %d)\n",
1444                           i, irq->affinity_hint_mask.bits[0], irq->vector);
1445
1446                 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
1447         }
1448
1449         return rc;
1450
1451 err:
1452         for (k = ENA_IO_IRQ_FIRST_IDX; k < i; k++) {
1453                 irq = &adapter->irq_tbl[k];
1454                 free_irq(irq->vector, irq->data);
1455         }
1456
1457         return rc;
1458 }
1459
1460 static void ena_free_mgmnt_irq(struct ena_adapter *adapter)
1461 {
1462         struct ena_irq *irq;
1463
1464         irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX];
1465         synchronize_irq(irq->vector);
1466         irq_set_affinity_hint(irq->vector, NULL);
1467         free_irq(irq->vector, irq->data);
1468 }
1469
1470 static void ena_free_io_irq(struct ena_adapter *adapter)
1471 {
1472         struct ena_irq *irq;
1473         int i;
1474
1475 #ifdef CONFIG_RFS_ACCEL
1476         if (adapter->msix_vecs >= 1) {
1477                 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap);
1478                 adapter->netdev->rx_cpu_rmap = NULL;
1479         }
1480 #endif /* CONFIG_RFS_ACCEL */
1481
1482         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
1483                 irq = &adapter->irq_tbl[i];
1484                 irq_set_affinity_hint(irq->vector, NULL);
1485                 free_irq(irq->vector, irq->data);
1486         }
1487 }
1488
1489 static void ena_disable_msix(struct ena_adapter *adapter)
1490 {
1491         if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
1492                 pci_free_irq_vectors(adapter->pdev);
1493 }
1494
1495 static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
1496 {
1497         int i;
1498
1499         if (!netif_running(adapter->netdev))
1500                 return;
1501
1502         for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++)
1503                 synchronize_irq(adapter->irq_tbl[i].vector);
1504 }
1505
1506 static void ena_del_napi(struct ena_adapter *adapter)
1507 {
1508         int i;
1509
1510         for (i = 0; i < adapter->num_queues; i++)
1511                 netif_napi_del(&adapter->ena_napi[i].napi);
1512 }
1513
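/* ena_init_napi - Register a napi poll handler for each I/O queue
 * @adapter: ENA device adapter
 *
 * Each napi instance is bound to the Tx and Rx rings that share its
 * queue id.
 */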
1514 static void ena_init_napi(struct ena_adapter *adapter)
1515 {
1516         struct ena_napi *napi;
1517         int i;
1518
1519         for (i = 0; i < adapter->num_queues; i++) {
1520                 napi = &adapter->ena_napi[i];
1521
1522                 netif_napi_add(adapter->netdev,
1523                                &adapter->ena_napi[i].napi,
1524                                ena_io_poll,
1525                                ENA_NAPI_BUDGET);
1526                 napi->rx_ring = &adapter->rx_ring[i];
1527                 napi->tx_ring = &adapter->tx_ring[i];
1528                 napi->qid = i;
1529         }
1530 }
1531
1532 static void ena_napi_disable_all(struct ena_adapter *adapter)
1533 {
1534         int i;
1535
1536         for (i = 0; i < adapter->num_queues; i++)
1537                 napi_disable(&adapter->ena_napi[i].napi);
1538 }
1539
1540 static void ena_napi_enable_all(struct ena_adapter *adapter)
1541 {
1542         int i;
1543
1544         for (i = 0; i < adapter->num_queues; i++)
1545                 napi_enable(&adapter->ena_napi[i].napi);
1546 }
1547
1548 static void ena_restore_ethtool_params(struct ena_adapter *adapter)
1549 {
1550         adapter->tx_usecs = 0;
1551         adapter->rx_usecs = 0;
1552         adapter->tx_frames = 1;
1553         adapter->rx_frames = 1;
1554 }
1555
1556 /* Configure the Rx forwarding (RSS) */
1557 static int ena_rss_configure(struct ena_adapter *adapter)
1558 {
1559         struct ena_com_dev *ena_dev = adapter->ena_dev;
1560         int rc;
1561
1562         /* In case the RSS table wasn't initialized by probe */
1563         if (!ena_dev->rss.tbl_log_size) {
1564                 rc = ena_rss_init_default(adapter);
1565                 if (rc && (rc != -EOPNOTSUPP)) {
1566                         netif_err(adapter, ifup, adapter->netdev,
1567                                   "Failed to init RSS rc: %d\n", rc);
1568                         return rc;
1569                 }
1570         }
1571
1572         /* Set indirect table */
1573         rc = ena_com_indirect_table_set(ena_dev);
1574         if (unlikely(rc && rc != -EOPNOTSUPP))
1575                 return rc;
1576
1577         /* Configure hash function (if supported) */
1578         rc = ena_com_set_hash_function(ena_dev);
1579         if (unlikely(rc && (rc != -EOPNOTSUPP)))
1580                 return rc;
1581
1582         /* Configure hash inputs (if supported) */
1583         rc = ena_com_set_hash_ctrl(ena_dev);
1584         if (unlikely(rc && (rc != -EOPNOTSUPP)))
1585                 return rc;
1586
1587         return 0;
1588 }
1589
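/* ena_up_complete - Finish bringing the interface up
 * @adapter: ENA device adapter
 *
 * Configures RSS, re-applies the current MTU, refills the Rx rings,
 * starts the Tx queues and enables napi. Returns 0 on success,
 * negative value on failure.
 */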
1590 static int ena_up_complete(struct ena_adapter *adapter)
1591 {
1592         int rc;
1593
1594         rc = ena_rss_configure(adapter);
1595         if (rc)
1596                 return rc;
1597
1598         ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
1599
1600         ena_refill_all_rx_bufs(adapter);
1601
1602         /* enable transmits */
1603         netif_tx_start_all_queues(adapter->netdev);
1604
1605         ena_restore_ethtool_params(adapter);
1606
1607         ena_napi_enable_all(adapter);
1608
1609         return 0;
1610 }
1611
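/* ena_create_io_tx_queue - Create a Tx queue on the device
 * @adapter: ENA device adapter
 * @qid: index of the queue to create
 *
 * Creates the I/O Tx queue through the ena_com layer and retrieves its
 * submission and completion queue handlers. Returns 0 on success,
 * negative value on failure.
 */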
1612 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
1613 {
1614         struct ena_com_create_io_ctx ctx;
1615         struct ena_com_dev *ena_dev;
1616         struct ena_ring *tx_ring;
1617         u32 msix_vector;
1618         u16 ena_qid;
1619         int rc;
1620
1621         ena_dev = adapter->ena_dev;
1622
1623         tx_ring = &adapter->tx_ring[qid];
1624         msix_vector = ENA_IO_IRQ_IDX(qid);
1625         ena_qid = ENA_IO_TXQ_IDX(qid);
1626
1627         memset(&ctx, 0x0, sizeof(ctx));
1628
1629         ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
1630         ctx.qid = ena_qid;
1631         ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
1632         ctx.msix_vector = msix_vector;
1633         ctx.queue_size = adapter->tx_ring_size;
1634         ctx.numa_node = cpu_to_node(tx_ring->cpu);
1635
1636         rc = ena_com_create_io_queue(ena_dev, &ctx);
1637         if (rc) {
1638                 netif_err(adapter, ifup, adapter->netdev,
1639                           "Failed to create I/O TX queue num %d rc: %d\n",
1640                           qid, rc);
1641                 return rc;
1642         }
1643
1644         rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1645                                      &tx_ring->ena_com_io_sq,
1646                                      &tx_ring->ena_com_io_cq);
1647         if (rc) {
1648                 netif_err(adapter, ifup, adapter->netdev,
1649                           "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
1650                           qid, rc);
1651                 ena_com_destroy_io_queue(ena_dev, ena_qid);
1652                 return rc;
1653         }
1654
1655         ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
1656         return rc;
1657 }
1658
1659 static int ena_create_all_io_tx_queues(struct ena_adapter *adapter)
1660 {
1661         struct ena_com_dev *ena_dev = adapter->ena_dev;
1662         int rc, i;
1663
1664         for (i = 0; i < adapter->num_queues; i++) {
1665                 rc = ena_create_io_tx_queue(adapter, i);
1666                 if (rc)
1667                         goto create_err;
1668         }
1669
1670         return 0;
1671
1672 create_err:
1673         while (i--)
1674                 ena_com_destroy_io_queue(ena_dev, ENA_IO_TXQ_IDX(i));
1675
1676         return rc;
1677 }
1678
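/* ena_create_io_rx_queue - Create an Rx queue on the device
 * @adapter: ENA device adapter
 * @qid: index of the queue to create
 *
 * Creates the I/O Rx queue through the ena_com layer and retrieves its
 * submission and completion queue handlers. Returns 0 on success,
 * negative value on failure.
 */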
1679 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
1680 {
1681         struct ena_com_dev *ena_dev;
1682         struct ena_com_create_io_ctx ctx;
1683         struct ena_ring *rx_ring;
1684         u32 msix_vector;
1685         u16 ena_qid;
1686         int rc;
1687
1688         ena_dev = adapter->ena_dev;
1689
1690         rx_ring = &adapter->rx_ring[qid];
1691         msix_vector = ENA_IO_IRQ_IDX(qid);
1692         ena_qid = ENA_IO_RXQ_IDX(qid);
1693
1694         memset(&ctx, 0x0, sizeof(ctx));
1695
1696         ctx.qid = ena_qid;
1697         ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
1698         ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
1699         ctx.msix_vector = msix_vector;
1700         ctx.queue_size = adapter->rx_ring_size;
1701         ctx.numa_node = cpu_to_node(rx_ring->cpu);
1702
1703         rc = ena_com_create_io_queue(ena_dev, &ctx);
1704         if (rc) {
1705                 netif_err(adapter, ifup, adapter->netdev,
1706                           "Failed to create I/O RX queue num %d rc: %d\n",
1707                           qid, rc);
1708                 return rc;
1709         }
1710
1711         rc = ena_com_get_io_handlers(ena_dev, ena_qid,
1712                                      &rx_ring->ena_com_io_sq,
1713                                      &rx_ring->ena_com_io_cq);
1714         if (rc) {
1715                 netif_err(adapter, ifup, adapter->netdev,
1716                           "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
1717                           qid, rc);
1718                 ena_com_destroy_io_queue(ena_dev, ena_qid);
1719                 return rc;
1720         }
1721
1722         ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
1723
1724         return rc;
1725 }
1726
1727 static int ena_create_all_io_rx_queues(struct ena_adapter *adapter)
1728 {
1729         struct ena_com_dev *ena_dev = adapter->ena_dev;
1730         int rc, i;
1731
1732         for (i = 0; i < adapter->num_queues; i++) {
1733                 rc = ena_create_io_rx_queue(adapter, i);
1734                 if (rc)
1735                         goto create_err;
1736         }
1737
1738         return 0;
1739
1740 create_err:
1741         while (i--)
1742                 ena_com_destroy_io_queue(ena_dev, ENA_IO_RXQ_IDX(i));
1743
1744         return rc;
1745 }
1746
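/* ena_up - Bring the interface up
 * @adapter: ENA device adapter
 *
 * Sets up the I/O interrupts, napi and ring resources, creates the
 * device queues, unmasks the completion interrupts and schedules napi
 * once in case packets were pending. Returns 0 on success, negative
 * value on failure.
 */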
1747 static int ena_up(struct ena_adapter *adapter)
1748 {
1749         int rc, i;
1750
1751         netdev_dbg(adapter->netdev, "%s\n", __func__);
1752
1753         ena_setup_io_intr(adapter);
1754
1755         /* The napi poll functions must be initialized before calling
1756          * request_irq(), to handle a rare condition where a pending
1757          * interrupt causes the ISR to fire immediately while the poll
1758          * function is not set yet, resulting in a NULL dereference
1759          */
1760         ena_init_napi(adapter);
1761
1762         rc = ena_request_io_irq(adapter);
1763         if (rc)
1764                 goto err_req_irq;
1765
1766         /* allocate transmit descriptors */
1767         rc = ena_setup_all_tx_resources(adapter);
1768         if (rc)
1769                 goto err_setup_tx;
1770
1771         /* allocate receive descriptors */
1772         rc = ena_setup_all_rx_resources(adapter);
1773         if (rc)
1774                 goto err_setup_rx;
1775
1776         /* Create TX queues */
1777         rc = ena_create_all_io_tx_queues(adapter);
1778         if (rc)
1779                 goto err_create_tx_queues;
1780
1781         /* Create RX queues */
1782         rc = ena_create_all_io_rx_queues(adapter);
1783         if (rc)
1784                 goto err_create_rx_queues;
1785
1786         rc = ena_up_complete(adapter);
1787         if (rc)
1788                 goto err_up;
1789
1790         if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
1791                 netif_carrier_on(adapter->netdev);
1792
1793         u64_stats_update_begin(&adapter->syncp);
1794         adapter->dev_stats.interface_up++;
1795         u64_stats_update_end(&adapter->syncp);
1796
1797         set_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1798
1799         /* Enable completion queue interrupts */
1800         for (i = 0; i < adapter->num_queues; i++)
1801                 ena_unmask_interrupt(&adapter->tx_ring[i],
1802                                      &adapter->rx_ring[i]);
1803
1804         /* schedule napi in case we had pending packets
1805          * from the last time we disabled napi
1806          */
1807         for (i = 0; i < adapter->num_queues; i++)
1808                 napi_schedule(&adapter->ena_napi[i].napi);
1809
1810         return rc;
1811
1812 err_up:
1813         ena_destroy_all_rx_queues(adapter);
1814 err_create_rx_queues:
1815         ena_destroy_all_tx_queues(adapter);
1816 err_create_tx_queues:
1817         ena_free_all_io_rx_resources(adapter);
1818 err_setup_rx:
1819         ena_free_all_io_tx_resources(adapter);
1820 err_setup_tx:
1821         ena_free_io_irq(adapter);
1822 err_req_irq:
1823
1824         return rc;
1825 }
1826
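/* ena_down - Bring the interface down
 * @adapter: ENA device adapter
 *
 * Stops the Tx queues and napi, resets the device if a reset was
 * triggered, then destroys the I/O queues and frees their buffers and
 * resources.
 */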
1827 static void ena_down(struct ena_adapter *adapter)
1828 {
1829         netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__);
1830
1831         clear_bit(ENA_FLAG_DEV_UP, &adapter->flags);
1832
1833         u64_stats_update_begin(&adapter->syncp);
1834         adapter->dev_stats.interface_down++;
1835         u64_stats_update_end(&adapter->syncp);
1836
1837         netif_carrier_off(adapter->netdev);
1838         netif_tx_disable(adapter->netdev);
1839
1840         /* After this point the napi handler won't enable the tx queue */
1841         ena_napi_disable_all(adapter);
1842
1843         /* After destroying the queues there won't be any new interrupts */
1844
1845         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
1846                 int rc;
1847
1848                 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
1849                 if (rc)
1850                         dev_err(&adapter->pdev->dev, "Device reset failed\n");
1851                 /* stop submitting admin commands on a device that was reset */
1852                 ena_com_set_admin_running_state(adapter->ena_dev, false);
1853         }
1854
1855         ena_destroy_all_io_queues(adapter);
1856
1857         ena_disable_io_intr_sync(adapter);
1858         ena_free_io_irq(adapter);
1859         ena_del_napi(adapter);
1860
1861         ena_free_all_tx_bufs(adapter);
1862         ena_free_all_rx_bufs(adapter);
1863         ena_free_all_io_tx_resources(adapter);
1864         ena_free_all_io_rx_resources(adapter);
1865 }
1866
1867 /* ena_open - Called when a network interface is made active
1868  * @netdev: network interface device structure
1869  *
1870  * Returns 0 on success, negative value on failure
1871  *
1872  * The open entry point is called when a network interface is made
1873  * active by the system (IFF_UP).  At this point all resources needed
1874  * for transmit and receive operations are allocated, the interrupt
1875  * handler is registered with the OS, the watchdog timer is started,
1876  * and the stack is notified that the interface is ready.
1877  */
1878 static int ena_open(struct net_device *netdev)
1879 {
1880         struct ena_adapter *adapter = netdev_priv(netdev);
1881         int rc;
1882
1883         /* Notify the stack of the actual queue counts. */
1884         rc = netif_set_real_num_tx_queues(netdev, adapter->num_queues);
1885         if (rc) {
1886                 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n");
1887                 return rc;
1888         }
1889
1890         rc = netif_set_real_num_rx_queues(netdev, adapter->num_queues);
1891         if (rc) {
1892                 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n");
1893                 return rc;
1894         }
1895
1896         rc = ena_up(adapter);
1897         if (rc)
1898                 return rc;
1899
1900         return rc;
1901 }
1902
1903 /* ena_close - Disables a network interface
1904  * @netdev: network interface device structure
1905  *
1906  * Returns 0, this is not allowed to fail
1907  *
1908  * The close entry point is called when an interface is de-activated
1909  * by the OS.  The hardware is still under the driver's control, but
1910  * needs to be disabled.  A global MAC reset is issued to stop the
1911  * hardware, and all transmit and receive resources are freed.
1912  */
1913 static int ena_close(struct net_device *netdev)
1914 {
1915         struct ena_adapter *adapter = netdev_priv(netdev);
1916
1917         netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
1918
1919         if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
1920                 return 0;
1921
1922         if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
1923                 ena_down(adapter);
1924
1925         /* Check device status and issue a reset if needed */
1926         check_for_admin_com_state(adapter);
1927         if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
1928                 netif_err(adapter, ifdown, adapter->netdev,
1929                           "Destroy failure, restarting device\n");
1930                 ena_dump_stats_to_dmesg(adapter);
1931                 /* rtnl lock already obtained in dev_ioctl() layer */
1932                 ena_destroy_device(adapter, false);
1933                 ena_restore_device(adapter);
1934         }
1935
1936         return 0;
1937 }
1938
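/* ena_tx_csum - Fill the Tx context checksum and TSO fields
 * @ena_tx_ctx: Tx context to fill
 * @skb: socket buffer being transmitted
 *
 * Translates the skb offload request (partial checksum or TSO) into
 * the L3/L4 protocol and header length metadata the device expects.
 */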
1939 static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
1940 {
1941         u32 mss = skb_shinfo(skb)->gso_size;
1942         struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
1943         u8 l4_protocol = 0;
1944
1945         if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) {
1946                 ena_tx_ctx->l4_csum_enable = 1;
1947                 if (mss) {
1948                         ena_tx_ctx->tso_enable = 1;
1949                         ena_meta->l4_hdr_len = tcp_hdr(skb)->doff;
1950                         ena_tx_ctx->l4_csum_partial = 0;
1951                 } else {
1952                         ena_tx_ctx->tso_enable = 0;
1953                         ena_meta->l4_hdr_len = 0;
1954                         ena_tx_ctx->l4_csum_partial = 1;
1955                 }
1956
1957                 switch (ip_hdr(skb)->version) {
1958                 case IPVERSION:
1959                         ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
1960                         if (ip_hdr(skb)->frag_off & htons(IP_DF))
1961                                 ena_tx_ctx->df = 1;
1962                         if (mss)
1963                                 ena_tx_ctx->l3_csum_enable = 1;
1964                         l4_protocol = ip_hdr(skb)->protocol;
1965                         break;
1966                 case 6:
1967                         ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
1968                         l4_protocol = ipv6_hdr(skb)->nexthdr;
1969                         break;
1970                 default:
1971                         break;
1972                 }
1973
1974                 if (l4_protocol == IPPROTO_TCP)
1975                         ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
1976                 else
1977                         ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
1978
1979                 ena_meta->mss = mss;
1980                 ena_meta->l3_hdr_len = skb_network_header_len(skb);
1981                 ena_meta->l3_hdr_offset = skb_network_offset(skb);
1982                 ena_tx_ctx->meta_valid = 1;
1983
1984         } else {
1985                 ena_tx_ctx->meta_valid = 0;
1986         }
1987 }
1988
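/* ena_check_and_linearize_skb - Linearize an skb with too many frags
 * @tx_ring: Tx ring the skb is queued on
 * @skb: socket buffer to check
 *
 * Linearizes skbs whose fragment count exceeds what the ring's
 * scatter-gather list can describe, and counts the event in the ring
 * statistics. Returns 0 on success, negative value on failure.
 */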
1989 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
1990                                        struct sk_buff *skb)
1991 {
1992         int num_frags, header_len, rc;
1993
1994         num_frags = skb_shinfo(skb)->nr_frags;
1995         header_len = skb_headlen(skb);
1996
1997         if (num_frags < tx_ring->sgl_size)
1998                 return 0;
1999
2000         if ((num_frags == tx_ring->sgl_size) &&
2001             (header_len < tx_ring->tx_max_header_size))
2002                 return 0;
2003
2004         u64_stats_update_begin(&tx_ring->syncp);
2005         tx_ring->tx_stats.linearize++;
2006         u64_stats_update_end(&tx_ring->syncp);
2007
2008         rc = skb_linearize(skb);
2009         if (unlikely(rc)) {
2010                 u64_stats_update_begin(&tx_ring->syncp);
2011                 tx_ring->tx_stats.linearize_failed++;
2012                 u64_stats_update_end(&tx_ring->syncp);
2013         }
2014
2015         return rc;
2016 }
2017
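/* ena_tx_map_skb - DMA-map an skb for transmission
 * @tx_ring: Tx ring the skb will be sent on
 * @tx_info: Tx buffer info to fill with the mappings
 * @skb: socket buffer to map
 * @push_hdr: out parameter, header to push to the device (LLQ mode)
 *            or NULL
 * @header_len: out parameter, length of the header
 *
 * In LLQ mode the header is copied aside so it can be pushed to the
 * device; the remaining linear data and all fragments are DMA-mapped.
 * On mapping failure the buffers mapped so far are unmapped.
 */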
2018 static int ena_tx_map_skb(struct ena_ring *tx_ring,
2019                           struct ena_tx_buffer *tx_info,
2020                           struct sk_buff *skb,
2021                           void **push_hdr,
2022                           u16 *header_len)
2023 {
2024         struct ena_adapter *adapter = tx_ring->adapter;
2025         struct ena_com_buf *ena_buf;
2026         dma_addr_t dma;
2027         u32 skb_head_len, frag_len, last_frag;
2028         u16 push_len = 0;
2029         u16 delta = 0;
2030         int i = 0;
2031
2032         skb_head_len = skb_headlen(skb);
2033         tx_info->skb = skb;
2034         ena_buf = tx_info->bufs;
2035
2036         if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
2037                 /* When the device is in LLQ mode, the driver will copy
2038                  * the header into the device memory space.
2039                  * The ena_com layer assumes the header is in a linear
2040                  * memory space.
2041                  * This assumption might be wrong since part of the header
2042                  * can reside in the fragmented buffers.
2043                  * Use skb_header_pointer() to make sure the header is in a
2044                  * linear memory space.
2045                  */
2046
2047                 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
2048                 *push_hdr = skb_header_pointer(skb, 0, push_len,
2049                                                tx_ring->push_buf_intermediate_buf);
2050                 *header_len = push_len;
2051                 if (unlikely(skb->data != *push_hdr)) {
2052                         u64_stats_update_begin(&tx_ring->syncp);
2053                         tx_ring->tx_stats.llq_buffer_copy++;
2054                         u64_stats_update_end(&tx_ring->syncp);
2055
2056                         delta = push_len - skb_head_len;
2057                 }
2058         } else {
2059                 *push_hdr = NULL;
2060                 *header_len = min_t(u32, skb_head_len,
2061                                     tx_ring->tx_max_header_size);
2062         }
2063
2064         netif_dbg(adapter, tx_queued, adapter->netdev,
2065                   "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
2066                   *push_hdr, push_len);
2067
2068         if (skb_head_len > push_len) {
2069                 dma = dma_map_single(tx_ring->dev, skb->data + push_len,
2070                                      skb_head_len - push_len, DMA_TO_DEVICE);
2071                 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2072                         goto error_report_dma_error;
2073
2074                 ena_buf->paddr = dma;
2075                 ena_buf->len = skb_head_len - push_len;
2076
2077                 ena_buf++;
2078                 tx_info->num_of_bufs++;
2079                 tx_info->map_linear_data = 1;
2080         } else {
2081                 tx_info->map_linear_data = 0;
2082         }
2083
2084         last_frag = skb_shinfo(skb)->nr_frags;
2085
2086         for (i = 0; i < last_frag; i++) {
2087                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2088
2089                 frag_len = skb_frag_size(frag);
2090
2091                 if (unlikely(delta >= frag_len)) {
2092                         delta -= frag_len;
2093                         continue;
2094                 }
2095
2096                 dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
2097                                        frag_len - delta, DMA_TO_DEVICE);
2098                 if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
2099                         goto error_report_dma_error;
2100
2101                 ena_buf->paddr = dma;
2102                 ena_buf->len = frag_len - delta;
2103                 ena_buf++;
2104                 tx_info->num_of_bufs++;
2105                 delta = 0;
2106         }
2107
2108         return 0;
2109
2110 error_report_dma_error:
2111         u64_stats_update_begin(&tx_ring->syncp);
2112         tx_ring->tx_stats.dma_mapping_err++;
2113         u64_stats_update_end(&tx_ring->syncp);
2114         netdev_warn(adapter->netdev, "failed to map skb\n");
2115
2116         tx_info->skb = NULL;
2117
2118         tx_info->num_of_bufs += i;
2119         ena_unmap_tx_skb(tx_ring, tx_info);
2120
2121         return -EINVAL;
2122 }
2123
2124 /* Called with netif_tx_lock. */
2125 static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
2126 {
2127         struct ena_adapter *adapter = netdev_priv(dev);
2128         struct ena_tx_buffer *tx_info;
2129         struct ena_com_tx_ctx ena_tx_ctx;
2130         struct ena_ring *tx_ring;
2131         struct netdev_queue *txq;
2132         void *push_hdr;
2133         u16 next_to_use, req_id, header_len;
2134         int qid, rc, nb_hw_desc;
2135
2136         netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
2137         /* Determine which tx ring we will be placed on */
2138         qid = skb_get_queue_mapping(skb);
2139         tx_ring = &adapter->tx_ring[qid];
2140         txq = netdev_get_tx_queue(dev, qid);
2141
2142         rc = ena_check_and_linearize_skb(tx_ring, skb);
2143         if (unlikely(rc))
2144                 goto error_drop_packet;
2145
2146         skb_tx_timestamp(skb);
2147
2148         next_to_use = tx_ring->next_to_use;
2149         req_id = tx_ring->free_tx_ids[next_to_use];
2150         tx_info = &tx_ring->tx_buffer_info[req_id];
2151         tx_info->num_of_bufs = 0;
2152
2153         WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
2154
2155         rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
2156         if (unlikely(rc))
2157                 goto error_drop_packet;
2158
2159         memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
2160         ena_tx_ctx.ena_bufs = tx_info->bufs;
2161         ena_tx_ctx.push_header = push_hdr;
2162         ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
2163         ena_tx_ctx.req_id = req_id;
2164         ena_tx_ctx.header_len = header_len;
2165
2166         /* set the flags and metadata */
2167         ena_tx_csum(&ena_tx_ctx, skb);
2168
2169         /* prepare the packet's descriptors for the dma engine */
2170         rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
2171                                 &nb_hw_desc);
2172
2173         /* ena_com_prepare_tx() can't fail due to overflow of tx queue,
2174          * since the number of free descriptors in the queue is checked
2175          * after sending the previous packet. In case there isn't enough
2176          * space in the queue for the next packet, the queue is stopped
2177          * until there is again enough available space in the queue.
2178          * All other failure reasons of ena_com_prepare_tx() are fatal
2179          * and therefore require a device reset.
2180          */
2181         if (unlikely(rc)) {
2182                 netif_err(adapter, tx_queued, dev,
2183                           "failed to prepare tx bufs\n");
2184                 u64_stats_update_begin(&tx_ring->syncp);
2185                 tx_ring->tx_stats.prepare_ctx_err++;
2186                 u64_stats_update_end(&tx_ring->syncp);
2187                 adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
2188                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2189                 goto error_unmap_dma;
2190         }
2191
2192         netdev_tx_sent_queue(txq, skb->len);
2193
2194         u64_stats_update_begin(&tx_ring->syncp);
2195         tx_ring->tx_stats.cnt++;
2196         tx_ring->tx_stats.bytes += skb->len;
2197         u64_stats_update_end(&tx_ring->syncp);
2198
2199         tx_info->tx_descs = nb_hw_desc;
2200         tx_info->last_jiffies = jiffies;
2201         tx_info->print_once = 0;
2202
2203         tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
2204                 tx_ring->ring_size);
2205
2206         /* stop the queue when no more space is available; the packet can use
2207          * up to sgl_size + 2 descriptors: one for the meta descriptor and one
2208          * for the header (if the header is larger than tx_max_header_size).
2209          */
2210         if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2211                                                    tx_ring->sgl_size + 2))) {
2212                 netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
2213                           __func__, qid);
2214
2215                 netif_tx_stop_queue(txq);
2216                 u64_stats_update_begin(&tx_ring->syncp);
2217                 tx_ring->tx_stats.queue_stop++;
2218                 u64_stats_update_end(&tx_ring->syncp);
2219
2220                 /* There is a rare condition where this function decides to
2221                  * stop the queue but meanwhile clean_tx_irq updates
2222                  * next_to_completion and terminates.
2223                  * The queue will remain stopped forever.
2224                  * To solve this issue add an smp_mb() to make sure that
2225                  * the netif_tx_stop_queue() write is visible before checking
2226                  * if there is additional space in the queue.
2227                  */
2228                 smp_mb();
2229
2230                 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
2231                                                  ENA_TX_WAKEUP_THRESH)) {
2232                         netif_tx_wake_queue(txq);
2233                         u64_stats_update_begin(&tx_ring->syncp);
2234                         tx_ring->tx_stats.queue_wakeup++;
2235                         u64_stats_update_end(&tx_ring->syncp);
2236                 }
2237         }
2238
2239         if (netif_xmit_stopped(txq) || !skb->xmit_more) {
2240                 /* trigger the dma engine. ena_com_write_sq_doorbell()
2241                  * has a mb
2242                  */
2243                 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
2244                 u64_stats_update_begin(&tx_ring->syncp);
2245                 tx_ring->tx_stats.doorbells++;
2246                 u64_stats_update_end(&tx_ring->syncp);
2247         }
2248
2249         return NETDEV_TX_OK;
2250
2251 error_unmap_dma:
2252         ena_unmap_tx_skb(tx_ring, tx_info);
2253         tx_info->skb = NULL;
2254
2255 error_drop_packet:
2256         dev_kfree_skb(skb);
2257         return NETDEV_TX_OK;
2258 }
2259
2260 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
2261                             struct net_device *sb_dev,
2262                             select_queue_fallback_t fallback)
2263 {
2264         u16 qid;
2265         /* we suspect that this is good for in-kernel network services that
2266          * want to loop incoming skb rx to tx; with normal user-generated
2267          * traffic we will most probably not get here
2268          */
2269         if (skb_rx_queue_recorded(skb))
2270                 qid = skb_get_rx_queue(skb);
2271         else
2272                 qid = fallback(dev, skb, NULL);
2273
2274         return qid;
2275 }
2276
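/* ena_config_host_info - Report host and driver info to the device
 * @ena_dev: ENA communication layer struct
 * @pdev: PCI device
 *
 * Fills the host info attribute (OS and kernel versions, driver
 * version, PCI bus/device/function, CPU count) and sets it through the
 * admin queue. Best effort; on failure the host info is released.
 */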
2277 static void ena_config_host_info(struct ena_com_dev *ena_dev,
2278                                  struct pci_dev *pdev)
2279 {
2280         struct ena_admin_host_info *host_info;
2281         int rc;
2282
2283         /* Allocate only the host info */
2284         rc = ena_com_allocate_host_info(ena_dev);
2285         if (rc) {
2286                 pr_err("Cannot allocate host info\n");
2287                 return;
2288         }
2289
2290         host_info = ena_dev->host_attr.host_info;
2291
2292         host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
2293         host_info->os_type = ENA_ADMIN_OS_LINUX;
2294         host_info->kernel_ver = LINUX_VERSION_CODE;
2295         strncpy(host_info->kernel_ver_str, utsname()->version,
2296                 sizeof(host_info->kernel_ver_str) - 1);
2297         host_info->os_dist = 0;
2298         strncpy(host_info->os_dist_str, utsname()->release,
2299                 sizeof(host_info->os_dist_str) - 1);
2300         host_info->driver_version =
2301                 (DRV_MODULE_VER_MAJOR) |
2302                 (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
2303                 (DRV_MODULE_VER_SUBMINOR << ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT) |
2304                 ("K"[0] << ENA_ADMIN_HOST_INFO_MODULE_TYPE_SHIFT);
2305         host_info->num_cpus = num_online_cpus();
2306
2307         rc = ena_com_set_host_attributes(ena_dev);
2308         if (rc) {
2309                 if (rc == -EOPNOTSUPP)
2310                         pr_warn("Cannot set host attributes\n");
2311                 else
2312                         pr_err("Cannot set host attributes\n");
2313
2314                 goto err;
2315         }
2316
2317         return;
2318
2319 err:
2320         ena_com_delete_host_info(ena_dev);
2321 }
2322
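/* ena_config_debug_area - Allocate and register the debug area
 * @adapter: ENA device adapter
 *
 * Sizes the debug area from the ethtool stats count (one string and
 * one 64-bit value per stat) and registers it with the device. Best
 * effort; on failure the debug area is released.
 */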
2323 static void ena_config_debug_area(struct ena_adapter *adapter)
2324 {
2325         u32 debug_area_size;
2326         int rc, ss_count;
2327
2328         ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS);
2329         if (ss_count <= 0) {
2330                 netif_err(adapter, drv, adapter->netdev,
2331                           "SS count is not positive\n");
2332                 return;
2333         }
2334
2335         /* allocate 32 bytes for each string and 64 bits for the value */
2336         debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
2337
2338         rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
2339         if (rc) {
2340                 pr_err("Cannot allocate debug area\n");
2341                 return;
2342         }
2343
2344         rc = ena_com_set_host_attributes(adapter->ena_dev);
2345         if (rc) {
2346                 if (rc == -EOPNOTSUPP)
2347                         netif_warn(adapter, drv, adapter->netdev,
2348                                    "Cannot set host attributes\n");
2349                 else
2350                         netif_err(adapter, drv, adapter->netdev,
2351                                   "Cannot set host attributes\n");
2352                 goto err;
2353         }
2354
2355         return;
2356 err:
2357         ena_com_delete_debug_area(adapter->ena_dev);
2358 }
2359
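/* ena_get_stats64 - Collect 64-bit interface statistics
 * @netdev: network interface device structure
 * @stats: stats structure to fill
 *
 * Aggregates the per-ring packet and byte counters and the device
 * level Rx drop count, using the u64 stats retry loops for consistent
 * snapshots.
 */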
2360 static void ena_get_stats64(struct net_device *netdev,
2361                             struct rtnl_link_stats64 *stats)
2362 {
2363         struct ena_adapter *adapter = netdev_priv(netdev);
2364         struct ena_ring *rx_ring, *tx_ring;
2365         unsigned int start;
2366         u64 rx_drops;
2367         int i;
2368
2369         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2370                 return;
2371
2372         for (i = 0; i < adapter->num_queues; i++) {
2373                 u64 bytes, packets;
2374
2375                 tx_ring = &adapter->tx_ring[i];
2376
2377                 do {
2378                         start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
2379                         packets = tx_ring->tx_stats.cnt;
2380                         bytes = tx_ring->tx_stats.bytes;
2381                 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
2382
2383                 stats->tx_packets += packets;
2384                 stats->tx_bytes += bytes;
2385
2386                 rx_ring = &adapter->rx_ring[i];
2387
2388                 do {
2389                         start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
2390                         packets = rx_ring->rx_stats.cnt;
2391                         bytes = rx_ring->rx_stats.bytes;
2392                 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
2393
2394                 stats->rx_packets += packets;
2395                 stats->rx_bytes += bytes;
2396         }
2397
2398         do {
2399                 start = u64_stats_fetch_begin_irq(&adapter->syncp);
2400                 rx_drops = adapter->dev_stats.rx_drops;
2401         } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
2402
2403         stats->rx_dropped = rx_drops;
2404
2405         stats->multicast = 0;
2406         stats->collisions = 0;
2407
2408         stats->rx_length_errors = 0;
2409         stats->rx_crc_errors = 0;
2410         stats->rx_frame_errors = 0;
2411         stats->rx_fifo_errors = 0;
2412         stats->rx_missed_errors = 0;
2413         stats->tx_window_errors = 0;
2414
2415         stats->rx_errors = 0;
2416         stats->tx_errors = 0;
2417 }
2418
2419 static const struct net_device_ops ena_netdev_ops = {
2420         .ndo_open               = ena_open,
2421         .ndo_stop               = ena_close,
2422         .ndo_start_xmit         = ena_start_xmit,
2423         .ndo_select_queue       = ena_select_queue,
2424         .ndo_get_stats64        = ena_get_stats64,
2425         .ndo_tx_timeout         = ena_tx_timeout,
2426         .ndo_change_mtu         = ena_change_mtu,
2427         .ndo_set_mac_address    = NULL,
2428         .ndo_validate_addr      = eth_validate_addr,
2429 };
2430
2431 static int ena_device_validate_params(struct ena_adapter *adapter,
2432                                       struct ena_com_dev_get_features_ctx *get_feat_ctx)
2433 {
2434         struct net_device *netdev = adapter->netdev;
2435         int rc;
2436
2437         rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr,
2438                               adapter->mac_addr);
2439         if (!rc) {
2440                 netif_err(adapter, drv, netdev,
2441                           "Error, mac addresses are different\n");
2442                 return -EINVAL;
2443         }
2444
2445         if ((get_feat_ctx->max_queues.max_cq_num < adapter->num_queues) ||
2446             (get_feat_ctx->max_queues.max_sq_num < adapter->num_queues)) {
2447                 netif_err(adapter, drv, netdev,
2448                           "Error, device doesn't support enough queues\n");
2449                 return -EINVAL;
2450         }
2451
2452         if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) {
2453                 netif_err(adapter, drv, netdev,
2454                           "Error, device max mtu is smaller than netdev MTU\n");
2455                 return -EINVAL;
2456         }
2457
2458         return 0;
2459 }
2460
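/* ena_device_init - Initialize the device's admin interface
 * @ena_dev: ENA communication layer struct
 * @pdev: PCI device
 * @get_feat_ctx: out parameter, the device features
 * @wd_state: out parameter, true if the keep alive watchdog is active
 *
 * Resets the device, sets the DMA masks, initializes the admin queue
 * in polling mode, reads the device attributes and configures the aenq
 * groups. Returns 0 on success, negative value on failure.
 */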
2461 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
2462                            struct ena_com_dev_get_features_ctx *get_feat_ctx,
2463                            bool *wd_state)
2464 {
2465         struct device *dev = &pdev->dev;
2466         bool readless_supported;
2467         u32 aenq_groups;
2468         int dma_width;
2469         int rc;
2470
2471         rc = ena_com_mmio_reg_read_request_init(ena_dev);
2472         if (rc) {
2473                 dev_err(dev, "failed to init mmio read less\n");
2474                 return rc;
2475         }
2476
2477         /* The PCIe configuration space revision id indicates whether mmio
2478          * reg read is disabled
2479          */
2480         readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
2481         ena_com_set_mmio_read_mode(ena_dev, readless_supported);
2482
2483         rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
2484         if (rc) {
2485                 dev_err(dev, "Can not reset device\n");
2486                 goto err_mmio_read_less;
2487         }
2488
2489         rc = ena_com_validate_version(ena_dev);
2490         if (rc) {
2491                 dev_err(dev, "device version is too low\n");
2492                 goto err_mmio_read_less;
2493         }
2494
2495         dma_width = ena_com_get_dma_width(ena_dev);
2496         if (dma_width < 0) {
2497                 dev_err(dev, "Invalid dma width value %d\n", dma_width);
2498                 rc = dma_width;
2499                 goto err_mmio_read_less;
2500         }
2501
2502         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2503         if (rc) {
2504                 dev_err(dev, "pci_set_dma_mask failed 0x%x\n", rc);
2505                 goto err_mmio_read_less;
2506         }
2507
2508         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
2509         if (rc) {
2510                 dev_err(dev, "pci_set_consistent_dma_mask failed 0x%x\n",
2511                         rc);
2512                 goto err_mmio_read_less;
2513         }
2514
2515         /* ENA admin level init */
2516         rc = ena_com_admin_init(ena_dev, &aenq_handlers);
2517         if (rc) {
2518                 dev_err(dev,
2519                         "Can not initialize ena admin queue with device\n");
2520                 goto err_mmio_read_less;
2521         }
2522
2523         /* To enable the msix interrupts the driver needs to know the number
2524          * of queues. So the driver uses polling mode to retrieve this
2525          * information
2526          */
2527         ena_com_set_admin_polling_mode(ena_dev, true);
2528
2529         ena_config_host_info(ena_dev, pdev);
2530
2531         /* Get Device Attributes */
2532         rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
2533         if (rc) {
2534                 dev_err(dev, "Cannot get attribute for ena device rc=%d\n", rc);
2535                 goto err_admin_init;
2536         }
2537
2538         /* Try to turn on all the available aenq groups */
2539         aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
2540                 BIT(ENA_ADMIN_FATAL_ERROR) |
2541                 BIT(ENA_ADMIN_WARNING) |
2542                 BIT(ENA_ADMIN_NOTIFICATION) |
2543                 BIT(ENA_ADMIN_KEEP_ALIVE);
2544
2545         aenq_groups &= get_feat_ctx->aenq.supported_groups;
2546
2547         rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
2548         if (rc) {
2549                 dev_err(dev, "Cannot configure aenq groups, rc: %d\n", rc);
2550                 goto err_admin_init;
2551         }
2552
2553         *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
2554
2555         return 0;
2556
2557 err_admin_init:
2558         ena_com_delete_host_info(ena_dev);
2559         ena_com_admin_destroy(ena_dev);
2560 err_mmio_read_less:
2561         ena_com_mmio_reg_read_request_destroy(ena_dev);
2562
2563         return rc;
2564 }
2565
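/* ena_enable_msix_and_set_admin_interrupts - Move admin to interrupt mode
 * @adapter: ENA device adapter
 * @io_vectors: number of I/O vectors to reserve
 *
 * Reserves the MSI-X vectors, requests the management interrupt, turns
 * off admin queue polling and enables aenq interrupts. Returns 0 on
 * success, negative value on failure.
 */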
2566 static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
2567                                                     int io_vectors)
2568 {
2569         struct ena_com_dev *ena_dev = adapter->ena_dev;
2570         struct device *dev = &adapter->pdev->dev;
2571         int rc;
2572
2573         rc = ena_enable_msix(adapter, io_vectors);
2574         if (rc) {
2575                 dev_err(dev, "Can not reserve msix vectors\n");
2576                 return rc;
2577         }
2578
2579         ena_setup_mgmnt_intr(adapter);
2580
2581         rc = ena_request_mgmnt_irq(adapter);
2582         if (rc) {
2583                 dev_err(dev, "Can not setup management interrupts\n");
2584                 goto err_disable_msix;
2585         }
2586
2587         ena_com_set_admin_polling_mode(ena_dev, false);
2588
2589         ena_com_admin_aenq_enable(ena_dev);
2590
2591         return 0;
2592
2593 err_disable_msix:
2594         ena_disable_msix(adapter);
2595
2596         return rc;
2597 }
2598
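/* ena_destroy_device - Tear down the device interface
 * @adapter: ENA device adapter
 * @graceful: if false, mark the admin queue as not running before
 *            bringing the interface down
 *
 * Stops the timer service, brings the interface down, resets the
 * device if needed and releases the management interrupt, MSI-X
 * vectors and admin resources.
 */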
2599 static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
2600 {
2601         struct net_device *netdev = adapter->netdev;
2602         struct ena_com_dev *ena_dev = adapter->ena_dev;
2603         bool dev_up;
2604
2605         if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
2606                 return;
2607
2608         netif_carrier_off(netdev);
2609
2610         del_timer_sync(&adapter->timer_service);
2611
2612         dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
2613         adapter->dev_up_before_reset = dev_up;
2614         if (!graceful)
2615                 ena_com_set_admin_running_state(ena_dev, false);
2616
2617         if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2618                 ena_down(adapter);
2619
2620         /* Stop the device from sending AENQ events (in case the reset flag is
2621          * set and the device is up, ena_down() has already reset the device)
2622          */
2623         if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
2624                 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
2625
2626         ena_free_mgmnt_irq(adapter);
2627
2628         ena_disable_msix(adapter);
2629
2630         ena_com_abort_admin_commands(ena_dev);
2631
2632         ena_com_wait_for_abort_completion(ena_dev);
2633
2634         ena_com_admin_destroy(ena_dev);
2635
2636         ena_com_mmio_reg_read_request_destroy(ena_dev);
2637
2638         adapter->reset_reason = ENA_REGS_RESET_NORMAL;
2639
2640         clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2641         clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2642 }
2643
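/* ena_restore_device - Re-initialize the device after a reset
 * @adapter: ENA device adapter
 *
 * Counterpart of ena_destroy_device(): re-runs the device and
 * interrupt initialization and, if the interface was up before the
 * reset, brings it up again. Returns 0 on success, negative value on
 * failure.
 */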
2644 static int ena_restore_device(struct ena_adapter *adapter)
2645 {
2646         struct ena_com_dev_get_features_ctx get_feat_ctx;
2647         struct ena_com_dev *ena_dev = adapter->ena_dev;
2648         struct pci_dev *pdev = adapter->pdev;
2649         bool wd_state;
2650         int rc;
2651
2652         set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2653         rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
2654         if (rc) {
2655                 dev_err(&pdev->dev, "Can not initialize device\n");
2656                 goto err;
2657         }
2658         adapter->wd_state = wd_state;
2659
2660         rc = ena_device_validate_params(adapter, &get_feat_ctx);
2661         if (rc) {
2662                 dev_err(&pdev->dev, "Validation of device parameters failed\n");
2663                 goto err_device_destroy;
2664         }
2665
2666         clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2667         /* Make sure we don't have a race with the AENQ link state handler */
2668         if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
2669                 netif_carrier_on(adapter->netdev);
2670
2671         rc = ena_enable_msix_and_set_admin_interrupts(adapter,
2672                                                       adapter->num_queues);
2673         if (rc) {
2674                 dev_err(&pdev->dev, "Enable MSI-X failed\n");
2675                 goto err_device_destroy;
2676         }
2677         /* If the interface was up before the reset, bring it up */
2678         if (adapter->dev_up_before_reset) {
2679                 rc = ena_up(adapter);
2680                 if (rc) {
2681                         dev_err(&pdev->dev, "Failed to create I/O queues\n");
2682                         goto err_disable_msix;
2683                 }
2684         }
2685
2686         set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2687         mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2688         dev_err(&pdev->dev,
2689                 "Device reset completed successfully, Driver info: %s\n",
2690                 version);
2691
2692         return rc;
2693 err_disable_msix:
2694         ena_free_mgmnt_irq(adapter);
2695         ena_disable_msix(adapter);
2696 err_device_destroy:
2697         ena_com_abort_admin_commands(ena_dev);
2698         ena_com_wait_for_abort_completion(ena_dev);
2699         ena_com_admin_destroy(ena_dev);
2700         ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
2701         ena_com_mmio_reg_read_request_destroy(ena_dev);
2702 err:
2703         clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
2704         clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
2705         dev_err(&pdev->dev,
2706                 "Reset attempt failed. Can not reset the device\n");
2707
2708         return rc;
2709 }
2710
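/* ena_fw_reset_device - Reset task worker
 * @work: work struct embedded in the adapter
 *
 * Destroys and restores the device under the rtnl lock; a no-op if the
 * trigger reset flag is not set.
 */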
2711 static void ena_fw_reset_device(struct work_struct *work)
2712 {
2713         struct ena_adapter *adapter =
2714                 container_of(work, struct ena_adapter, reset_task);
2715         struct pci_dev *pdev = adapter->pdev;
2716
2717         if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
2718                 dev_err(&pdev->dev,
2719                         "device reset scheduled while reset bit is off\n");
2720                 return;
2721         }
2722         rtnl_lock();
2723         ena_destroy_device(adapter, false);
2724         ena_restore_device(adapter);
2725         rtnl_unlock();
2726 }
2727
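/* check_for_rx_interrupt_queue - Detect a stuck Rx interrupt
 * @adapter: ENA device adapter
 * @rx_ring: Rx ring to check
 *
 * If the completion queue holds entries but no interrupt has ever been
 * received, counts the event and triggers a reset after
 * ENA_MAX_NO_INTERRUPT_ITERATIONS consecutive detections. Returns -EIO
 * when a reset is triggered, 0 otherwise.
 */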
2728 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
2729                                         struct ena_ring *rx_ring)
2730 {
2731         if (likely(rx_ring->first_interrupt))
2732                 return 0;
2733
2734         if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
2735                 return 0;
2736
2737         rx_ring->no_interrupt_event_cnt++;
2738
2739         if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) {
2740                 netif_err(adapter, rx_err, adapter->netdev,
2741                           "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
2742                           rx_ring->qid);
2743                 adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2744                 smp_mb__before_atomic();
2745                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2746                 return -EIO;
2747         }
2748
2749         return 0;
2750 }
2751
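/* check_missing_comp_in_tx_queue - Detect missing Tx completions
 * @adapter: ENA device adapter
 * @tx_ring: Tx ring to scan
 *
 * Scans the ring for packets whose completion timed out and triggers a
 * reset when either no interrupt was ever received or the number of
 * missed completions crosses the configured threshold. Returns -EIO
 * when a reset is triggered, 0 otherwise.
 */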
2752 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
2753                                           struct ena_ring *tx_ring)
2754 {
2755         struct ena_tx_buffer *tx_buf;
2756         unsigned long last_jiffies;
2757         u32 missed_tx = 0;
2758         int i, rc = 0;
2759
2760         for (i = 0; i < tx_ring->ring_size; i++) {
2761                 tx_buf = &tx_ring->tx_buffer_info[i];
2762                 last_jiffies = tx_buf->last_jiffies;
2763
2764                 if (last_jiffies == 0)
2765                         /* no pending Tx at this location */
2766                         continue;
2767
2768                 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
2769                              2 * adapter->missing_tx_completion_to))) {
2770                         /* If after the graceful period the interrupt is
2771                          * still not received, we schedule a reset
2772                          */
2773                         netif_err(adapter, tx_err, adapter->netdev,
2774                                   "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
2775                                   tx_ring->qid);
2776                         adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
2777                         smp_mb__before_atomic();
2778                         set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2779                         return -EIO;
2780                 }
2781
2782                 if (unlikely(time_is_before_jiffies(last_jiffies +
2783                                 adapter->missing_tx_completion_to))) {
2784                         if (!tx_buf->print_once)
2785                                 netif_notice(adapter, tx_err, adapter->netdev,
2786                                              "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
2787                                              tx_ring->qid, i);
2788
2789                         tx_buf->print_once = 1;
2790                         missed_tx++;
2791                 }
2792         }
2793
2794         if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
2795                 netif_err(adapter, tx_err, adapter->netdev,
2796                           "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
2797                           missed_tx,
2798                           adapter->missing_tx_completion_threshold);
2799                 adapter->reset_reason =
2800                         ENA_REGS_RESET_MISS_TX_CMPL;
2801                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2802                 rc = -EIO;
2803         }
2804
2805         u64_stats_update_begin(&tx_ring->syncp);
2806         tx_ring->tx_stats.missed_tx = missed_tx;
2807         u64_stats_update_end(&tx_ring->syncp);
2808
2809         return rc;
2810 }
2811
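/* check_for_missing_completions - Periodic Tx/Rx queue health check
 * @adapter: ENA device adapter
 *
 * Checks up to ENA_MONITORED_TX_QUEUES queue pairs per invocation,
 * resuming from the last monitored queue id, for missing Tx
 * completions and stuck Rx interrupts.
 */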
2812 static void check_for_missing_completions(struct ena_adapter *adapter)
2813 {
2814         struct ena_ring *tx_ring;
2815         struct ena_ring *rx_ring;
2816         int i, budget, rc;
2817
2818         /* Make sure we don't race with another process turning the device off */
2819         smp_rmb();
2820
2821         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2822                 return;
2823
2824         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2825                 return;
2826
2827         if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
2828                 return;
2829
2830         budget = ENA_MONITORED_TX_QUEUES;
2831
2832         for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
2833                 tx_ring = &adapter->tx_ring[i];
2834                 rx_ring = &adapter->rx_ring[i];
2835
2836                 rc = check_missing_comp_in_tx_queue(adapter, tx_ring);
2837                 if (unlikely(rc))
2838                         return;
2839
2840                 rc = check_for_rx_interrupt_queue(adapter, rx_ring);
2841                 if (unlikely(rc))
2842                         return;
2843
2844                 budget--;
2845                 if (!budget)
2846                         break;
2847         }
2848
2849         adapter->last_monitored_tx_qid = i % adapter->num_queues;
2850 }
2851
2852 /* trigger napi schedule after 2 consecutive detections */
2853 #define EMPTY_RX_REFILL 2
2854 /* For the rare case where the device runs out of Rx descriptors and the
2855  * napi handler failed to refill new Rx descriptors (due to a lack of memory
2856  * for example).
2857  * This case will lead to a deadlock:
2858  * The device won't send interrupts since all the new Rx packets will be dropped
2859  * The napi handler won't allocate new Rx descriptors so the device won't be
2860  * able to send new packets.
2861  *
2862  * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
2863  * It is recommended to have at least 512MB, with a minimum of 128MB for
2864  * constrained environments.
2865  *
2866  * When such a situation is detected - reschedule napi
2867  */
2868 static void check_for_empty_rx_ring(struct ena_adapter *adapter)
2869 {
2870         struct ena_ring *rx_ring;
2871         int i, refill_required;
2872
2873         if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
2874                 return;
2875
2876         if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
2877                 return;
2878
2879         for (i = 0; i < adapter->num_queues; i++) {
2880                 rx_ring = &adapter->rx_ring[i];
2881
2882                 refill_required =
2883                         ena_com_free_desc(rx_ring->ena_com_io_sq);
2884                 if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
2885                         rx_ring->empty_rx_queue++;
2886
2887                         if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
2888                                 u64_stats_update_begin(&rx_ring->syncp);
2889                                 rx_ring->rx_stats.empty_rx_ring++;
2890                                 u64_stats_update_end(&rx_ring->syncp);
2891
2892                                 netif_err(adapter, drv, adapter->netdev,
2893                                           "trigger refill for ring %d\n", i);
2894
2895                                 napi_schedule(rx_ring->napi);
2896                                 rx_ring->empty_rx_queue = 0;
2897                         }
2898                 } else {
2899                         rx_ring->empty_rx_queue = 0;
2900                 }
2901         }
2902 }
2903
2904 /* Check for keep alive expiration */
2905 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2906 {
2907         unsigned long keep_alive_expired;
2908
2909         if (!adapter->wd_state)
2910                 return;
2911
2912         if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2913                 return;
2914
2915         keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
2916                                            adapter->keep_alive_timeout);
2917         if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
2918                 netif_err(adapter, drv, adapter->netdev,
2919                           "Keep alive watchdog timeout.\n");
2920                 u64_stats_update_begin(&adapter->syncp);
2921                 adapter->dev_stats.wd_expired++;
2922                 u64_stats_update_end(&adapter->syncp);
2923                 adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
2924                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2925         }
2926 }
2927
2928 static void check_for_admin_com_state(struct ena_adapter *adapter)
2929 {
2930         if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) {
2931                 netif_err(adapter, drv, adapter->netdev,
2932                           "ENA admin queue is not in running state!\n");
2933                 u64_stats_update_begin(&adapter->syncp);
2934                 adapter->dev_stats.admin_q_pause++;
2935                 u64_stats_update_end(&adapter->syncp);
2936                 adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
2937                 set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
2938         }
2939 }
2940
2941 static void ena_update_hints(struct ena_adapter *adapter,
2942                              struct ena_admin_ena_hw_hints *hints)
2943 {
2944         struct net_device *netdev = adapter->netdev;
2945
2946         if (hints->admin_completion_tx_timeout)
2947                 adapter->ena_dev->admin_queue.completion_timeout =
2948                         hints->admin_completion_tx_timeout * 1000;
2949
2950         if (hints->mmio_read_timeout)
2951                 /* convert to usec */
2952                 adapter->ena_dev->mmio_read.reg_read_to =
2953                         hints->mmio_read_timeout * 1000;
2954
2955         if (hints->missed_tx_completion_count_threshold_to_reset)
2956                 adapter->missing_tx_completion_threshold =
2957                         hints->missed_tx_completion_count_threshold_to_reset;
2958
2959         if (hints->missing_tx_completion_timeout) {
2960                 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2961                         adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
2962                 else
2963                         adapter->missing_tx_completion_to =
2964                                 msecs_to_jiffies(hints->missing_tx_completion_timeout);
2965         }
2966
2967         if (hints->netdev_wd_timeout)
2968                 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
2969
2970         if (hints->driver_watchdog_timeout) {
2971                 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2972                         adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
2973                 else
2974                         adapter->keep_alive_timeout =
2975                                 msecs_to_jiffies(hints->driver_watchdog_timeout);
2976         }
2977 }
2978
2979 static void ena_update_host_info(struct ena_admin_host_info *host_info,
2980                                  struct net_device *netdev)
2981 {
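        /* Report the 64-bit netdev feature mask to the device as two
         * 32-bit words.
         */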
2982         host_info->supported_network_features[0] =
2983                 netdev->features & GENMASK_ULL(31, 0);
2984         host_info->supported_network_features[1] =
2985                 (netdev->features & GENMASK_ULL(63, 32)) >> 32;
2986 }
2987
2988 static void ena_timer_service(struct timer_list *t)
2989 {
2990         struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
2991         u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
2992         struct ena_admin_host_info *host_info =
2993                 adapter->ena_dev->host_attr.host_info;
2994
2995         check_for_missing_keep_alive(adapter);
2996
2997         check_for_admin_com_state(adapter);
2998
2999         check_for_missing_completions(adapter);
3000
3001         check_for_empty_rx_ring(adapter);
3002
3003         if (debug_area)
3004                 ena_dump_stats_to_buf(adapter, debug_area);
3005
3006         if (host_info)
3007                 ena_update_host_info(host_info, adapter->netdev);
3008
3009         if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3010                 netif_err(adapter, drv, adapter->netdev,
3011                           "Trigger reset is on\n");
3012                 ena_dump_stats_to_dmesg(adapter);
3013                 queue_work(ena_wq, &adapter->reset_task);
3014                 return;
3015         }
3016
3017         /* Re-arm the timer; the service runs once every second */
3018         mod_timer(&adapter->timer_service, jiffies + HZ);
3019 }
3020
3021 static int ena_calc_io_queue_num(struct pci_dev *pdev,
3022                                  struct ena_com_dev *ena_dev,
3023                                  struct ena_com_dev_get_features_ctx *get_feat_ctx)
3024 {
3025         int io_sq_num, io_queue_num;
3026
3027         /* In case of LLQ, use the llq number from the get feature cmd */
3028         if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3029                 io_sq_num = get_feat_ctx->llq.max_llq_num;
3030         else
3031                 io_sq_num = get_feat_ctx->max_queues.max_sq_num;
3032
3033         io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
3034         io_queue_num = min_t(int, io_queue_num, io_sq_num);
3035         io_queue_num = min_t(int, io_queue_num,
3036                              get_feat_ctx->max_queues.max_cq_num);
3037         /* 1 IRQ for mgmnt and 1 IRQ for each IO queue */
3038         io_queue_num = min_t(int, io_queue_num, pci_msix_vec_count(pdev) - 1);
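        /* Hypothetical example: 16 online CPUs, 8 device SQs/CQs and
         * 9 MSI-X vectors give min(16, 8, 8, 9 - 1) = 8 IO queues.
         */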
3039         if (unlikely(!io_queue_num)) {
3040                 dev_err(&pdev->dev, "The device doesn't have io queues\n");
3041                 return -EFAULT;
3042         }
3043
3044         return io_queue_num;
3045 }
3046
3047 static int ena_set_queues_placement_policy(struct pci_dev *pdev,
3048                                            struct ena_com_dev *ena_dev,
3049                                            struct ena_admin_feature_llq_desc *llq,
3050                                            struct ena_llq_configurations *llq_default_configurations)
3051 {
3052         bool has_mem_bar;
3053         int rc;
3054         u32 llq_feature_mask;
3055
3056         llq_feature_mask = 1 << ENA_ADMIN_LLQ;
3057         if (!(ena_dev->supported_features & llq_feature_mask)) {
3058                 dev_err(&pdev->dev,
3059                         "LLQ is not supported. Fallback to host mode policy.\n");
3060                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3061                 return 0;
3062         }
3063
3064         has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
3065
3066         rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
3067         if (unlikely(rc)) {
3068                 dev_err(&pdev->dev,
3069                         "Failed to configure the device mode. Fallback to host mode policy.\n");
3070                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3071                 return 0;
3072         }
3073
3074         /* Nothing to config, exit */
3075         if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
3076                 return 0;
3077
3078         if (!has_mem_bar) {
3079                 dev_err(&pdev->dev,
3080                         "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
3081                 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
3082                 return 0;
3083         }
3084
3085         ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
3086                                            pci_resource_start(pdev, ENA_MEM_BAR),
3087                                            pci_resource_len(pdev, ENA_MEM_BAR));
3088
3089         if (!ena_dev->mem_bar)
3090                 return -EFAULT;
3091
3092         return 0;
3093 }
3094
3095 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
3096                                  struct net_device *netdev)
3097 {
3098         netdev_features_t dev_features = 0;
3099
3100         /* Set offload features */
3101         if (feat->offload.tx &
3102                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV4_CSUM_PART_MASK)
3103                 dev_features |= NETIF_F_IP_CSUM;
3104
3105         if (feat->offload.tx &
3106                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_TX_L4_IPV6_CSUM_PART_MASK)
3107                 dev_features |= NETIF_F_IPV6_CSUM;
3108
3109         if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK)
3110                 dev_features |= NETIF_F_TSO;
3111
3112         if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK)
3113                 dev_features |= NETIF_F_TSO6;
3114
3115         if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK)
3116                 dev_features |= NETIF_F_TSO_ECN;
3117
3118         if (feat->offload.rx_supported &
3119                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
3120                 dev_features |= NETIF_F_RXCSUM;
3121
3122         if (feat->offload.rx_supported &
3123                 ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV6_CSUM_MASK)
3124                 dev_features |= NETIF_F_RXCSUM;
3125
3126         netdev->features =
3127                 dev_features |
3128                 NETIF_F_SG |
3129                 NETIF_F_RXHASH |
3130                 NETIF_F_HIGHDMA;
3131
3132         netdev->hw_features |= netdev->features;
3133         netdev->vlan_features |= netdev->features;
3134 }
3135
3136 static void ena_set_conf_feat_params(struct ena_adapter *adapter,
3137                                      struct ena_com_dev_get_features_ctx *feat)
3138 {
3139         struct net_device *netdev = adapter->netdev;
3140
3141         /* Copy mac address */
3142         if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) {
3143                 eth_hw_addr_random(netdev);
3144                 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
3145         } else {
3146                 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
3147                 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3148         }
3149
3150         /* Set offload features */
3151         ena_set_dev_offloads(feat, netdev);
3152
3153         adapter->max_mtu = feat->dev_attr.max_mtu;
3154         netdev->max_mtu = adapter->max_mtu;
3155         netdev->min_mtu = ENA_MIN_MTU;
3156 }
3157
3158 static int ena_rss_init_default(struct ena_adapter *adapter)
3159 {
3160         struct ena_com_dev *ena_dev = adapter->ena_dev;
3161         struct device *dev = &adapter->pdev->dev;
3162         int rc, i;
3163         u32 val;
3164
3165         rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
3166         if (unlikely(rc)) {
3167                 dev_err(dev, "Cannot init indirect table\n");
3168                 goto err_rss_init;
3169         }
3170
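        /* ethtool_rxfh_indir_default(i, n) is i % n, spreading the
         * indirection table across the IO queues round-robin.
         */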
3171         for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
3172                 val = ethtool_rxfh_indir_default(i, adapter->num_queues);
3173                 rc = ena_com_indirect_table_fill_entry(ena_dev, i,
3174                                                        ENA_IO_RXQ_IDX(val));
3175                 if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3176                         dev_err(dev, "Cannot fill indirect table\n");
3177                         goto err_fill_indir;
3178                 }
3179         }
3180
3181         rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
3182                                         ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
3183         if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3184                 dev_err(dev, "Cannot fill hash function\n");
3185                 goto err_fill_indir;
3186         }
3187
3188         rc = ena_com_set_default_hash_ctrl(ena_dev);
3189         if (unlikely(rc && (rc != -EOPNOTSUPP))) {
3190                 dev_err(dev, "Cannot fill hash control\n");
3191                 goto err_fill_indir;
3192         }
3193
3194         return 0;
3195
3196 err_fill_indir:
3197         ena_com_rss_destroy(ena_dev);
3198 err_rss_init:
3199
3200         return rc;
3201 }
3202
3203 static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
3204 {
3205         int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3206
3207         pci_release_selected_regions(pdev, release_bars);
3208 }
3209
3210 static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
3211 {
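        /* Defaults: packet headers are inlined in 128B ring entries, with
         * up to two descriptors placed ahead of the header.
         */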
3212         llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
3213         llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
3214         llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
3215         llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
3216         llq_config->llq_ring_entry_size_value = 128;
3217 }
3218
3219 static int ena_calc_queue_size(struct pci_dev *pdev,
3220                                struct ena_com_dev *ena_dev,
3221                                u16 *max_tx_sgl_size,
3222                                u16 *max_rx_sgl_size,
3223                                struct ena_com_dev_get_features_ctx *get_feat_ctx)
3224 {
3225         u32 queue_size = ENA_DEFAULT_RING_SIZE;
3226
3227         queue_size = min_t(u32, queue_size,
3228                            get_feat_ctx->max_queues.max_cq_depth);
3229         queue_size = min_t(u32, queue_size,
3230                            get_feat_ctx->max_queues.max_sq_depth);
3231
3232         if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
3233                 queue_size = min_t(u32, queue_size,
3234                                    get_feat_ctx->llq.max_llq_depth);
3235
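        /* IO rings use a power-of-two size; e.g. a device cap of 1000
         * entries would round the default down to 512.
         */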
3236         queue_size = rounddown_pow_of_two(queue_size);
3237
3238         if (unlikely(!queue_size)) {
3239                 dev_err(&pdev->dev, "Invalid queue size\n");
3240                 return -EFAULT;
3241         }
3242
3243         *max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3244                                  get_feat_ctx->max_queues.max_packet_tx_descs);
3245         *max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS,
3246                                  get_feat_ctx->max_queues.max_packet_rx_descs);
3247
3248         return queue_size;
3249 }
3250
3251 /* ena_probe - Device Initialization Routine
3252  * @pdev: PCI device information struct
3253  * @ent: entry in ena_pci_tbl
3254  *
3255  * Returns 0 on success, negative on failure
3256  *
3257  * ena_probe initializes an adapter identified by a pci_dev structure.
3258  * The OS initialization, configuring of the adapter private structure,
3259  * and a hardware reset occur.
3260  */
3261 static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3262 {
3263         struct ena_com_dev_get_features_ctx get_feat_ctx;
3264         static int version_printed;
3265         struct net_device *netdev;
3266         struct ena_adapter *adapter;
3267         struct ena_llq_configurations llq_config;
3268         struct ena_com_dev *ena_dev = NULL;
3269         char *queue_type_str;
3270         static int adapters_found;
3271         int io_queue_num, bars, rc;
3272         int queue_size;
3273         u16 tx_sgl_size = 0;
3274         u16 rx_sgl_size = 0;
3275         bool wd_state;
3276
3277         dev_dbg(&pdev->dev, "%s\n", __func__);
3278
3279         if (version_printed++ == 0)
3280                 dev_info(&pdev->dev, "%s", version);
3281
3282         rc = pci_enable_device_mem(pdev);
3283         if (rc) {
3284                 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
3285                 return rc;
3286         }
3287
3288         pci_set_master(pdev);
3289
3290         ena_dev = vzalloc(sizeof(*ena_dev));
3291         if (!ena_dev) {
3292                 rc = -ENOMEM;
3293                 goto err_disable_device;
3294         }
3295
3296         bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
3297         rc = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
3298         if (rc) {
3299                 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
3300                         rc);
3301                 goto err_free_ena_dev;
3302         }
3303
3304         ena_dev->reg_bar = devm_ioremap(&pdev->dev,
3305                                         pci_resource_start(pdev, ENA_REG_BAR),
3306                                         pci_resource_len(pdev, ENA_REG_BAR));
3307         if (!ena_dev->reg_bar) {
3308                 dev_err(&pdev->dev, "failed to remap regs bar\n");
3309                 rc = -EFAULT;
3310                 goto err_free_region;
3311         }
3312
3313         ena_dev->dmadev = &pdev->dev;
3314
3315         rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
3316         if (rc) {
3317                 dev_err(&pdev->dev, "ena device init failed\n");
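                /* A timeout here usually means the device is still coming
                 * up; defer the probe so the PCI core retries it later.
                 */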
3318                 if (rc == -ETIME)
3319                         rc = -EPROBE_DEFER;
3320                 goto err_free_region;
3321         }
3322
3323         set_default_llq_configurations(&llq_config);
3324
3325         rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
3326                                              &llq_config);
3327         if (rc) {
3328                 dev_err(&pdev->dev, "Failed to set the queues placement policy\n");
3329                 goto err_device_destroy;
3330         }
3331
3332         /* Initial TX interrupt delay. Assumes 1 usec granularity.
3333          * Updated during device initialization with the real granularity.
3334          */
3335         ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
3336         io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
3337         queue_size = ena_calc_queue_size(pdev, ena_dev, &tx_sgl_size,
3338                                          &rx_sgl_size, &get_feat_ctx);
3339         if ((queue_size <= 0) || (io_queue_num <= 0)) {
3340                 rc = -EFAULT;
3341                 goto err_device_destroy;
3342         }
3343
3344         dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n",
3345                  io_queue_num, queue_size,
3346                  (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
3347                  "ENABLED" : "DISABLED");
3348
3349         /* dev zeroed in alloc_etherdev_mq */
3350         netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
3351         if (!netdev) {
3352                 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n");
3353                 rc = -ENOMEM;
3354                 goto err_device_destroy;
3355         }
3356
3357         SET_NETDEV_DEV(netdev, &pdev->dev);
3358
3359         adapter = netdev_priv(netdev);
3360         pci_set_drvdata(pdev, adapter);
3361
3362         adapter->ena_dev = ena_dev;
3363         adapter->netdev = netdev;
3364         adapter->pdev = pdev;
3365
3366         ena_set_conf_feat_params(adapter, &get_feat_ctx);
3367
3368         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3369         adapter->reset_reason = ENA_REGS_RESET_NORMAL;
3370
3371         adapter->tx_ring_size = queue_size;
3372         adapter->rx_ring_size = queue_size;
3373
3374         adapter->max_tx_sgl_size = tx_sgl_size;
3375         adapter->max_rx_sgl_size = rx_sgl_size;
3376
3377         adapter->num_queues = io_queue_num;
3378         adapter->last_monitored_tx_qid = 0;
3379
3380         adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
3381         adapter->wd_state = wd_state;
3382
3383         snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
3384
3385         rc = ena_com_init_interrupt_moderation(adapter->ena_dev);
3386         if (rc) {
3387                 dev_err(&pdev->dev,
3388                         "Failed to query interrupt moderation feature\n");
3389                 goto err_netdev_destroy;
3390         }
3391         ena_init_io_rings(adapter);
3392
3393         netdev->netdev_ops = &ena_netdev_ops;
3394         netdev->watchdog_timeo = TX_TIMEOUT;
3395         ena_set_ethtool_ops(netdev);
3396
3397         netdev->priv_flags |= IFF_UNICAST_FLT;
3398
3399         u64_stats_init(&adapter->syncp);
3400
3401         rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
3402         if (rc) {
3403                 dev_err(&pdev->dev,
3404                         "Failed to enable and set the admin interrupts\n");
3405                 goto err_worker_destroy;
3406         }
3407         rc = ena_rss_init_default(adapter);
3408         if (rc && (rc != -EOPNOTSUPP)) {
3409                 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
3410                 goto err_free_msix;
3411         }
3412
3413         ena_config_debug_area(adapter);
3414
3415         memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
3416
3417         netif_carrier_off(netdev);
3418
3419         rc = register_netdev(netdev);
3420         if (rc) {
3421                 dev_err(&pdev->dev, "Cannot register net device\n");
3422                 goto err_rss;
3423         }
3424
3425         INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
3426
3427         adapter->last_keep_alive_jiffies = jiffies;
3428         adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
3429         adapter->missing_tx_completion_to = TX_TIMEOUT;
3430         adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
3431
3432         ena_update_hints(adapter, &get_feat_ctx.hw_hints);
3433
3434         timer_setup(&adapter->timer_service, ena_timer_service, 0);
3435         mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
3436
3437         if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
3438                 queue_type_str = "Regular";
3439         else
3440                 queue_type_str = "Low Latency";
3441
3442         dev_info(&pdev->dev,
3443                  "%s found at mem %lx, mac addr %pM, Queues %d, Placement policy: %s\n",
3444                  DEVICE_NAME, (long)pci_resource_start(pdev, 0),
3445                  netdev->dev_addr, io_queue_num, queue_type_str);
3446
3447         set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
3448
3449         adapters_found++;
3450
3451         return 0;
3452
3453 err_rss:
3454         ena_com_delete_debug_area(ena_dev);
3455         ena_com_rss_destroy(ena_dev);
3456 err_free_msix:
3457         ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
3458         /* stop submitting admin commands on a device that was reset */
3459         ena_com_set_admin_running_state(ena_dev, false);
3460         ena_free_mgmnt_irq(adapter);
3461         ena_disable_msix(adapter);
3462 err_worker_destroy:
3463         ena_com_destroy_interrupt_moderation(ena_dev);
3464         del_timer(&adapter->timer_service);
3465 err_netdev_destroy:
3466         free_netdev(netdev);
3467 err_device_destroy:
3468         ena_com_delete_host_info(ena_dev);
3469         ena_com_admin_destroy(ena_dev);
3470 err_free_region:
3471         ena_release_bars(ena_dev, pdev);
3472 err_free_ena_dev:
3473         vfree(ena_dev);
3474 err_disable_device:
3475         pci_disable_device(pdev);
3476         return rc;
3477 }
3478
3479 /*****************************************************************************/
3480
3481 /* ena_remove - Device Removal Routine
3482  * @pdev: PCI device information struct
3483  *
3484  * ena_remove is called by the PCI subsystem to alert the driver
3485  * that it should release a PCI device.
3486  */
3487 static void ena_remove(struct pci_dev *pdev)
3488 {
3489         struct ena_adapter *adapter = pci_get_drvdata(pdev);
3490         struct ena_com_dev *ena_dev;
3491         struct net_device *netdev;
3492
3493         ena_dev = adapter->ena_dev;
3494         netdev = adapter->netdev;
3495
3496 #ifdef CONFIG_RFS_ACCEL
3497         if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) {
3498                 free_irq_cpu_rmap(netdev->rx_cpu_rmap);
3499                 netdev->rx_cpu_rmap = NULL;
3500         }
3501 #endif /* CONFIG_RFS_ACCEL */
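        /* Stop the watchdog timer and any in-flight reset work before
         * tearing the device down.
         */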
3502         del_timer_sync(&adapter->timer_service);
3503
3504         cancel_work_sync(&adapter->reset_task);
3505
3506         rtnl_lock();
3507         ena_destroy_device(adapter, true);
3508         rtnl_unlock();
3509
3510         unregister_netdev(netdev);
3511
3512         free_netdev(netdev);
3513
3514         ena_com_rss_destroy(ena_dev);
3515
3516         ena_com_delete_debug_area(ena_dev);
3517
3518         ena_com_delete_host_info(ena_dev);
3519
3520         ena_release_bars(ena_dev, pdev);
3521
3522         pci_disable_device(pdev);
3523
3524         ena_com_destroy_interrupt_moderation(ena_dev);
3525
3526         vfree(ena_dev);
3527 }
3528
3529 #ifdef CONFIG_PM
3530 /* ena_suspend - PM suspend callback
3531  * @pdev: PCI device information struct
3532  * @state: power state
3533  */
3534 static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
3535 {
3536         struct ena_adapter *adapter = pci_get_drvdata(pdev);
3537
3538         u64_stats_update_begin(&adapter->syncp);
3539         adapter->dev_stats.suspend++;
3540         u64_stats_update_end(&adapter->syncp);
3541
3542         rtnl_lock();
3543         if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
3544                 dev_err(&pdev->dev,
3545                         "ignoring device reset request as the device is being suspended\n");
3546                 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
3547         }
3548         ena_destroy_device(adapter, true);
3549         rtnl_unlock();
3550         return 0;
3551 }
3552
3553 /* ena_resume - PM resume callback
3554  * @pdev: PCI device information struct
3555  *
3556  */
3557 static int ena_resume(struct pci_dev *pdev)
3558 {
3559         struct ena_adapter *adapter = pci_get_drvdata(pdev);
3560         int rc;
3561
3562         u64_stats_update_begin(&adapter->syncp);
3563         adapter->dev_stats.resume++;
3564         u64_stats_update_end(&adapter->syncp);
3565
3566         rtnl_lock();
3567         rc = ena_restore_device(adapter);
3568         rtnl_unlock();
3569         return rc;
3570 }
3571 #endif
3572
3573 static struct pci_driver ena_pci_driver = {
3574         .name           = DRV_MODULE_NAME,
3575         .id_table       = ena_pci_tbl,
3576         .probe          = ena_probe,
3577         .remove         = ena_remove,
3578 #ifdef CONFIG_PM
3579         .suspend        = ena_suspend,
3580         .resume         = ena_resume,
3581 #endif
3582         .sriov_configure = pci_sriov_configure_simple,
3583 };
3584
3585 static int __init ena_init(void)
3586 {
3587         pr_info("%s", version);
3588
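        /* A single-threaded workqueue keeps reset work from different
         * adapters serialized with one another.
         */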
3589         ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME);
3590         if (!ena_wq) {
3591                 pr_err("Failed to create workqueue\n");
3592                 return -ENOMEM;
3593         }
3594
3595         return pci_register_driver(&ena_pci_driver);
3596 }
3597
3598 static void __exit ena_cleanup(void)
3599 {
3600         pci_unregister_driver(&ena_pci_driver);
3601
3602         if (ena_wq) {
3603                 destroy_workqueue(ena_wq);
3604                 ena_wq = NULL;
3605         }
3606 }
3607
3608 /******************************************************************************
3609  ******************************** AENQ Handlers *******************************
3610  *****************************************************************************/
3611 /* ena_update_on_link_change:
3612  * Notify the network interface about the change in link status
3613  */
3614 static void ena_update_on_link_change(void *adapter_data,
3615                                       struct ena_admin_aenq_entry *aenq_e)
3616 {
3617         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3618         struct ena_admin_aenq_link_change_desc *aenq_desc =
3619                 (struct ena_admin_aenq_link_change_desc *)aenq_e;
3620         int status = aenq_desc->flags &
3621                 ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
3622
3623         if (status) {
3624                 netdev_dbg(adapter->netdev, "%s\n", __func__);
3625                 set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
3626                 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
3627                         netif_carrier_on(adapter->netdev);
3628         } else {
3629                 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
3630                 netif_carrier_off(adapter->netdev);
3631         }
3632 }
3633
3634 static void ena_keep_alive_wd(void *adapter_data,
3635                               struct ena_admin_aenq_entry *aenq_e)
3636 {
3637         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3638         struct ena_admin_aenq_keep_alive_desc *desc;
3639         u64 rx_drops;
3640
3641         desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
3642         adapter->last_keep_alive_jiffies = jiffies;
3643
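        /* The device reports the Rx drop counter as two 32-bit halves */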
3644         rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
3645
3646         u64_stats_update_begin(&adapter->syncp);
3647         adapter->dev_stats.rx_drops = rx_drops;
3648         u64_stats_update_end(&adapter->syncp);
3649 }
3650
3651 static void ena_notification(void *adapter_data,
3652                              struct ena_admin_aenq_entry *aenq_e)
3653 {
3654         struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
3655         struct ena_admin_ena_hw_hints *hints;
3656
3657         WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
3658              "Invalid group(%x) expected %x\n",
3659              aenq_e->aenq_common_desc.group,
3660              ENA_ADMIN_NOTIFICATION);
3661
3662         switch (aenq_e->aenq_common_desc.syndrom) {
3663         case ENA_ADMIN_UPDATE_HINTS:
3664                 hints = (struct ena_admin_ena_hw_hints *)
3665                         (&aenq_e->inline_data_w4);
3666                 ena_update_hints(adapter, hints);
3667                 break;
3668         default:
3669                 netif_err(adapter, drv, adapter->netdev,
3670                           "Invalid aenq notification, unknown syndrome %d\n",
3671                           aenq_e->aenq_common_desc.syndrom);
3672         }
3673 }
3674
3675 /* This handler will be called for an unknown event group or for unimplemented handlers */
3676 static void unimplemented_aenq_handler(void *data,
3677                                        struct ena_admin_aenq_entry *aenq_e)
3678 {
3679         struct ena_adapter *adapter = (struct ena_adapter *)data;
3680
3681         netif_err(adapter, drv, adapter->netdev,
3682                   "Unknown event was received or event with unimplemented handler\n");
3683 }
3684
3685 static struct ena_aenq_handlers aenq_handlers = {
3686         .handlers = {
3687                 [ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
3688                 [ENA_ADMIN_NOTIFICATION] = ena_notification,
3689                 [ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
3690         },
3691         .unimplemented_handler = unimplemented_aenq_handler
3692 };
3693
3694 module_init(ena_init);
3695 module_exit(ena_cleanup);