drivers/net/ethernet/pensando/ionic/ionic_lif.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
4 #include <linux/printk.h>
5 #include <linux/dynamic_debug.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/rtnetlink.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 #include <linux/cpumask.h>
12
13 #include "ionic.h"
14 #include "ionic_bus.h"
15 #include "ionic_lif.h"
16 #include "ionic_txrx.h"
17 #include "ionic_ethtool.h"
18 #include "ionic_debugfs.h"
19
20 /* queuetype support level */
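/* Highest version of each queue type that this driver supports; these
 * values are used when queue-type support is worked out with the
 * firmware (see ionic_lif_queue_identify()).
 */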
21 static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
22         [IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
23         [IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
24         [IONIC_QTYPE_RXQ]     = 0,   /* 0 = Base version with CQ+SG support */
25         [IONIC_QTYPE_TXQ]     = 1,   /* 0 = Base version with CQ+SG support
26                                       * 1 =   ... with Tx SG version 1
27                                       */
28 };
29
30 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
31 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
32 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
33 static void ionic_link_status_check(struct ionic_lif *lif);
34 static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
35 static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
36 static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
37
38 static int ionic_start_queues(struct ionic_lif *lif);
39 static void ionic_stop_queues(struct ionic_lif *lif);
40 static void ionic_lif_queue_identify(struct ionic_lif *lif);
41
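/* Deferred-work handler: pull one item off the list, act on it, free it,
 * and reschedule the work so any remaining items are handled on the next
 * pass.  Items are queued (typically from atomic context) through
 * ionic_lif_deferred_enqueue() below.
 */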
42 static void ionic_lif_deferred_work(struct work_struct *work)
43 {
44         struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
45         struct ionic_deferred *def = &lif->deferred;
46         struct ionic_deferred_work *w = NULL;
47
48         spin_lock_bh(&def->lock);
49         if (!list_empty(&def->list)) {
50                 w = list_first_entry(&def->list,
51                                      struct ionic_deferred_work, list);
52                 list_del(&w->list);
53         }
54         spin_unlock_bh(&def->lock);
55
56         if (w) {
57                 switch (w->type) {
58                 case IONIC_DW_TYPE_RX_MODE:
59                         ionic_lif_rx_mode(lif, w->rx_mode);
60                         break;
61                 case IONIC_DW_TYPE_RX_ADDR_ADD:
62                         ionic_lif_addr_add(lif, w->addr);
63                         break;
64                 case IONIC_DW_TYPE_RX_ADDR_DEL:
65                         ionic_lif_addr_del(lif, w->addr);
66                         break;
67                 case IONIC_DW_TYPE_LINK_STATUS:
68                         ionic_link_status_check(lif);
69                         break;
70                 case IONIC_DW_TYPE_LIF_RESET:
71                         if (w->fw_status)
72                                 ionic_lif_handle_fw_up(lif);
73                         else
74                                 ionic_lif_handle_fw_down(lif);
75                         break;
76                 default:
77                         break;
78                 }
79                 kfree(w);
80                 schedule_work(&def->work);
81         }
82 }
83
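/* Queue an item for the deferred worker and kick the workqueue.  The
 * caller allocates the item (GFP_ATOMIC if in atomic context) and fills
 * in ->type plus any payload (->addr, ->rx_mode, ->fw_status); the
 * worker frees it once it has been handled.
 */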
84 void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
85                                 struct ionic_deferred_work *work)
86 {
87         spin_lock_bh(&def->lock);
88         list_add_tail(&work->list, &def->list);
89         spin_unlock_bh(&def->lock);
90         schedule_work(&def->work);
91 }
92
93 static void ionic_link_status_check(struct ionic_lif *lif)
94 {
95         struct net_device *netdev = lif->netdev;
96         u16 link_status;
97         bool link_up;
98
99         if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) ||
100             test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state))
101                 return;
102
103         link_status = le16_to_cpu(lif->info->status.link_status);
104         link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
105
106         if (link_up) {
107                 if (!netif_carrier_ok(netdev)) {
108                         u32 link_speed;
109
110                         ionic_port_identify(lif->ionic);
111                         link_speed = le32_to_cpu(lif->info->status.link_speed);
112                         netdev_info(netdev, "Link up - %d Gbps\n",
113                                     link_speed / 1000);
114                         netif_carrier_on(netdev);
115                 }
116
117                 if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev))
118                         ionic_start_queues(lif);
119         } else {
120                 if (netif_carrier_ok(netdev)) {
121                         netdev_info(netdev, "Link down\n");
122                         netif_carrier_off(netdev);
123                 }
124
125                 if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev))
126                         ionic_stop_queues(lif);
127         }
128
129         clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
130 }
131
132 void ionic_link_status_check_request(struct ionic_lif *lif)
133 {
134         struct ionic_deferred_work *work;
135
136         /* we only need one request outstanding at a time */
137         if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
138                 return;
139
140         if (in_interrupt()) {
141                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
142                 if (!work)
143                         return;
144
145                 work->type = IONIC_DW_TYPE_LINK_STATUS;
146                 ionic_lif_deferred_enqueue(&lif->deferred, work);
147         } else {
148                 ionic_link_status_check(lif);
149         }
150 }
151
152 static irqreturn_t ionic_isr(int irq, void *data)
153 {
154         struct napi_struct *napi = data;
155
156         napi_schedule_irqoff(napi);
157
158         return IRQ_HANDLED;
159 }
160
161 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
162 {
163         struct ionic_intr_info *intr = &qcq->intr;
164         struct device *dev = lif->ionic->dev;
165         struct ionic_queue *q = &qcq->q;
166         const char *name;
167
168         if (lif->registered)
169                 name = lif->netdev->name;
170         else
171                 name = dev_name(dev);
172
173         snprintf(intr->name, sizeof(intr->name),
174                  "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
175
176         return devm_request_irq(dev, intr->vector, ionic_isr,
177                                 0, intr->name, &qcq->napi);
178 }
179
180 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
181 {
182         struct ionic *ionic = lif->ionic;
183         int index;
184
185         index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
186         if (index == ionic->nintrs) {
187                 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
188                             __func__, index, ionic->nintrs);
189                 return -ENOSPC;
190         }
191
192         set_bit(index, ionic->intrs);
193         ionic_intr_init(&ionic->idev, intr, index);
194
195         return 0;
196 }
197
198 static void ionic_intr_free(struct ionic *ionic, int index)
199 {
200         if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
201                 clear_bit(index, ionic->intrs);
202 }
203
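/* Enable a qcq: set the interrupt affinity hint, enable napi, and unmask
 * the interrupt before posting the Q_CONTROL enable.  The disable path
 * below reverses the order, quiescing the interrupt and napi before
 * telling the device to stop the queue.
 */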
204 static int ionic_qcq_enable(struct ionic_qcq *qcq)
205 {
206         struct ionic_queue *q = &qcq->q;
207         struct ionic_lif *lif = q->lif;
208         struct ionic_dev *idev;
209         struct device *dev;
210
211         struct ionic_admin_ctx ctx = {
212                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
213                 .cmd.q_control = {
214                         .opcode = IONIC_CMD_Q_CONTROL,
215                         .lif_index = cpu_to_le16(lif->index),
216                         .type = q->type,
217                         .index = cpu_to_le32(q->index),
218                         .oper = IONIC_Q_ENABLE,
219                 },
220         };
221
222         idev = &lif->ionic->idev;
223         dev = lif->ionic->dev;
224
225         dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
226                 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
227
228         if (qcq->flags & IONIC_QCQ_F_INTR) {
229                 irq_set_affinity_hint(qcq->intr.vector,
230                                       &qcq->intr.affinity_mask);
231                 napi_enable(&qcq->napi);
232                 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
233                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
234                                 IONIC_INTR_MASK_CLEAR);
235         }
236
237         return ionic_adminq_post_wait(lif, &ctx);
238 }
239
240 static int ionic_qcq_disable(struct ionic_qcq *qcq)
241 {
242         struct ionic_queue *q = &qcq->q;
243         struct ionic_lif *lif = q->lif;
244         struct ionic_dev *idev;
245         struct device *dev;
246
247         struct ionic_admin_ctx ctx = {
248                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
249                 .cmd.q_control = {
250                         .opcode = IONIC_CMD_Q_CONTROL,
251                         .lif_index = cpu_to_le16(lif->index),
252                         .type = q->type,
253                         .index = cpu_to_le32(q->index),
254                         .oper = IONIC_Q_DISABLE,
255                 },
256         };
257
258         idev = &lif->ionic->idev;
259         dev = lif->ionic->dev;
260
261         dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
262                 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
263
264         if (qcq->flags & IONIC_QCQ_F_INTR) {
265                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
266                                 IONIC_INTR_MASK_SET);
267                 synchronize_irq(qcq->intr.vector);
268                 irq_set_affinity_hint(qcq->intr.vector, NULL);
269                 napi_disable(&qcq->napi);
270         }
271
272         return ionic_adminq_post_wait(lif, &ctx);
273 }
274
275 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
276 {
277         struct ionic_dev *idev = &lif->ionic->idev;
278
279         if (!qcq)
280                 return;
281
282         if (!(qcq->flags & IONIC_QCQ_F_INITED))
283                 return;
284
285         if (qcq->flags & IONIC_QCQ_F_INTR) {
286                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
287                                 IONIC_INTR_MASK_SET);
288                 netif_napi_del(&qcq->napi);
289         }
290
291         qcq->flags &= ~IONIC_QCQ_F_INITED;
292 }
293
294 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
295 {
296         struct device *dev = lif->ionic->dev;
297
298         if (!qcq)
299                 return;
300
301         ionic_debugfs_del_qcq(qcq);
302
303         dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
304         qcq->base = NULL;
305         qcq->base_pa = 0;
306
307         if (qcq->flags & IONIC_QCQ_F_INTR) {
308                 irq_set_affinity_hint(qcq->intr.vector, NULL);
309                 devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
310                 qcq->intr.vector = 0;
311                 ionic_intr_free(lif->ionic, qcq->intr.index);
312         }
313
314         devm_kfree(dev, qcq->cq.info);
315         qcq->cq.info = NULL;
316         devm_kfree(dev, qcq->q.info);
317         qcq->q.info = NULL;
318         devm_kfree(dev, qcq);
319 }
320
321 static void ionic_qcqs_free(struct ionic_lif *lif)
322 {
323         struct device *dev = lif->ionic->dev;
324         unsigned int i;
325
326         if (lif->notifyqcq) {
327                 ionic_qcq_free(lif, lif->notifyqcq);
328                 lif->notifyqcq = NULL;
329         }
330
331         if (lif->adminqcq) {
332                 ionic_qcq_free(lif, lif->adminqcq);
333                 lif->adminqcq = NULL;
334         }
335
336         if (lif->rxqcqs) {
337                 for (i = 0; i < lif->nxqs; i++)
338                         if (lif->rxqcqs[i].stats)
339                                 devm_kfree(dev, lif->rxqcqs[i].stats);
340                 devm_kfree(dev, lif->rxqcqs);
341                 lif->rxqcqs = NULL;
342         }
343
344         if (lif->txqcqs) {
345                 for (i = 0; i < lif->nxqs; i++)
346                         if (lif->txqcqs[i].stats)
347                                 devm_kfree(dev, lif->txqcqs[i].stats);
348                 devm_kfree(dev, lif->txqcqs);
349                 lif->txqcqs = NULL;
350         }
351 }
352
353 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
354                                       struct ionic_qcq *n_qcq)
355 {
356         if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
357                 ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
358                 n_qcq->flags &= ~IONIC_QCQ_F_INTR;
359         }
360
361         n_qcq->intr.vector = src_qcq->intr.vector;
362         n_qcq->intr.index = src_qcq->intr.index;
363 }
364
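/* Allocate a queue/completion-queue pair.  The descriptor ring, the
 * completion ring, and (with IONIC_QCQ_F_SG) the scatter-gather ring all
 * live in a single coherent DMA allocation, each starting on a page
 * boundary within the block.  With IONIC_QCQ_F_INTR, an interrupt is
 * allocated and its irq requested here as well.
 */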
365 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
366                            unsigned int index,
367                            const char *name, unsigned int flags,
368                            unsigned int num_descs, unsigned int desc_size,
369                            unsigned int cq_desc_size,
370                            unsigned int sg_desc_size,
371                            unsigned int pid, struct ionic_qcq **qcq)
372 {
373         struct ionic_dev *idev = &lif->ionic->idev;
374         u32 q_size, cq_size, sg_size, total_size;
375         struct device *dev = lif->ionic->dev;
376         void *q_base, *cq_base, *sg_base;
377         dma_addr_t cq_base_pa = 0;
378         dma_addr_t sg_base_pa = 0;
379         dma_addr_t q_base_pa = 0;
380         struct ionic_qcq *new;
381         int err;
382
383         *qcq = NULL;
384
385         q_size  = num_descs * desc_size;
386         cq_size = num_descs * cq_desc_size;
387         sg_size = num_descs * sg_desc_size;
388
389         total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
390         /* Note: aligning q_size and cq_size alone is not enough, because
391          * cq_base gets page-aligned separately below and q_base may not
392          * end on a page boundary; pad the total with an extra PAGE_SIZE.
393          */
394         total_size += PAGE_SIZE;
395         if (flags & IONIC_QCQ_F_SG) {
396                 total_size += ALIGN(sg_size, PAGE_SIZE);
397                 total_size += PAGE_SIZE;
398         }
399
400         new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
401         if (!new) {
402                 netdev_err(lif->netdev, "Cannot allocate queue structure\n");
403                 err = -ENOMEM;
404                 goto err_out;
405         }
406
407         new->flags = flags;
408
409         new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
410                                    GFP_KERNEL);
411         if (!new->q.info) {
412                 netdev_err(lif->netdev, "Cannot allocate queue info\n");
413                 err = -ENOMEM;
414                 goto err_out;
415         }
416
417         new->q.type = type;
418
419         err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
420                            desc_size, sg_desc_size, pid);
421         if (err) {
422                 netdev_err(lif->netdev, "Cannot initialize queue\n");
423                 goto err_out;
424         }
425
426         if (flags & IONIC_QCQ_F_INTR) {
427                 err = ionic_intr_alloc(lif, &new->intr);
428                 if (err) {
429                         netdev_warn(lif->netdev, "no intr for %s: %d\n",
430                                     name, err);
431                         goto err_out;
432                 }
433
434                 err = ionic_bus_get_irq(lif->ionic, new->intr.index);
435                 if (err < 0) {
436                         netdev_warn(lif->netdev, "no vector for %s: %d\n",
437                                     name, err);
438                         goto err_out_free_intr;
439                 }
440                 new->intr.vector = err;
441                 ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
442                                        IONIC_INTR_MASK_SET);
443
444                 err = ionic_request_irq(lif, new);
445                 if (err) {
446                         netdev_warn(lif->netdev, "irq request failed %d\n", err);
447                         goto err_out_free_intr;
448                 }
449
450                 new->intr.cpu = cpumask_local_spread(new->intr.index,
451                                                      dev_to_node(dev));
452                 if (new->intr.cpu != -1)
453                         cpumask_set_cpu(new->intr.cpu,
454                                         &new->intr.affinity_mask);
455         } else {
456                 new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
457         }
458
459         new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
460                                     GFP_KERNEL);
461         if (!new->cq.info) {
462                 netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
463                 err = -ENOMEM;
464                 goto err_out_free_irq;
465         }
466
467         err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
468         if (err) {
469                 netdev_err(lif->netdev, "Cannot initialize completion queue\n");
470                 goto err_out_free_irq;
471         }
472
473         new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
474                                        GFP_KERNEL);
475         if (!new->base) {
476                 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
477                 err = -ENOMEM;
478                 goto err_out_free_irq;
479         }
480
481         new->total_size = total_size;
482
483         q_base = new->base;
484         q_base_pa = new->base_pa;
485
486         cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
487         cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
488
489         if (flags & IONIC_QCQ_F_SG) {
490                 sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
491                                         PAGE_SIZE);
492                 sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
493                 ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
494         }
495
496         ionic_q_map(&new->q, q_base, q_base_pa);
497         ionic_cq_map(&new->cq, cq_base, cq_base_pa);
498         ionic_cq_bind(&new->cq, &new->q);
499
500         *qcq = new;
501
502         return 0;
503
504 err_out_free_irq:
505         if (flags & IONIC_QCQ_F_INTR)
506                 devm_free_irq(dev, new->intr.vector, &new->napi);
507 err_out_free_intr:
508         if (flags & IONIC_QCQ_F_INTR)
509                 ionic_intr_free(lif->ionic, new->intr.index);
510 err_out:
511         dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
512         return err;
513 }
514
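/* Allocate the control-path qcqs: the adminq with its own interrupt and,
 * if the device provides one, a notifyq riding on the adminq interrupt.
 * The Tx/Rx qcq arrays and their stats blocks are allocated here too;
 * the Tx/Rx queues themselves are set up separately.
 */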
515 static int ionic_qcqs_alloc(struct ionic_lif *lif)
516 {
517         struct device *dev = lif->ionic->dev;
518         unsigned int q_list_size;
519         unsigned int flags;
520         int err;
521         int i;
522
523         flags = IONIC_QCQ_F_INTR;
524         err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
525                               IONIC_ADMINQ_LENGTH,
526                               sizeof(struct ionic_admin_cmd),
527                               sizeof(struct ionic_admin_comp),
528                               0, lif->kern_pid, &lif->adminqcq);
529         if (err)
530                 return err;
531         ionic_debugfs_add_qcq(lif, lif->adminqcq);
532
533         if (lif->ionic->nnqs_per_lif) {
534                 flags = IONIC_QCQ_F_NOTIFYQ;
535                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
536                                       flags, IONIC_NOTIFYQ_LENGTH,
537                                       sizeof(struct ionic_notifyq_cmd),
538                                       sizeof(union ionic_notifyq_comp),
539                                       0, lif->kern_pid, &lif->notifyqcq);
540                 if (err)
541                         goto err_out_free_adminqcq;
542                 ionic_debugfs_add_qcq(lif, lif->notifyqcq);
543
544                 /* Let the notifyq ride on the adminq interrupt */
545                 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
546         }
547
548         q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
549         err = -ENOMEM;
550         lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
551         if (!lif->txqcqs)
552                 goto err_out_free_notifyqcq;
553         for (i = 0; i < lif->nxqs; i++) {
554                 lif->txqcqs[i].stats = devm_kzalloc(dev,
555                                                     sizeof(struct ionic_q_stats),
556                                                     GFP_KERNEL);
557                 if (!lif->txqcqs[i].stats)
558                         goto err_out_free_tx_stats;
559         }
560
561         lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
562         if (!lif->rxqcqs)
563                 goto err_out_free_tx_stats;
564         for (i = 0; i < lif->nxqs; i++) {
565                 lif->rxqcqs[i].stats = devm_kzalloc(dev,
566                                                     sizeof(struct ionic_q_stats),
567                                                     GFP_KERNEL);
568                 if (!lif->rxqcqs[i].stats)
569                         goto err_out_free_rx_stats;
570         }
571
572         return 0;
573
574 err_out_free_rx_stats:
575         for (i = 0; i < lif->nxqs; i++)
576                 if (lif->rxqcqs[i].stats)
577                         devm_kfree(dev, lif->rxqcqs[i].stats);
578         devm_kfree(dev, lif->rxqcqs);
579         lif->rxqcqs = NULL;
580 err_out_free_tx_stats:
581         for (i = 0; i < lif->nxqs; i++)
582                 if (lif->txqcqs[i].stats)
583                         devm_kfree(dev, lif->txqcqs[i].stats);
584         devm_kfree(dev, lif->txqcqs);
585         lif->txqcqs = NULL;
586 err_out_free_notifyqcq:
587         if (lif->notifyqcq) {
588                 ionic_qcq_free(lif, lif->notifyqcq);
589                 lif->notifyqcq = NULL;
590         }
591 err_out_free_adminqcq:
592         ionic_qcq_free(lif, lif->adminqcq);
593         lif->adminqcq = NULL;
594
595         return err;
596 }
597
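/* Tell the device about a Tx queue.  Tx completions are reported on the
 * interrupt of the paired Rx queue (see intr_index below), so no separate
 * napi context is added for Tx.
 */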
598 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
599 {
600         struct device *dev = lif->ionic->dev;
601         struct ionic_queue *q = &qcq->q;
602         struct ionic_cq *cq = &qcq->cq;
603         struct ionic_admin_ctx ctx = {
604                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
605                 .cmd.q_init = {
606                         .opcode = IONIC_CMD_Q_INIT,
607                         .lif_index = cpu_to_le16(lif->index),
608                         .type = q->type,
609                         .ver = lif->qtype_info[q->type].version,
610                         .index = cpu_to_le32(q->index),
611                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
612                                              IONIC_QINIT_F_SG),
613                         .intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
614                         .pid = cpu_to_le16(q->pid),
615                         .ring_size = ilog2(q->num_descs),
616                         .ring_base = cpu_to_le64(q->base_pa),
617                         .cq_ring_base = cpu_to_le64(cq->base_pa),
618                         .sg_ring_base = cpu_to_le64(q->sg_base_pa),
619                 },
620         };
621         int err;
622
623         dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
624         dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
625         dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
626         dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
627         dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
628         dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
629
630         q->tail = q->info;
631         q->head = q->tail;
632         cq->tail = cq->info;
633
634         err = ionic_adminq_post_wait(lif, &ctx);
635         if (err)
636                 return err;
637
638         q->hw_type = ctx.comp.q_init.hw_type;
639         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
640         q->dbval = IONIC_DBELL_QID(q->hw_index);
641
642         dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
643         dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
644
645         qcq->flags |= IONIC_QCQ_F_INITED;
646
647         return 0;
648 }
649
650 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
651 {
652         struct device *dev = lif->ionic->dev;
653         struct ionic_queue *q = &qcq->q;
654         struct ionic_cq *cq = &qcq->cq;
655         struct ionic_admin_ctx ctx = {
656                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
657                 .cmd.q_init = {
658                         .opcode = IONIC_CMD_Q_INIT,
659                         .lif_index = cpu_to_le16(lif->index),
660                         .type = q->type,
661                         .ver = lif->qtype_info[q->type].version,
662                         .index = cpu_to_le32(q->index),
663                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
664                                              IONIC_QINIT_F_SG),
665                         .intr_index = cpu_to_le16(cq->bound_intr->index),
666                         .pid = cpu_to_le16(q->pid),
667                         .ring_size = ilog2(q->num_descs),
668                         .ring_base = cpu_to_le64(q->base_pa),
669                         .cq_ring_base = cpu_to_le64(cq->base_pa),
670                         .sg_ring_base = cpu_to_le64(q->sg_base_pa),
671                 },
672         };
673         int err;
674
675         dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
676         dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
677         dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
678         dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
679         dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
680         dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
681
682         q->tail = q->info;
683         q->head = q->tail;
684         cq->tail = cq->info;
685
686         err = ionic_adminq_post_wait(lif, &ctx);
687         if (err)
688                 return err;
689
690         q->hw_type = ctx.comp.q_init.hw_type;
691         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
692         q->dbval = IONIC_DBELL_QID(q->hw_index);
693
694         dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
695         dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
696
697         netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
698                        NAPI_POLL_WEIGHT);
699
700         qcq->flags |= IONIC_QCQ_F_INITED;
701
702         return 0;
703 }
704
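/* Service one notifyq completion.  Event ids increase with each new
 * event, so an id at or below the last one seen means there is nothing
 * new and the service loop stops.  Link changes are funneled through the
 * link-check request; RESET events are deferred to process context for
 * the fw down/up handling.
 */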
705 static bool ionic_notifyq_service(struct ionic_cq *cq,
706                                   struct ionic_cq_info *cq_info)
707 {
708         union ionic_notifyq_comp *comp = cq_info->cq_desc;
709         struct ionic_deferred_work *work;
710         struct net_device *netdev;
711         struct ionic_queue *q;
712         struct ionic_lif *lif;
713         u64 eid;
714
715         q = cq->bound_q;
716         lif = q->info[0].cb_arg;
717         netdev = lif->netdev;
718         eid = le64_to_cpu(comp->event.eid);
719
720         /* Have we run out of new completions to process? */
721         if (eid <= lif->last_eid)
722                 return false;
723
724         lif->last_eid = eid;
725
726         dev_dbg(lif->ionic->dev, "notifyq event:\n");
727         dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
728                          comp, sizeof(*comp), true);
729
730         switch (le16_to_cpu(comp->event.ecode)) {
731         case IONIC_EVENT_LINK_CHANGE:
732                 ionic_link_status_check_request(lif);
733                 break;
734         case IONIC_EVENT_RESET:
735                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
736                 if (!work) {
737                         netdev_err(lif->netdev, "%s OOM\n", __func__);
738                 } else {
739                         work->type = IONIC_DW_TYPE_LIF_RESET;
740                         ionic_lif_deferred_enqueue(&lif->deferred, work);
741                 }
742                 break;
743         default:
744                 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
745                             comp->event.ecode, eid);
746                 break;
747         }
748
749         return true;
750 }
751
752 static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
753 {
754         struct ionic_dev *idev = &lif->ionic->idev;
755         struct ionic_cq *cq = &lif->notifyqcq->cq;
756         u32 work_done;
757
758         work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
759                                      NULL, NULL);
760         if (work_done)
761                 ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
762                                    work_done, IONIC_INTR_CRED_RESET_COALESCE);
763
764         return work_done;
765 }
766
767 static bool ionic_adminq_service(struct ionic_cq *cq,
768                                  struct ionic_cq_info *cq_info)
769 {
770         struct ionic_admin_comp *comp = cq_info->cq_desc;
771
772         if (!color_match(comp->color, cq->done_color))
773                 return false;
774
775         ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
776
777         return true;
778 }
779
780 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
781 {
782         struct ionic_lif *lif = napi_to_cq(napi)->lif;
783         int n_work = 0;
784         int a_work = 0;
785
786         if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
787                 n_work = ionic_notifyq_clean(lif, budget);
788         a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
789
790         return max(n_work, a_work);
791 }
792
793 void ionic_get_stats64(struct net_device *netdev,
794                        struct rtnl_link_stats64 *ns)
795 {
796         struct ionic_lif *lif = netdev_priv(netdev);
797         struct ionic_lif_stats *ls;
798
799         memset(ns, 0, sizeof(*ns));
800         ls = &lif->info->stats;
801
802         ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
803                          le64_to_cpu(ls->rx_mcast_packets) +
804                          le64_to_cpu(ls->rx_bcast_packets);
805
806         ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
807                          le64_to_cpu(ls->tx_mcast_packets) +
808                          le64_to_cpu(ls->tx_bcast_packets);
809
810         ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
811                        le64_to_cpu(ls->rx_mcast_bytes) +
812                        le64_to_cpu(ls->rx_bcast_bytes);
813
814         ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
815                        le64_to_cpu(ls->tx_mcast_bytes) +
816                        le64_to_cpu(ls->tx_bcast_bytes);
817
818         ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
819                          le64_to_cpu(ls->rx_mcast_drop_packets) +
820                          le64_to_cpu(ls->rx_bcast_drop_packets);
821
822         ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
823                          le64_to_cpu(ls->tx_mcast_drop_packets) +
824                          le64_to_cpu(ls->tx_bcast_drop_packets);
825
826         ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
827
828         ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
829
830         ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
831                                le64_to_cpu(ls->rx_queue_disabled) +
832                                le64_to_cpu(ls->rx_desc_fetch_error) +
833                                le64_to_cpu(ls->rx_desc_data_error);
834
835         ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
836                                 le64_to_cpu(ls->tx_queue_disabled) +
837                                 le64_to_cpu(ls->tx_desc_fetch_error) +
838                                 le64_to_cpu(ls->tx_desc_data_error);
839
840         ns->rx_errors = ns->rx_over_errors +
841                         ns->rx_missed_errors;
842
843         ns->tx_errors = ns->tx_aborted_errors;
844 }
845
846 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
847 {
848         struct ionic_admin_ctx ctx = {
849                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
850                 .cmd.rx_filter_add = {
851                         .opcode = IONIC_CMD_RX_FILTER_ADD,
852                         .lif_index = cpu_to_le16(lif->index),
853                         .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
854                 },
855         };
856         struct ionic_rx_filter *f;
857         int err;
858
859         /* don't bother if we already have it */
860         spin_lock_bh(&lif->rx_filters.lock);
861         f = ionic_rx_filter_by_addr(lif, addr);
862         spin_unlock_bh(&lif->rx_filters.lock);
863         if (f)
864                 return 0;
865
866         memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
867         err = ionic_adminq_post_wait(lif, &ctx);
868         if (err && err != -EEXIST)
869                 return err;
870
871         netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
872                    ctx.comp.rx_filter_add.filter_id);
873
874         return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
875 }
876
877 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
878 {
879         struct ionic_admin_ctx ctx = {
880                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
881                 .cmd.rx_filter_del = {
882                         .opcode = IONIC_CMD_RX_FILTER_DEL,
883                         .lif_index = cpu_to_le16(lif->index),
884                 },
885         };
886         struct ionic_rx_filter *f;
887         int err;
888
889         spin_lock_bh(&lif->rx_filters.lock);
890         f = ionic_rx_filter_by_addr(lif, addr);
891         if (!f) {
892                 spin_unlock_bh(&lif->rx_filters.lock);
893                 return -ENOENT;
894         }
895
896         ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
897         ionic_rx_filter_free(lif, f);
898         spin_unlock_bh(&lif->rx_filters.lock);
899
900         err = ionic_adminq_post_wait(lif, &ctx);
901         if (err && err != -EEXIST)
902                 return err;
903
904         netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
905                    ctx.cmd.rx_filter_del.filter_id);
906
907         return 0;
908 }
909
910 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
911 {
912         struct ionic *ionic = lif->ionic;
913         struct ionic_deferred_work *work;
914         unsigned int nmfilters;
915         unsigned int nufilters;
916
917         if (add) {
918                 /* Do we have space for this filter?  We test the counters
919                  * here before checking the need for deferral so that we
920                  * can return an overflow error to the stack.
921                  */
922                 nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
923                 nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
924
925                 if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
926                         lif->nmcast++;
927                 else if (!is_multicast_ether_addr(addr) &&
928                          lif->nucast < nufilters)
929                         lif->nucast++;
930                 else
931                         return -ENOSPC;
932         } else {
933                 if (is_multicast_ether_addr(addr) && lif->nmcast)
934                         lif->nmcast--;
935                 else if (!is_multicast_ether_addr(addr) && lif->nucast)
936                         lif->nucast--;
937         }
938
939         if (in_interrupt()) {
940                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
941                 if (!work) {
942                         netdev_err(lif->netdev, "%s OOM\n", __func__);
943                         return -ENOMEM;
944                 }
945                 work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
946                                    IONIC_DW_TYPE_RX_ADDR_DEL;
947                 memcpy(work->addr, addr, ETH_ALEN);
948                 netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
949                            add ? "add" : "del", addr);
950                 ionic_lif_deferred_enqueue(&lif->deferred, work);
951         } else {
952                 netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
953                            add ? "add" : "del", addr);
954                 if (add)
955                         return ionic_lif_addr_add(lif, addr);
956                 else
957                         return ionic_lif_addr_del(lif, addr);
958         }
959
960         return 0;
961 }
962
963 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
964 {
965         return ionic_lif_addr(netdev_priv(netdev), addr, true);
966 }
967
968 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
969 {
970         return ionic_lif_addr(netdev_priv(netdev), addr, false);
971 }
972
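/* Push the rx_mode bits to the device over the adminq.  ionic_set_rx_mode()
 * below derives the wanted mode from the netdev flags and filter counts,
 * and _ionic_lif_rx_mode() defers this call to the workqueue when running
 * in atomic context.
 */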
973 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
974 {
975         struct ionic_admin_ctx ctx = {
976                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
977                 .cmd.rx_mode_set = {
978                         .opcode = IONIC_CMD_RX_MODE_SET,
979                         .lif_index = cpu_to_le16(lif->index),
980                         .rx_mode = cpu_to_le16(rx_mode),
981                 },
982         };
983         char buf[128];
984         int err;
985         int i;
986 #define REMAIN(__x) (sizeof(buf) - (__x))
987
988         i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
989                       lif->rx_mode, rx_mode);
990         if (rx_mode & IONIC_RX_MODE_F_UNICAST)
991                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
992         if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
993                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
994         if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
995                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
996         if (rx_mode & IONIC_RX_MODE_F_PROMISC)
997                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
998         if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
999                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
1000         netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
1001
1002         err = ionic_adminq_post_wait(lif, &ctx);
1003         if (err)
1004                 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
1005                             rx_mode, err);
1006         else
1007                 lif->rx_mode = rx_mode;
1008 }
1009
1010 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
1011 {
1012         struct ionic_deferred_work *work;
1013
1014         if (in_interrupt()) {
1015                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1016                 if (!work) {
1017                         netdev_err(lif->netdev, "%s OOM\n", __func__);
1018                         return;
1019                 }
1020                 work->type = IONIC_DW_TYPE_RX_MODE;
1021                 work->rx_mode = rx_mode;
1022                 netdev_dbg(lif->netdev, "deferred: rx_mode\n");
1023                 ionic_lif_deferred_enqueue(&lif->deferred, work);
1024         } else {
1025                 ionic_lif_rx_mode(lif, rx_mode);
1026         }
1027 }
1028
1029 static void ionic_set_rx_mode(struct net_device *netdev)
1030 {
1031         struct ionic_lif *lif = netdev_priv(netdev);
1032         struct ionic_identity *ident;
1033         unsigned int nfilters;
1034         unsigned int rx_mode;
1035
1036         ident = &lif->ionic->ident;
1037
1038         rx_mode = IONIC_RX_MODE_F_UNICAST;
1039         rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1040         rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1041         rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1042         rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1043
1044         /* Sync the unicast addresses, then check for an overflow state:
1045          *    if we have more addresses than filters, note the overflow
1046          *       and turn on NIC PROMISC;
1047          *    else, if overflow was previously set but is no longer needed,
1048          *       clear the flag and, unless the netdev itself asked for
1049          *       promisc, drop NIC PROMISC again.
1050          */
1051         __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1052         nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
1053         if (netdev_uc_count(netdev) + 1 > nfilters) {
1054                 rx_mode |= IONIC_RX_MODE_F_PROMISC;
1055                 lif->uc_overflow = true;
1056         } else if (lif->uc_overflow) {
1057                 lif->uc_overflow = false;
1058                 if (!(netdev->flags & IFF_PROMISC))
1059                         rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1060         }
1061
1062         /* same for multicast */
1063         __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1064         nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
1065         if (netdev_mc_count(netdev) > nfilters) {
1066                 rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1067                 lif->mc_overflow = true;
1068         } else if (lif->mc_overflow) {
1069                 lif->mc_overflow = false;
1070                 if (!(netdev->flags & IFF_ALLMULTI))
1071                         rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1072         }
1073
1074         if (lif->rx_mode != rx_mode)
1075                 _ionic_lif_rx_mode(lif, rx_mode);
1076 }
1077
1078 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1079 {
1080         u64 wanted = 0;
1081
1082         if (features & NETIF_F_HW_VLAN_CTAG_TX)
1083                 wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1084         if (features & NETIF_F_HW_VLAN_CTAG_RX)
1085                 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1086         if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1087                 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1088         if (features & NETIF_F_RXHASH)
1089                 wanted |= IONIC_ETH_HW_RX_HASH;
1090         if (features & NETIF_F_RXCSUM)
1091                 wanted |= IONIC_ETH_HW_RX_CSUM;
1092         if (features & NETIF_F_SG)
1093                 wanted |= IONIC_ETH_HW_TX_SG;
1094         if (features & NETIF_F_HW_CSUM)
1095                 wanted |= IONIC_ETH_HW_TX_CSUM;
1096         if (features & NETIF_F_TSO)
1097                 wanted |= IONIC_ETH_HW_TSO;
1098         if (features & NETIF_F_TSO6)
1099                 wanted |= IONIC_ETH_HW_TSO_IPV6;
1100         if (features & NETIF_F_TSO_ECN)
1101                 wanted |= IONIC_ETH_HW_TSO_ECN;
1102         if (features & NETIF_F_GSO_GRE)
1103                 wanted |= IONIC_ETH_HW_TSO_GRE;
1104         if (features & NETIF_F_GSO_GRE_CSUM)
1105                 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1106         if (features & NETIF_F_GSO_IPXIP4)
1107                 wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1108         if (features & NETIF_F_GSO_IPXIP6)
1109                 wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1110         if (features & NETIF_F_GSO_UDP_TUNNEL)
1111                 wanted |= IONIC_ETH_HW_TSO_UDP;
1112         if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1113                 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1114
1115         return cpu_to_le64(wanted);
1116 }
1117
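/* Request a feature set from the device and record in lif->hw_features
 * the subset the firmware actually enabled, so later paths can test what
 * is really supported.
 */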
1118 static int ionic_set_nic_features(struct ionic_lif *lif,
1119                                   netdev_features_t features)
1120 {
1121         struct device *dev = lif->ionic->dev;
1122         struct ionic_admin_ctx ctx = {
1123                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1124                 .cmd.lif_setattr = {
1125                         .opcode = IONIC_CMD_LIF_SETATTR,
1126                         .index = cpu_to_le16(lif->index),
1127                         .attr = IONIC_LIF_ATTR_FEATURES,
1128                 },
1129         };
1130         u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1131                          IONIC_ETH_HW_VLAN_RX_STRIP |
1132                          IONIC_ETH_HW_VLAN_RX_FILTER;
1133         u64 old_hw_features;
1134         int err;
1135
1136         ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1137         err = ionic_adminq_post_wait(lif, &ctx);
1138         if (err)
1139                 return err;
1140
1141         old_hw_features = lif->hw_features;
1142         lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1143                                        ctx.comp.lif_setattr.features);
1144
1145         if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
1146                 ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1147
1148         if ((vlan_flags & features) &&
1149             !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
1150                 dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
1151
1152         if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1153                 dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1154         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1155                 dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1156         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1157                 dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1158         if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1159                 dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1160         if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1161                 dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1162         if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1163                 dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1164         if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1165                 dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1166         if (lif->hw_features & IONIC_ETH_HW_TSO)
1167                 dev_dbg(dev, "feature ETH_HW_TSO\n");
1168         if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1169                 dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1170         if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1171                 dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1172         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1173                 dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1174         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1175                 dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1176         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1177                 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1178         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1179                 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1180         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1181                 dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1182         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1183                 dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1184
1185         return 0;
1186 }
1187
1188 static int ionic_init_nic_features(struct ionic_lif *lif)
1189 {
1190         struct net_device *netdev = lif->netdev;
1191         netdev_features_t features;
1192         int err;
1193
1194         /* set up what we expect to support by default */
1195         features = NETIF_F_HW_VLAN_CTAG_TX |
1196                    NETIF_F_HW_VLAN_CTAG_RX |
1197                    NETIF_F_HW_VLAN_CTAG_FILTER |
1198                    NETIF_F_RXHASH |
1199                    NETIF_F_SG |
1200                    NETIF_F_HW_CSUM |
1201                    NETIF_F_RXCSUM |
1202                    NETIF_F_TSO |
1203                    NETIF_F_TSO6 |
1204                    NETIF_F_TSO_ECN;
1205
1206         err = ionic_set_nic_features(lif, features);
1207         if (err)
1208                 return err;
1209
1210         /* tell the netdev what we actually can support */
1211         netdev->features |= NETIF_F_HIGHDMA;
1212
1213         if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1214                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1215         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1216                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1217         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1218                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1219         if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1220                 netdev->hw_features |= NETIF_F_RXHASH;
1221         if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1222                 netdev->hw_features |= NETIF_F_SG;
1223
1224         if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1225                 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1226         if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1227                 netdev->hw_enc_features |= NETIF_F_RXCSUM;
1228         if (lif->hw_features & IONIC_ETH_HW_TSO)
1229                 netdev->hw_enc_features |= NETIF_F_TSO;
1230         if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1231                 netdev->hw_enc_features |= NETIF_F_TSO6;
1232         if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1233                 netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1234         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1235                 netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1236         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1237                 netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1238         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1239                 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1240         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1241                 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1242         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1243                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1244         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1245                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1246
1247         netdev->hw_features |= netdev->hw_enc_features;
1248         netdev->features |= netdev->hw_features;
1249         netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
1250
1251         netdev->priv_flags |= IFF_UNICAST_FLT |
1252                               IFF_LIVE_ADDR_CHANGE;
1253
1254         return 0;
1255 }
1256
1257 static int ionic_set_features(struct net_device *netdev,
1258                               netdev_features_t features)
1259 {
1260         struct ionic_lif *lif = netdev_priv(netdev);
1261         int err;
1262
1263         netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1264                    __func__, (u64)lif->netdev->features, (u64)features);
1265
1266         err = ionic_set_nic_features(lif, features);
1267
1268         return err;
1269 }
1270
1271 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1272 {
1273         struct sockaddr *addr = sa;
1274         u8 *mac;
1275         int err;
1276
1277         mac = (u8 *)addr->sa_data;
1278         if (ether_addr_equal(netdev->dev_addr, mac))
1279                 return 0;
1280
1281         err = eth_prepare_mac_addr_change(netdev, addr);
1282         if (err)
1283                 return err;
1284
1285         if (!is_zero_ether_addr(netdev->dev_addr)) {
1286                 netdev_info(netdev, "deleting mac addr %pM\n",
1287                             netdev->dev_addr);
1288                 ionic_addr_del(netdev, netdev->dev_addr);
1289         }
1290
1291         eth_commit_mac_addr_change(netdev, addr);
1292         netdev_info(netdev, "updating mac addr %pM\n", mac);
1293
1294         return ionic_addr_add(netdev, mac);
1295 }
1296
1297 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1298 {
1299         struct ionic_lif *lif = netdev_priv(netdev);
1300         struct ionic_admin_ctx ctx = {
1301                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1302                 .cmd.lif_setattr = {
1303                         .opcode = IONIC_CMD_LIF_SETATTR,
1304                         .index = cpu_to_le16(lif->index),
1305                         .attr = IONIC_LIF_ATTR_MTU,
1306                         .mtu = cpu_to_le32(new_mtu),
1307                 },
1308         };
1309         int err;
1310
1311         err = ionic_adminq_post_wait(lif, &ctx);
1312         if (err)
1313                 return err;
1314
1315         netdev->mtu = new_mtu;
1316         err = ionic_reset_queues(lif);
1317
1318         return err;
1319 }
1320
1321 static void ionic_tx_timeout_work(struct work_struct *ws)
1322 {
1323         struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1324
1325         netdev_info(lif->netdev, "Tx Timeout recovery\n");
1326
1327         rtnl_lock();
1328         ionic_reset_queues(lif);
1329         rtnl_unlock();
1330 }
1331
1332 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1333 {
1334         struct ionic_lif *lif = netdev_priv(netdev);
1335
1336         schedule_work(&lif->tx_timeout_work);
1337 }
1338
1339 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1340                                  u16 vid)
1341 {
1342         struct ionic_lif *lif = netdev_priv(netdev);
1343         struct ionic_admin_ctx ctx = {
1344                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1345                 .cmd.rx_filter_add = {
1346                         .opcode = IONIC_CMD_RX_FILTER_ADD,
1347                         .lif_index = cpu_to_le16(lif->index),
1348                         .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
1349                         .vlan.vlan = cpu_to_le16(vid),
1350                 },
1351         };
1352         int err;
1353
1354         err = ionic_adminq_post_wait(lif, &ctx);
1355         if (err)
1356                 return err;
1357
1358         netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
1359                    ctx.comp.rx_filter_add.filter_id);
1360
1361         return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
1362 }
1363
1364 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1365                                   u16 vid)
1366 {
1367         struct ionic_lif *lif = netdev_priv(netdev);
1368         struct ionic_admin_ctx ctx = {
1369                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1370                 .cmd.rx_filter_del = {
1371                         .opcode = IONIC_CMD_RX_FILTER_DEL,
1372                         .lif_index = cpu_to_le16(lif->index),
1373                 },
1374         };
1375         struct ionic_rx_filter *f;
1376
1377         spin_lock_bh(&lif->rx_filters.lock);
1378
1379         f = ionic_rx_filter_by_vlan(lif, vid);
1380         if (!f) {
1381                 spin_unlock_bh(&lif->rx_filters.lock);
1382                 return -ENOENT;
1383         }
1384
1385         ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1386         ionic_rx_filter_free(lif, f);
1387         spin_unlock_bh(&lif->rx_filters.lock);
1388
1389         netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
1390                    le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
1391
1392         return ionic_adminq_post_wait(lif, &ctx);
1393 }
1394
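/* Push the current RSS settings to the NIC: the enabled hash types,
 * the hash key (copied into the command itself), and the indirection
 * table, which lives in DMA memory and is referenced by rss_ind_tbl_pa.
 */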
1395 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1396                          const u8 *key, const u32 *indir)
1397 {
1398         struct ionic_admin_ctx ctx = {
1399                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1400                 .cmd.lif_setattr = {
1401                         .opcode = IONIC_CMD_LIF_SETATTR,
1402                         .attr = IONIC_LIF_ATTR_RSS,
1403                         .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1404                 },
1405         };
1406         unsigned int i, tbl_sz;
1407
1408         if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1409                 lif->rss_types = types;
1410                 ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1411         }
1412
1413         if (key)
1414                 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1415
1416         if (indir) {
1417                 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1418                 for (i = 0; i < tbl_sz; i++)
1419                         lif->rss_ind_tbl[i] = indir[i];
1420         }
1421
1422         memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1423                IONIC_RSS_HASH_KEY_SIZE);
1424
1425         return ionic_adminq_post_wait(lif, &ctx);
1426 }
1427
1428 static int ionic_lif_rss_init(struct ionic_lif *lif)
1429 {
1430         unsigned int tbl_sz;
1431         unsigned int i;
1432
1433         lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1434                          IONIC_RSS_TYPE_IPV4_TCP |
1435                          IONIC_RSS_TYPE_IPV4_UDP |
1436                          IONIC_RSS_TYPE_IPV6     |
1437                          IONIC_RSS_TYPE_IPV6_TCP |
1438                          IONIC_RSS_TYPE_IPV6_UDP;
1439
1440         /* Fill indirection table with 'default' values */
1441         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1442         for (i = 0; i < tbl_sz; i++)
1443                 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1444
1445         return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1446 }
1447
1448 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1449 {
1450         int tbl_sz;
1451
1452         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1453         memset(lif->rss_ind_tbl, 0, tbl_sz);
1454         memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1455
1456         ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1457 }
1458
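/* Queue teardown runs in three stages: disable (tell the NIC to stop
 * using the rings), deinit (flush completions and drop any buffers
 * still on the rings), and free (release the qcq memory).  A -ETIMEDOUT
 * from the disable command stops further attempts, most likely because
 * the device is no longer responding.
 */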
1459 static void ionic_txrx_disable(struct ionic_lif *lif)
1460 {
1461         unsigned int i;
1462         int err;
1463
1464         if (lif->txqcqs) {
1465                 for (i = 0; i < lif->nxqs; i++) {
1466                         err = ionic_qcq_disable(lif->txqcqs[i].qcq);
1467                         if (err == -ETIMEDOUT)
1468                                 break;
1469                 }
1470         }
1471
1472         if (lif->rxqcqs) {
1473                 for (i = 0; i < lif->nxqs; i++) {
1474                         err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
1475                         if (err == -ETIMEDOUT)
1476                                 break;
1477                 }
1478         }
1479 }
1480
1481 static void ionic_txrx_deinit(struct ionic_lif *lif)
1482 {
1483         unsigned int i;
1484
1485         if (lif->txqcqs) {
1486                 for (i = 0; i < lif->nxqs; i++) {
1487                         ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1488                         ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
1489                         ionic_tx_empty(&lif->txqcqs[i].qcq->q);
1490                 }
1491         }
1492
1493         if (lif->rxqcqs) {
1494                 for (i = 0; i < lif->nxqs; i++) {
1495                         ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1496                         ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
1497                         ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
1498                 }
1499         }
1500         lif->rx_mode = 0;
1501 }
1502
1503 static void ionic_txrx_free(struct ionic_lif *lif)
1504 {
1505         unsigned int i;
1506
1507         if (lif->txqcqs) {
1508                 for (i = 0; i < lif->nxqs; i++) {
1509                         ionic_qcq_free(lif, lif->txqcqs[i].qcq);
1510                         lif->txqcqs[i].qcq = NULL;
1511                 }
1512         }
1513
1514         if (lif->rxqcqs) {
1515                 for (i = 0; i < lif->nxqs; i++) {
1516                         ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
1517                         lif->rxqcqs[i].qcq = NULL;
1518                 }
1519         }
1520 }
1521
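/* Allocate the tx/rx qcq pairs.  The Tx SG descriptor size is chosen
 * from the TXQ queue-type version reported by the device: version 1
 * with a matching size uses ionic_txq_sg_desc_v1, otherwise the base
 * ionic_txq_sg_desc is used.  Only the rx qcq gets IONIC_QCQ_F_INTR;
 * ionic_link_qcq_interrupts() ties the matching tx qcq to it so the
 * pair can be serviced from the same interrupt.
 */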
1522 static int ionic_txrx_alloc(struct ionic_lif *lif)
1523 {
1524         unsigned int sg_desc_sz;
1525         unsigned int flags;
1526         unsigned int i;
1527         int err = 0;
1528
1529         if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
1530             lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
1531                                           sizeof(struct ionic_txq_sg_desc_v1))
1532                 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
1533         else
1534                 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
1535
1536         flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
1537         for (i = 0; i < lif->nxqs; i++) {
1538                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
1539                                       lif->ntxq_descs,
1540                                       sizeof(struct ionic_txq_desc),
1541                                       sizeof(struct ionic_txq_comp),
1542                                       sg_desc_sz,
1543                                       lif->kern_pid, &lif->txqcqs[i].qcq);
1544                 if (err)
1545                         goto err_out;
1546
1547                 lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
1548                 ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
1549         }
1550
1551         flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
1552         for (i = 0; i < lif->nxqs; i++) {
1553                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
1554                                       lif->nrxq_descs,
1555                                       sizeof(struct ionic_rxq_desc),
1556                                       sizeof(struct ionic_rxq_comp),
1557                                       sizeof(struct ionic_rxq_sg_desc),
1558                                       lif->kern_pid, &lif->rxqcqs[i].qcq);
1559                 if (err)
1560                         goto err_out;
1561
1562                 lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
1563
1564                 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
1565                                      lif->rxqcqs[i].qcq->intr.index,
1566                                      lif->rx_coalesce_hw);
1567                 ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
1568                                           lif->txqcqs[i].qcq);
1569                 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
1570         }
1571
1572         return 0;
1573
1574 err_out:
1575         ionic_txrx_free(lif);
1576
1577         return err;
1578 }
1579
1580 static int ionic_txrx_init(struct ionic_lif *lif)
1581 {
1582         unsigned int i;
1583         int err;
1584
1585         for (i = 0; i < lif->nxqs; i++) {
1586                 err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
1587                 if (err)
1588                         goto err_out;
1589
1590                 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
1591                 if (err) {
1592                         ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1593                         goto err_out;
1594                 }
1595         }
1596
1597         if (lif->netdev->features & NETIF_F_RXHASH)
1598                 ionic_lif_rss_init(lif);
1599
1600         ionic_set_rx_mode(lif->netdev);
1601
1602         return 0;
1603
1604 err_out:
1605         while (i--) {
1606                 ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1607                 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1608         }
1609
1610         return err;
1611 }
1612
1613 static int ionic_txrx_enable(struct ionic_lif *lif)
1614 {
1615         int i, err;
1616
1617         for (i = 0; i < lif->nxqs; i++) {
1618                 ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
1619                 err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
1620                 if (err)
1621                         goto err_out;
1622
1623                 err = ionic_qcq_enable(lif->txqcqs[i].qcq);
1624                 if (err) {
1625                         if (err != -ETIMEDOUT)
1626                                 ionic_qcq_disable(lif->rxqcqs[i].qcq);
1627                         goto err_out;
1628                 }
1629         }
1630
1631         return 0;
1632
1633 err_out:
1634         while (i--) {
1635                 err = ionic_qcq_disable(lif->txqcqs[i].qcq);
1636                 if (err == -ETIMEDOUT)
1637                         break;
1638                 err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
1639                 if (err == -ETIMEDOUT)
1640                         break;
1641         }
1642
1643         return err;
1644 }
1645
1646 static int ionic_start_queues(struct ionic_lif *lif)
1647 {
1648         int err;
1649
1650         if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
1651                 return 0;
1652
1653         err = ionic_txrx_enable(lif);
1654         if (err) {
1655                 clear_bit(IONIC_LIF_F_UP, lif->state);
1656                 return err;
1657         }
1658         netif_tx_wake_all_queues(lif->netdev);
1659
1660         return 0;
1661 }
1662
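/* ndo_open: allocate and initialize the tx/rx queues, but only start
 * them (enable + wake the tx netdev queues) if the carrier is already
 * up; with link still down the queues are left initialized but idle.
 */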
1663 int ionic_open(struct net_device *netdev)
1664 {
1665         struct ionic_lif *lif = netdev_priv(netdev);
1666         int err;
1667
1668         err = ionic_txrx_alloc(lif);
1669         if (err)
1670                 return err;
1671
1672         err = ionic_txrx_init(lif);
1673         if (err)
1674                 goto err_out;
1675
1676         /* don't start the queues until we have link */
1677         if (netif_carrier_ok(netdev)) {
1678                 err = ionic_start_queues(lif);
1679                 if (err)
1680                         goto err_txrx_deinit;
1681         }
1682
1683         return 0;
1684
1685 err_txrx_deinit:
1686         ionic_txrx_deinit(lif);
1687 err_out:
1688         ionic_txrx_free(lif);
1689         return err;
1690 }
1691
1692 static void ionic_stop_queues(struct ionic_lif *lif)
1693 {
1694         if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
1695                 return;
1696
1697         netif_tx_disable(lif->netdev);
1698         ionic_txrx_disable(lif);
1699 }
1700
1701 int ionic_stop(struct net_device *netdev)
1702 {
1703         struct ionic_lif *lif = netdev_priv(netdev);
1704
1705         if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
1706                 return 0;
1707
1708         ionic_stop_queues(lif);
1709         ionic_txrx_deinit(lif);
1710         ionic_txrx_free(lif);
1711
1712         return 0;
1713 }
1714
1715 static int ionic_get_vf_config(struct net_device *netdev,
1716                                int vf, struct ifla_vf_info *ivf)
1717 {
1718         struct ionic_lif *lif = netdev_priv(netdev);
1719         struct ionic *ionic = lif->ionic;
1720         int ret = 0;
1721
1722         if (!netif_device_present(netdev))
1723                 return -EBUSY;
1724
1725         down_read(&ionic->vf_op_lock);
1726
1727         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1728                 ret = -EINVAL;
1729         } else {
1730                 ivf->vf           = vf;
1731                 ivf->vlan         = ionic->vfs[vf].vlanid;
1732                 ivf->qos          = 0;
1733                 ivf->spoofchk     = ionic->vfs[vf].spoofchk;
1734                 ivf->linkstate    = ionic->vfs[vf].linkstate;
1735                 ivf->max_tx_rate  = ionic->vfs[vf].maxrate;
1736                 ivf->trusted      = ionic->vfs[vf].trusted;
1737                 ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
1738         }
1739
1740         up_read(&ionic->vf_op_lock);
1741         return ret;
1742 }
1743
1744 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
1745                               struct ifla_vf_stats *vf_stats)
1746 {
1747         struct ionic_lif *lif = netdev_priv(netdev);
1748         struct ionic *ionic = lif->ionic;
1749         struct ionic_lif_stats *vs;
1750         int ret = 0;
1751
1752         if (!netif_device_present(netdev))
1753                 return -EBUSY;
1754
1755         down_read(&ionic->vf_op_lock);
1756
1757         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1758                 ret = -EINVAL;
1759         } else {
1760                 memset(vf_stats, 0, sizeof(*vf_stats));
1761                 vs = &ionic->vfs[vf].stats;
1762
1763                 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
1764                 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
1765                 vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
1766                 vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
1767                 vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
1768                 vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
1769                 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
1770                                        le64_to_cpu(vs->rx_mcast_drop_packets) +
1771                                        le64_to_cpu(vs->rx_bcast_drop_packets);
1772                 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
1773                                        le64_to_cpu(vs->tx_mcast_drop_packets) +
1774                                        le64_to_cpu(vs->tx_bcast_drop_packets);
1775         }
1776
1777         up_read(&ionic->vf_op_lock);
1778         return ret;
1779 }
1780
1781 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1782 {
1783         struct ionic_lif *lif = netdev_priv(netdev);
1784         struct ionic *ionic = lif->ionic;
1785         int ret;
1786
1787         if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
1788                 return -EINVAL;
1789
1790         if (!netif_device_present(netdev))
1791                 return -EBUSY;
1792
1793         down_write(&ionic->vf_op_lock);
1794
1795         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1796                 ret = -EINVAL;
1797         } else {
1798                 ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
1799                 if (!ret)
1800                         ether_addr_copy(ionic->vfs[vf].macaddr, mac);
1801         }
1802
1803         up_write(&ionic->vf_op_lock);
1804         return ret;
1805 }
1806
1807 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1808                              u8 qos, __be16 proto)
1809 {
1810         struct ionic_lif *lif = netdev_priv(netdev);
1811         struct ionic *ionic = lif->ionic;
1812         int ret;
1813
1814         /* until someday when we support qos */
1815         if (qos)
1816                 return -EINVAL;
1817
1818         if (vlan > 4095)
1819                 return -EINVAL;
1820
1821         if (proto != htons(ETH_P_8021Q))
1822                 return -EPROTONOSUPPORT;
1823
1824         if (!netif_device_present(netdev))
1825                 return -EBUSY;
1826
1827         down_write(&ionic->vf_op_lock);
1828
1829         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1830                 ret = -EINVAL;
1831         } else {
1832                 ret = ionic_set_vf_config(ionic, vf,
1833                                           IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
1834                 if (!ret)
1835                         ionic->vfs[vf].vlanid = vlan;
1836         }
1837
1838         up_write(&ionic->vf_op_lock);
1839         return ret;
1840 }
1841
1842 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
1843                              int tx_min, int tx_max)
1844 {
1845         struct ionic_lif *lif = netdev_priv(netdev);
1846         struct ionic *ionic = lif->ionic;
1847         int ret;
1848
1849         /* setting the min just seems silly */
1850         if (tx_min)
1851                 return -EINVAL;
1852
1853         if (!netif_device_present(netdev))
1854                 return -EBUSY;
1855
1856         down_write(&ionic->vf_op_lock);
1857
1858         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1859                 ret = -EINVAL;
1860         } else {
1861                 ret = ionic_set_vf_config(ionic, vf,
1862                                           IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
1863                 if (!ret)
1864                         lif->ionic->vfs[vf].maxrate = tx_max;
1865         }
1866
1867         up_write(&ionic->vf_op_lock);
1868         return ret;
1869 }
1870
1871 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
1872 {
1873         struct ionic_lif *lif = netdev_priv(netdev);
1874         struct ionic *ionic = lif->ionic;
1875         u8 data = set;  /* convert to u8 for config */
1876         int ret;
1877
1878         if (!netif_device_present(netdev))
1879                 return -EBUSY;
1880
1881         down_write(&ionic->vf_op_lock);
1882
1883         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1884                 ret = -EINVAL;
1885         } else {
1886                 ret = ionic_set_vf_config(ionic, vf,
1887                                           IONIC_VF_ATTR_SPOOFCHK, &data);
1888                 if (!ret)
1889                         ionic->vfs[vf].spoofchk = data;
1890         }
1891
1892         up_write(&ionic->vf_op_lock);
1893         return ret;
1894 }
1895
1896 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
1897 {
1898         struct ionic_lif *lif = netdev_priv(netdev);
1899         struct ionic *ionic = lif->ionic;
1900         u8 data = set;  /* convert to u8 for config */
1901         int ret;
1902
1903         if (!netif_device_present(netdev))
1904                 return -EBUSY;
1905
1906         down_write(&ionic->vf_op_lock);
1907
1908         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1909                 ret = -EINVAL;
1910         } else {
1911                 ret = ionic_set_vf_config(ionic, vf,
1912                                           IONIC_VF_ATTR_TRUST, &data);
1913                 if (!ret)
1914                         ionic->vfs[vf].trusted = data;
1915         }
1916
1917         up_write(&ionic->vf_op_lock);
1918         return ret;
1919 }
1920
1921 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
1922 {
1923         struct ionic_lif *lif = netdev_priv(netdev);
1924         struct ionic *ionic = lif->ionic;
1925         u8 data;
1926         int ret;
1927
1928         switch (set) {
1929         case IFLA_VF_LINK_STATE_ENABLE:
1930                 data = IONIC_VF_LINK_STATUS_UP;
1931                 break;
1932         case IFLA_VF_LINK_STATE_DISABLE:
1933                 data = IONIC_VF_LINK_STATUS_DOWN;
1934                 break;
1935         case IFLA_VF_LINK_STATE_AUTO:
1936                 data = IONIC_VF_LINK_STATUS_AUTO;
1937                 break;
1938         default:
1939                 return -EINVAL;
1940         }
1941
1942         if (!netif_device_present(netdev))
1943                 return -EBUSY;
1944
1945         down_write(&ionic->vf_op_lock);
1946
1947         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1948                 ret = -EINVAL;
1949         } else {
1950                 ret = ionic_set_vf_config(ionic, vf,
1951                                           IONIC_VF_ATTR_LINKSTATE, &data);
1952                 if (!ret)
1953                         ionic->vfs[vf].linkstate = set;
1954         }
1955
1956         up_write(&ionic->vf_op_lock);
1957         return ret;
1958 }
1959
1960 static const struct net_device_ops ionic_netdev_ops = {
1961         .ndo_open               = ionic_open,
1962         .ndo_stop               = ionic_stop,
1963         .ndo_start_xmit         = ionic_start_xmit,
1964         .ndo_get_stats64        = ionic_get_stats64,
1965         .ndo_set_rx_mode        = ionic_set_rx_mode,
1966         .ndo_set_features       = ionic_set_features,
1967         .ndo_set_mac_address    = ionic_set_mac_address,
1968         .ndo_validate_addr      = eth_validate_addr,
1969         .ndo_tx_timeout         = ionic_tx_timeout,
1970         .ndo_change_mtu         = ionic_change_mtu,
1971         .ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
1972         .ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
1973         .ndo_set_vf_vlan        = ionic_set_vf_vlan,
1974         .ndo_set_vf_trust       = ionic_set_vf_trust,
1975         .ndo_set_vf_mac         = ionic_set_vf_mac,
1976         .ndo_set_vf_rate        = ionic_set_vf_rate,
1977         .ndo_set_vf_spoofchk    = ionic_set_vf_spoofchk,
1978         .ndo_get_vf_config      = ionic_get_vf_config,
1979         .ndo_set_vf_link_state  = ionic_set_vf_link_state,
1980         .ndo_get_vf_stats       = ionic_get_vf_stats,
1981 };
1982
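/* Stop and restart the queues under the IONIC_LIF_F_QUEUE_RESET bit so
 * only one reset runs at a time.  The netdev is detached around the
 * stop/open cycle to keep the stack off the rings while they are being
 * rebuilt, then re-attached once the new queues are up.
 */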
1983 int ionic_reset_queues(struct ionic_lif *lif)
1984 {
1985         bool running;
1986         int err = 0;
1987
1988         err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
1989         if (err)
1990                 return err;
1991
1992         running = netif_running(lif->netdev);
1993         if (running) {
1994                 netif_device_detach(lif->netdev);
1995                 err = ionic_stop(lif->netdev);
1996         }
1997         if (!err && running) {
1998                 ionic_open(lif->netdev);
1999                 netif_device_attach(lif->netdev);
2000         }
2001
2002         clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
2003
2004         return err;
2005 }
2006
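/* Allocate the netdev + LIF and its support structures: the DMA
 * coherent lif info block, the qcqs, and the RSS indirection table.
 * The tx and rx queue counts are sized identically in
 * ionic_lifs_size(), which is why ntxqs_per_lif is passed for both
 * queue-count arguments of alloc_etherdev_mqs().
 */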
2007 static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
2008 {
2009         struct device *dev = ionic->dev;
2010         struct net_device *netdev;
2011         struct ionic_lif *lif;
2012         int tbl_sz;
2013         int err;
2014
2015         netdev = alloc_etherdev_mqs(sizeof(*lif),
2016                                     ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
2017         if (!netdev) {
2018                 dev_err(dev, "Cannot allocate netdev, aborting\n");
2019                 return ERR_PTR(-ENOMEM);
2020         }
2021
2022         SET_NETDEV_DEV(netdev, dev);
2023
2024         lif = netdev_priv(netdev);
2025         lif->netdev = netdev;
2026         ionic->master_lif = lif;
2027         netdev->netdev_ops = &ionic_netdev_ops;
2028         ionic_ethtool_set_ops(netdev);
2029
2030         netdev->watchdog_timeo = 2 * HZ;
2031         netif_carrier_off(netdev);
2032
2033         netdev->min_mtu = IONIC_MIN_MTU;
2034         netdev->max_mtu = IONIC_MAX_MTU;
2035
2036         lif->neqs = ionic->neqs_per_lif;
2037         lif->nxqs = ionic->ntxqs_per_lif;
2038
2039         lif->ionic = ionic;
2040         lif->index = index;
2041         lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
2042         lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
2043
2044         /* Convert the default coalesce value to actual hw resolution */
2045         lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
2046         lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
2047                                                     lif->rx_coalesce_usecs);
2048
2049         snprintf(lif->name, sizeof(lif->name), "lif%u", index);
2050
2051         spin_lock_init(&lif->adminq_lock);
2052
2053         spin_lock_init(&lif->deferred.lock);
2054         INIT_LIST_HEAD(&lif->deferred.list);
2055         INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
2056
2057         /* allocate lif info */
2058         lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
2059         lif->info = dma_alloc_coherent(dev, lif->info_sz,
2060                                        &lif->info_pa, GFP_KERNEL);
2061         if (!lif->info) {
2062                 dev_err(dev, "Failed to allocate lif info, aborting\n");
2063                 err = -ENOMEM;
2064                 goto err_out_free_netdev;
2065         }
2066
2067         ionic_debugfs_add_lif(lif);
2068
2069         /* allocate queues */
2070         err = ionic_qcqs_alloc(lif);
2071         if (err)
2072                 goto err_out_free_lif_info;
2073
2074         /* allocate rss indirection table */
2075         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
2076         lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
2077         lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
2078                                               &lif->rss_ind_tbl_pa,
2079                                               GFP_KERNEL);
2080
2081         if (!lif->rss_ind_tbl) {
2082                 err = -ENOMEM;
2083                 dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
2084                 goto err_out_free_qcqs;
2085         }
2086         netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
2087
2088         list_add_tail(&lif->list, &ionic->lifs);
2089
2090         return lif;
2091
2092 err_out_free_qcqs:
2093         ionic_qcqs_free(lif);
2094 err_out_free_lif_info:
2095         dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2096         lif->info = NULL;
2097         lif->info_pa = 0;
2098 err_out_free_netdev:
2099         free_netdev(lif->netdev);
2100         lif = NULL;
2101
2102         return ERR_PTR(err);
2103 }
2104
2105 int ionic_lifs_alloc(struct ionic *ionic)
2106 {
2107         struct ionic_lif *lif;
2108
2109         INIT_LIST_HEAD(&ionic->lifs);
2110
2111         /* only build the first lif, others are for later features */
2112         set_bit(0, ionic->lifbits);
2113
2114         lif = ionic_lif_alloc(ionic, 0);
2115         if (IS_ERR_OR_NULL(lif)) {
2116                 clear_bit(0, ionic->lifbits);
2117                 return -ENOMEM;
2118         }
2119
2120         lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
2121         ionic_lif_queue_identify(lif);
2122
2123         return 0;
2124 }
2125
2126 static void ionic_lif_reset(struct ionic_lif *lif)
2127 {
2128         struct ionic_dev *idev = &lif->ionic->idev;
2129
2130         mutex_lock(&lif->ionic->dev_cmd_lock);
2131         ionic_dev_cmd_lif_reset(idev, lif->index);
2132         ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2133         mutex_unlock(&lif->ionic->dev_cmd_lock);
2134 }
2135
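/* FW reset handling: on FW down, detach the netdev, stop the queues,
 * free all queue resources and reset the LIF state; on FW up, rebuild
 * in the reverse order and replay the saved rx filters, then re-attach
 * the netdev and kick a link status check.
 */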
2136 static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
2137 {
2138         struct ionic *ionic = lif->ionic;
2139
2140         if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
2141                 return;
2142
2143         dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
2144
2145         netif_device_detach(lif->netdev);
2146
2147         if (test_bit(IONIC_LIF_F_UP, lif->state)) {
2148                 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
2149                 ionic_stop_queues(lif);
2150         }
2151
2152         if (netif_running(lif->netdev)) {
2153                 ionic_txrx_deinit(lif);
2154                 ionic_txrx_free(lif);
2155         }
2156         ionic_lifs_deinit(ionic);
2157         ionic_reset(ionic);
2158         ionic_qcqs_free(lif);
2159
2160         dev_info(ionic->dev, "FW Down: LIFs stopped\n");
2161 }
2162
2163 static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
2164 {
2165         struct ionic *ionic = lif->ionic;
2166         int err;
2167
2168         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2169                 return;
2170
2171         dev_info(ionic->dev, "FW Up: restarting LIFs\n");
2172
2173         ionic_init_devinfo(ionic);
2174         ionic_port_init(ionic);
2175         err = ionic_qcqs_alloc(lif);
2176         if (err)
2177                 goto err_out;
2178
2179         err = ionic_lifs_init(ionic);
2180         if (err)
2181                 goto err_qcqs_free;
2182
2183         if (lif->registered)
2184                 ionic_lif_set_netdev_info(lif);
2185
2186         ionic_rx_filter_replay(lif);
2187
2188         if (netif_running(lif->netdev)) {
2189                 err = ionic_txrx_alloc(lif);
2190                 if (err)
2191                         goto err_lifs_deinit;
2192
2193                 err = ionic_txrx_init(lif);
2194                 if (err)
2195                         goto err_txrx_free;
2196         }
2197
2198         clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
2199         ionic_link_status_check_request(lif);
2200         netif_device_attach(lif->netdev);
2201         dev_info(ionic->dev, "FW Up: LIFs restarted\n");
2202
2203         return;
2204
2205 err_txrx_free:
2206         ionic_txrx_free(lif);
2207 err_lifs_deinit:
2208         ionic_lifs_deinit(ionic);
2209 err_qcqs_free:
2210         ionic_qcqs_free(lif);
2211 err_out:
2212         dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
2213 }
2214
2215 static void ionic_lif_free(struct ionic_lif *lif)
2216 {
2217         struct device *dev = lif->ionic->dev;
2218
2219         /* free rss indirection table */
2220         dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
2221                           lif->rss_ind_tbl_pa);
2222         lif->rss_ind_tbl = NULL;
2223         lif->rss_ind_tbl_pa = 0;
2224
2225         /* free queues */
2226         ionic_qcqs_free(lif);
2227         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2228                 ionic_lif_reset(lif);
2229
2230         /* free lif info */
2231         dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2232         lif->info = NULL;
2233         lif->info_pa = 0;
2234
2235         /* unmap doorbell page */
2236         ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2237         lif->kern_dbpage = NULL;
2238         kfree(lif->dbid_inuse);
2239         lif->dbid_inuse = NULL;
2240
2241         /* free netdev & lif */
2242         ionic_debugfs_del_lif(lif);
2243         list_del(&lif->list);
2244         free_netdev(lif->netdev);
2245 }
2246
2247 void ionic_lifs_free(struct ionic *ionic)
2248 {
2249         struct list_head *cur, *tmp;
2250         struct ionic_lif *lif;
2251
2252         list_for_each_safe(cur, tmp, &ionic->lifs) {
2253                 lif = list_entry(cur, struct ionic_lif, list);
2254
2255                 ionic_lif_free(lif);
2256         }
2257 }
2258
2259 static void ionic_lif_deinit(struct ionic_lif *lif)
2260 {
2261         if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
2262                 return;
2263
2264         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2265                 cancel_work_sync(&lif->deferred.work);
2266                 cancel_work_sync(&lif->tx_timeout_work);
2267                 ionic_rx_filters_deinit(lif);
2268         }
2269
2270         if (lif->netdev->features & NETIF_F_RXHASH)
2271                 ionic_lif_rss_deinit(lif);
2272
2273         napi_disable(&lif->adminqcq->napi);
2274         ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2275         ionic_lif_qcq_deinit(lif, lif->adminqcq);
2276
2277         ionic_lif_reset(lif);
2278 }
2279
2280 void ionic_lifs_deinit(struct ionic *ionic)
2281 {
2282         struct list_head *cur, *tmp;
2283         struct ionic_lif *lif;
2284
2285         list_for_each_safe(cur, tmp, &ionic->lifs) {
2286                 lif = list_entry(cur, struct ionic_lif, list);
2287                 ionic_lif_deinit(lif);
2288         }
2289 }
2290
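/* The adminq is brought up through the dev_cmd register interface,
 * since it can't post commands to itself before it exists; once it is
 * running, the notifyq (and everything after it) is initialized with
 * ordinary adminq commands.
 */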
2291 static int ionic_lif_adminq_init(struct ionic_lif *lif)
2292 {
2293         struct device *dev = lif->ionic->dev;
2294         struct ionic_q_init_comp comp;
2295         struct ionic_dev *idev;
2296         struct ionic_qcq *qcq;
2297         struct ionic_queue *q;
2298         int err;
2299
2300         idev = &lif->ionic->idev;
2301         qcq = lif->adminqcq;
2302         q = &qcq->q;
2303
2304         mutex_lock(&lif->ionic->dev_cmd_lock);
2305         ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
2306         err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2307         ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2308         mutex_unlock(&lif->ionic->dev_cmd_lock);
2309         if (err) {
2310                 netdev_err(lif->netdev, "adminq init failed %d\n", err);
2311                 return err;
2312         }
2313
2314         q->hw_type = comp.hw_type;
2315         q->hw_index = le32_to_cpu(comp.hw_index);
2316         q->dbval = IONIC_DBELL_QID(q->hw_index);
2317
2318         dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
2319         dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
2320
2321         netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
2322                        NAPI_POLL_WEIGHT);
2323
2324         napi_enable(&qcq->napi);
2325
2326         if (qcq->flags & IONIC_QCQ_F_INTR)
2327                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
2328                                 IONIC_INTR_MASK_CLEAR);
2329
2330         qcq->flags |= IONIC_QCQ_F_INITED;
2331
2332         return 0;
2333 }
2334
2335 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
2336 {
2337         struct ionic_qcq *qcq = lif->notifyqcq;
2338         struct device *dev = lif->ionic->dev;
2339         struct ionic_queue *q = &qcq->q;
2340         int err;
2341
2342         struct ionic_admin_ctx ctx = {
2343                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2344                 .cmd.q_init = {
2345                         .opcode = IONIC_CMD_Q_INIT,
2346                         .lif_index = cpu_to_le16(lif->index),
2347                         .type = q->type,
2348                         .ver = lif->qtype_info[q->type].version,
2349                         .index = cpu_to_le32(q->index),
2350                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
2351                                              IONIC_QINIT_F_ENA),
2352                         .intr_index = cpu_to_le16(lif->adminqcq->intr.index),
2353                         .pid = cpu_to_le16(q->pid),
2354                         .ring_size = ilog2(q->num_descs),
2355                         .ring_base = cpu_to_le64(q->base_pa),
2356                 }
2357         };
2358
2359         dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
2360         dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
2361         dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
2362         dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
2363
2364         err = ionic_adminq_post_wait(lif, &ctx);
2365         if (err)
2366                 return err;
2367
2368         lif->last_eid = 0;
2369         q->hw_type = ctx.comp.q_init.hw_type;
2370         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
2371         q->dbval = IONIC_DBELL_QID(q->hw_index);
2372
2373         dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
2374         dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
2375
2376         /* preset the callback info */
2377         q->info[0].cb_arg = lif;
2378
2379         qcq->flags |= IONIC_QCQ_F_INITED;
2380
2381         return 0;
2382 }
2383
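/* Sync the station MAC address: read what the device reports via
 * LIF_GETATTR.  If the netdev already carries a non-zero address (for
 * example after a fw-upgrade reset), just make sure it is in the
 * filter list; otherwise adopt the device's address as the netdev
 * address and add it as a filter.
 */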
2384 static int ionic_station_set(struct ionic_lif *lif)
2385 {
2386         struct net_device *netdev = lif->netdev;
2387         struct ionic_admin_ctx ctx = {
2388                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2389                 .cmd.lif_getattr = {
2390                         .opcode = IONIC_CMD_LIF_GETATTR,
2391                         .index = cpu_to_le16(lif->index),
2392                         .attr = IONIC_LIF_ATTR_MAC,
2393                 },
2394         };
2395         struct sockaddr addr;
2396         int err;
2397
2398         err = ionic_adminq_post_wait(lif, &ctx);
2399         if (err)
2400                 return err;
2401         netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
2402                    ctx.comp.lif_getattr.mac);
2403         if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
2404                 return 0;
2405
2406         if (!is_zero_ether_addr(netdev->dev_addr)) {
2407                 /* If the netdev mac is non-zero and doesn't match the default
2408                  * device address, it was set by something earlier and we're
2409                  * likely here again after a fw-upgrade reset.  We need to be
2410                  * sure the netdev mac is in our filter list.
2411                  */
2412                 if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
2413                                       netdev->dev_addr))
2414                         ionic_lif_addr(lif, netdev->dev_addr, true);
2415         } else {
2416                 /* Update the netdev mac with the device's mac */
2417                 memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
2418                 addr.sa_family = AF_INET;
2419                 err = eth_prepare_mac_addr_change(netdev, &addr);
2420                 if (err) {
2421                         netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
2422                                     addr.sa_data, err);
2423                         return 0;
2424                 }
2425
2426                 eth_commit_mac_addr_change(netdev, &addr);
2427         }
2428
2429         netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
2430                    netdev->dev_addr);
2431         ionic_lif_addr(lif, netdev->dev_addr, true);
2432
2433         return 0;
2434 }
2435
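/* Bring a LIF to life: point the FW at the lif info block, claim
 * doorbell page 0 for the kernel pid, then initialize the adminq,
 * notifyq, NIC features, rx filters and station address in that order.
 */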
2436 static int ionic_lif_init(struct ionic_lif *lif)
2437 {
2438         struct ionic_dev *idev = &lif->ionic->idev;
2439         struct device *dev = lif->ionic->dev;
2440         struct ionic_lif_init_comp comp;
2441         int dbpage_num;
2442         int err;
2443
2444         mutex_lock(&lif->ionic->dev_cmd_lock);
2445         ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
2446         err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2447         ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2448         mutex_unlock(&lif->ionic->dev_cmd_lock);
2449         if (err)
2450                 return err;
2451
2452         lif->hw_index = le16_to_cpu(comp.hw_index);
2453
2454         /* now that we have the hw_index we can figure out our doorbell page */
2455         lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
2456         if (!lif->dbid_count) {
2457                 dev_err(dev, "No doorbell pages, aborting\n");
2458                 return -EINVAL;
2459         }
2460
2461         lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
2462         if (!lif->dbid_inuse) {
2463                 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
2464                 return -ENOMEM;
2465         }
2466
2467         /* first doorbell id reserved for kernel (dbid aka pid == zero) */
2468         set_bit(0, lif->dbid_inuse);
2469         lif->kern_pid = 0;
2470
2471         dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
2472         lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
2473         if (!lif->kern_dbpage) {
2474                 dev_err(dev, "Cannot map dbpage, aborting\n");
2475                 err = -ENOMEM;
2476                 goto err_out_free_dbid;
2477         }
2478
2479         err = ionic_lif_adminq_init(lif);
2480         if (err)
2481                 goto err_out_adminq_deinit;
2482
2483         if (lif->ionic->nnqs_per_lif) {
2484                 err = ionic_lif_notifyq_init(lif);
2485                 if (err)
2486                         goto err_out_notifyq_deinit;
2487         }
2488
2489         err = ionic_init_nic_features(lif);
2490         if (err)
2491                 goto err_out_notifyq_deinit;
2492
2493         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2494                 err = ionic_rx_filters_init(lif);
2495                 if (err)
2496                         goto err_out_notifyq_deinit;
2497         }
2498
2499         err = ionic_station_set(lif);
2500         if (err)
2501                 goto err_out_notifyq_deinit;
2502
2503         lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
2504
2505         set_bit(IONIC_LIF_F_INITED, lif->state);
2506
2507         INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
2508
2509         return 0;
2510
2511 err_out_notifyq_deinit:
2512         ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2513 err_out_adminq_deinit:
2514         ionic_lif_qcq_deinit(lif, lif->adminqcq);
2515         ionic_lif_reset(lif);
2516         ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2517         lif->kern_dbpage = NULL;
2518 err_out_free_dbid:
2519         kfree(lif->dbid_inuse);
2520         lif->dbid_inuse = NULL;
2521
2522         return err;
2523 }
2524
2525 int ionic_lifs_init(struct ionic *ionic)
2526 {
2527         struct list_head *cur, *tmp;
2528         struct ionic_lif *lif;
2529         int err;
2530
2531         list_for_each_safe(cur, tmp, &ionic->lifs) {
2532                 lif = list_entry(cur, struct ionic_lif, list);
2533                 err = ionic_lif_init(lif);
2534                 if (err)
2535                         return err;
2536         }
2537
2538         return 0;
2539 }
2540
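/* This handler is currently a no-op; the work struct is still set up
 * in ionic_lifs_register() and cancelled in ionic_lifs_unregister(),
 * so the empty stub appears to exist just to keep that plumbing in
 * place.
 */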
2541 static void ionic_lif_notify_work(struct work_struct *ws)
2542 {
2543 }
2544
2545 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
2546 {
2547         struct ionic_admin_ctx ctx = {
2548                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2549                 .cmd.lif_setattr = {
2550                         .opcode = IONIC_CMD_LIF_SETATTR,
2551                         .index = cpu_to_le16(lif->index),
2552                         .attr = IONIC_LIF_ATTR_NAME,
2553                 },
2554         };
2555
2556         strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
2557                 sizeof(ctx.cmd.lif_setattr.name));
2558
2559         ionic_adminq_post_wait(lif, &ctx);
2560 }
2561
2562 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
2563 {
2564         if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
2565                 return NULL;
2566
2567         return netdev_priv(netdev);
2568 }
2569
2570 static int ionic_lif_notify(struct notifier_block *nb,
2571                             unsigned long event, void *info)
2572 {
2573         struct net_device *ndev = netdev_notifier_info_to_dev(info);
2574         struct ionic *ionic = container_of(nb, struct ionic, nb);
2575         struct ionic_lif *lif = ionic_netdev_lif(ndev);
2576
2577         if (!lif || lif->ionic != ionic)
2578                 return NOTIFY_DONE;
2579
2580         switch (event) {
2581         case NETDEV_CHANGENAME:
2582                 ionic_lif_set_netdev_info(lif);
2583                 break;
2584         }
2585
2586         return NOTIFY_DONE;
2587 }
2588
2589 int ionic_lifs_register(struct ionic *ionic)
2590 {
2591         int err;
2592
2593         INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
2594
2595         ionic->nb.notifier_call = ionic_lif_notify;
2596
2597         err = register_netdevice_notifier(&ionic->nb);
2598         if (err)
2599                 ionic->nb.notifier_call = NULL;
2600
2601         /* only register LIF0 for now */
2602         err = register_netdev(ionic->master_lif->netdev);
2603         if (err) {
2604                 dev_err(ionic->dev, "Cannot register net device, aborting\n");
2605                 return err;
2606         }
2607         ionic->master_lif->registered = true;
2608
2609         return 0;
2610 }
2611
2612 void ionic_lifs_unregister(struct ionic *ionic)
2613 {
2614         if (ionic->nb.notifier_call) {
2615                 unregister_netdevice_notifier(&ionic->nb);
2616                 cancel_work_sync(&ionic->nb_work);
2617                 ionic->nb.notifier_call = NULL;
2618         }
2619
2620         /* There is only one lif ever registered in the
2621          * current model, so don't bother searching the
2622          * ionic->lifs list for candidates to unregister
2623          */
2624         if (ionic->master_lif &&
2625             ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
2626                 unregister_netdev(ionic->master_lif->netdev);
2627 }
2628
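/* Ask the FW what each known queue type supports (descriptor sizes, SG
 * limits, version), offering the highest version the driver handles
 * from ionic_qtype_versions[].  -EINVAL means the queue type isn't
 * supported; -EIO suggests older FW without q_ident support, in which
 * case the zeroed base values are left in place.
 */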
2629 static void ionic_lif_queue_identify(struct ionic_lif *lif)
2630 {
2631         struct ionic *ionic = lif->ionic;
2632         union ionic_q_identity *q_ident;
2633         struct ionic_dev *idev;
2634         int qtype;
2635         int err;
2636
2637         idev = &lif->ionic->idev;
2638         q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
2639
2640         for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
2641                 struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
2642
2643                 /* skip the queue types we don't know about */
2644                 switch (qtype) {
2645                 case IONIC_QTYPE_ADMINQ:
2646                 case IONIC_QTYPE_NOTIFYQ:
2647                 case IONIC_QTYPE_RXQ:
2648                 case IONIC_QTYPE_TXQ:
2649                         break;
2650                 default:
2651                         continue;
2652                 }
2653
2654                 memset(qti, 0, sizeof(*qti));
2655
2656                 mutex_lock(&ionic->dev_cmd_lock);
2657                 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
2658                                              ionic_qtype_versions[qtype]);
2659                 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2660                 if (!err) {
2661                         qti->version   = q_ident->version;
2662                         qti->supported = q_ident->supported;
2663                         qti->features  = le64_to_cpu(q_ident->features);
2664                         qti->desc_sz   = le16_to_cpu(q_ident->desc_sz);
2665                         qti->comp_sz   = le16_to_cpu(q_ident->comp_sz);
2666                         qti->sg_desc_sz   = le16_to_cpu(q_ident->sg_desc_sz);
2667                         qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
2668                         qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
2669                 }
2670                 mutex_unlock(&ionic->dev_cmd_lock);
2671
2672                 if (err == -EINVAL) {
2673                         dev_err(ionic->dev, "qtype %d not supported\n", qtype);
2674                         continue;
2675                 } else if (err == -EIO) {
2676                         dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
2677                         return;
2678                 } else if (err) {
2679                         dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
2680                                 qtype, err);
2681                         return;
2682                 }
2683
2684                 dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
2685                         qtype, qti->version);
2686                 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
2687                         qtype, qti->supported);
2688                 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
2689                         qtype, qti->features);
2690                 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
2691                         qtype, qti->desc_sz);
2692                 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
2693                         qtype, qti->comp_sz);
2694                 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
2695                         qtype, qti->sg_desc_sz);
2696                 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
2697                         qtype, qti->max_sg_elems);
2698                 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
2699                         qtype, qti->sg_desc_stride);
2700         }
2701 }
2702
2703 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
2704                        union ionic_lif_identity *lid)
2705 {
2706         struct ionic_dev *idev = &ionic->idev;
2707         size_t sz;
2708         int err;
2709
2710         sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
2711
2712         mutex_lock(&ionic->dev_cmd_lock);
2713         ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
2714         err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2715         memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
2716         mutex_unlock(&ionic->dev_cmd_lock);
2717         if (err)
2718                 return err;
2719
2720         dev_dbg(ionic->dev, "capabilities 0x%llx\n",
2721                 le64_to_cpu(lid->capabilities));
2722
2723         dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
2724                 le32_to_cpu(lid->eth.max_ucast_filters));
2725         dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
2726                 le32_to_cpu(lid->eth.max_mcast_filters));
2727         dev_dbg(ionic->dev, "eth.features 0x%llx\n",
2728                 le64_to_cpu(lid->eth.config.features));
2729         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
2730                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
2731         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
2732                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
2733         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
2734                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
2735         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
2736                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
2737         dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
2738         dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
2739         dev_dbg(ionic->dev, "eth.config.mtu %d\n",
2740                 le32_to_cpu(lid->eth.config.mtu));
2741
2742         return 0;
2743 }
2744
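/* Size the interrupt budget: one vector for the adminq/notifyq plus
 * one per TxRx queue pair (nxqs) plus one per RDMA event queue (neqs),
 * with nxqs and neqs both capped at num_online_cpus().  For example, 8
 * online CPUs (with at least that many queues advertised) and 4 RDMA
 * EQs give nintrs = 1 + 8 + 4 = 13.  If the device or the OS can't
 * supply that many vectors, the notifyq, EQ and queue-pair counts are
 * halved in turn and the calculation is retried.
 */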
2745 int ionic_lifs_size(struct ionic *ionic)
2746 {
2747         struct ionic_identity *ident = &ionic->ident;
2748         unsigned int nintrs, dev_nintrs;
2749         union ionic_lif_config *lc;
2750         unsigned int ntxqs_per_lif;
2751         unsigned int nrxqs_per_lif;
2752         unsigned int neqs_per_lif;
2753         unsigned int nnqs_per_lif;
2754         unsigned int nxqs, neqs;
2755         unsigned int min_intrs;
2756         int err;
2757
2758         lc = &ident->lif.eth.config;
2759         dev_nintrs = le32_to_cpu(ident->dev.nintrs);
2760         neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
2761         nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
2762         ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
2763         nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
2764
2765         nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
2766         nxqs = min(nxqs, num_online_cpus());
2767         neqs = min(neqs_per_lif, num_online_cpus());
2768
2769 try_again:
2770         /* interrupt usage:
2771          *    1 for master lif adminq/notifyq
2772          *    1 for each CPU for master lif TxRx queue pairs
2773          *    whatever's left is for RDMA queues
2774          */
2775         nintrs = 1 + nxqs + neqs;
2776         min_intrs = 2;  /* adminq + 1 TxRx queue pair */
2777
2778         if (nintrs > dev_nintrs)
2779                 goto try_fewer;
2780
2781         err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
2782         if (err < 0 && err != -ENOSPC) {
2783                 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
2784                 return err;
2785         }
2786         if (err == -ENOSPC)
2787                 goto try_fewer;
2788
2789         if (err != nintrs) {
2790                 ionic_bus_free_irq_vectors(ionic);
2791                 goto try_fewer;
2792         }
2793
2794         ionic->nnqs_per_lif = nnqs_per_lif;
2795         ionic->neqs_per_lif = neqs;
2796         ionic->ntxqs_per_lif = nxqs;
2797         ionic->nrxqs_per_lif = nxqs;
2798         ionic->nintrs = nintrs;
2799
2800         ionic_debugfs_add_sizes(ionic);
2801
2802         return 0;
2803
2804 try_fewer:
2805         if (nnqs_per_lif > 1) {
2806                 nnqs_per_lif >>= 1;
2807                 goto try_again;
2808         }
2809         if (neqs > 1) {
2810                 neqs >>= 1;
2811                 goto try_again;
2812         }
2813         if (nxqs > 1) {
2814                 nxqs >>= 1;
2815                 goto try_again;
2816         }
2817         dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
2818         return -ENOSPC;
2819 }