ionic: no link check while resetting queues
[linux-2.6-microblaze.git] drivers/net/ethernet/pensando/ionic/ionic_lif.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
4 #include <linux/printk.h>
5 #include <linux/dynamic_debug.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/rtnetlink.h>
9 #include <linux/interrupt.h>
10 #include <linux/pci.h>
11 #include <linux/cpumask.h>
12
13 #include "ionic.h"
14 #include "ionic_bus.h"
15 #include "ionic_lif.h"
16 #include "ionic_txrx.h"
17 #include "ionic_ethtool.h"
18 #include "ionic_debugfs.h"
19
20 /* queuetype support level */
21 static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
22         [IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
23         [IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
24         [IONIC_QTYPE_RXQ]     = 0,   /* 0 = Base version with CQ+SG support */
25         [IONIC_QTYPE_TXQ]     = 1,   /* 0 = Base version with CQ+SG support
26                                       * 1 =   ... with Tx SG version 1
27                                       */
28 };
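/* The version actually negotiated with the device (filled in by
 * ionic_lif_queue_identify()) lands in lif->qtype_info[type].version
 * and is passed back to the device in each queue's q_init command, e.g.:
 *
 *	.ver = lif->qtype_info[q->type].version,
 */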
29
30 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
31 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
32 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
33 static void ionic_link_status_check(struct ionic_lif *lif);
34 static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
35 static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
36 static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
37
38 static int ionic_start_queues(struct ionic_lif *lif);
39 static void ionic_stop_queues(struct ionic_lif *lif);
40 static void ionic_lif_queue_identify(struct ionic_lif *lif);
41
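/* Deferred work: operations that can't be completed in the atomic context
 * where they were requested (rx_mode changes, filter add/del, link checks,
 * fw up/down handling) are queued on lif->deferred and handled here, one
 * item per invocation, with the work rescheduled while items remain.
 */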
42 static void ionic_lif_deferred_work(struct work_struct *work)
43 {
44         struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
45         struct ionic_deferred *def = &lif->deferred;
46         struct ionic_deferred_work *w = NULL;
47
48         spin_lock_bh(&def->lock);
49         if (!list_empty(&def->list)) {
50                 w = list_first_entry(&def->list,
51                                      struct ionic_deferred_work, list);
52                 list_del(&w->list);
53         }
54         spin_unlock_bh(&def->lock);
55
56         if (w) {
57                 switch (w->type) {
58                 case IONIC_DW_TYPE_RX_MODE:
59                         ionic_lif_rx_mode(lif, w->rx_mode);
60                         break;
61                 case IONIC_DW_TYPE_RX_ADDR_ADD:
62                         ionic_lif_addr_add(lif, w->addr);
63                         break;
64                 case IONIC_DW_TYPE_RX_ADDR_DEL:
65                         ionic_lif_addr_del(lif, w->addr);
66                         break;
67                 case IONIC_DW_TYPE_LINK_STATUS:
68                         ionic_link_status_check(lif);
69                         break;
70                 case IONIC_DW_TYPE_LIF_RESET:
71                         if (w->fw_status)
72                                 ionic_lif_handle_fw_up(lif);
73                         else
74                                 ionic_lif_handle_fw_down(lif);
75                         break;
76                 default:
77                         break;
78                 }
79                 kfree(w);
80                 schedule_work(&def->work);
81         }
82 }
83
84 void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
85                                 struct ionic_deferred_work *work)
86 {
87         spin_lock_bh(&def->lock);
88         list_add_tail(&work->list, &def->list);
89         spin_unlock_bh(&def->lock);
90         schedule_work(&def->work);
91 }
92
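/* Apply the latest fw-reported link state to the netdev carrier and
 * start or stop the queues to match.  The check is skipped unless one
 * has been requested, and skipped entirely while the queues are being
 * reset (IONIC_LIF_F_QUEUE_RESET) - the "no link check while resetting
 * queues" change this patch makes.
 */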
93 static void ionic_link_status_check(struct ionic_lif *lif)
94 {
95         struct net_device *netdev = lif->netdev;
96         u16 link_status;
97         bool link_up;
98
99         if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) ||
100             test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state))
101                 return;
102
103         link_status = le16_to_cpu(lif->info->status.link_status);
104         link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
105
106         if (link_up) {
107                 if (!netif_carrier_ok(netdev)) {
108                         u32 link_speed;
109
110                         ionic_port_identify(lif->ionic);
111                         link_speed = le32_to_cpu(lif->info->status.link_speed);
112                         netdev_info(netdev, "Link up - %d Gbps\n",
113                                     link_speed / 1000);
114                         netif_carrier_on(netdev);
115                 }
116
117                 if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev))
118                         ionic_start_queues(lif);
119         } else {
120                 if (netif_carrier_ok(netdev)) {
121                         netdev_info(netdev, "Link down\n");
122                         netif_carrier_off(netdev);
123                 }
124
125                 if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev))
126                         ionic_stop_queues(lif);
127         }
128
129         clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
130 }
131
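/* Request a link check.  Only one request is kept outstanding at a time;
 * from interrupt context the check is deferred to the work queue,
 * otherwise it is run inline.
 */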
132 void ionic_link_status_check_request(struct ionic_lif *lif)
133 {
134         struct ionic_deferred_work *work;
135
136         /* we only need one request outstanding at a time */
137         if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
138                 return;
139
140         if (in_interrupt()) {
141                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
142                 if (!work)
143                         return;
144
145                 work->type = IONIC_DW_TYPE_LINK_STATUS;
146                 ionic_lif_deferred_enqueue(&lif->deferred, work);
147         } else {
148                 ionic_link_status_check(lif);
149         }
150 }
151
152 static irqreturn_t ionic_isr(int irq, void *data)
153 {
154         struct napi_struct *napi = data;
155
156         napi_schedule_irqoff(napi);
157
158         return IRQ_HANDLED;
159 }
160
161 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
162 {
163         struct ionic_intr_info *intr = &qcq->intr;
164         struct device *dev = lif->ionic->dev;
165         struct ionic_queue *q = &qcq->q;
166         const char *name;
167
168         if (lif->registered)
169                 name = lif->netdev->name;
170         else
171                 name = dev_name(dev);
172
173         snprintf(intr->name, sizeof(intr->name),
174                  "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
175
176         return devm_request_irq(dev, intr->vector, ionic_isr,
177                                 0, intr->name, &qcq->napi);
178 }
179
180 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
181 {
182         struct ionic *ionic = lif->ionic;
183         int index;
184
185         index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
186         if (index == ionic->nintrs) {
187                 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
188                             __func__, index, ionic->nintrs);
189                 return -ENOSPC;
190         }
191
192         set_bit(index, ionic->intrs);
193         ionic_intr_init(&ionic->idev, intr, index);
194
195         return 0;
196 }
197
198 static void ionic_intr_free(struct ionic *ionic, int index)
199 {
200         if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
201                 clear_bit(index, ionic->intrs);
202 }
203
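/* Enable a queue: get NAPI running and the interrupt cleared and unmasked
 * before telling the device to start the queue with Q_CONTROL/Q_ENABLE.
 */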
204 static int ionic_qcq_enable(struct ionic_qcq *qcq)
205 {
206         struct ionic_queue *q = &qcq->q;
207         struct ionic_lif *lif = q->lif;
208         struct ionic_dev *idev;
209         struct device *dev;
210
211         struct ionic_admin_ctx ctx = {
212                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
213                 .cmd.q_control = {
214                         .opcode = IONIC_CMD_Q_CONTROL,
215                         .lif_index = cpu_to_le16(lif->index),
216                         .type = q->type,
217                         .index = cpu_to_le32(q->index),
218                         .oper = IONIC_Q_ENABLE,
219                 },
220         };
221
222         idev = &lif->ionic->idev;
223         dev = lif->ionic->dev;
224
225         dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
226                 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
227
228         if (qcq->flags & IONIC_QCQ_F_INTR) {
229                 irq_set_affinity_hint(qcq->intr.vector,
230                                       &qcq->intr.affinity_mask);
231                 napi_enable(&qcq->napi);
232                 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
233                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
234                                 IONIC_INTR_MASK_CLEAR);
235         }
236
237         return ionic_adminq_post_wait(lif, &ctx);
238 }
239
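/* Disable a queue: mask the interrupt, wait out any in-flight handler,
 * and stop NAPI before asking the device to quiesce the queue.
 */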
240 static int ionic_qcq_disable(struct ionic_qcq *qcq)
241 {
242         struct ionic_queue *q = &qcq->q;
243         struct ionic_lif *lif = q->lif;
244         struct ionic_dev *idev;
245         struct device *dev;
246
247         struct ionic_admin_ctx ctx = {
248                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
249                 .cmd.q_control = {
250                         .opcode = IONIC_CMD_Q_CONTROL,
251                         .lif_index = cpu_to_le16(lif->index),
252                         .type = q->type,
253                         .index = cpu_to_le32(q->index),
254                         .oper = IONIC_Q_DISABLE,
255                 },
256         };
257
258         idev = &lif->ionic->idev;
259         dev = lif->ionic->dev;
260
261         dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
262                 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
263
264         if (qcq->flags & IONIC_QCQ_F_INTR) {
265                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
266                                 IONIC_INTR_MASK_SET);
267                 synchronize_irq(qcq->intr.vector);
268                 irq_set_affinity_hint(qcq->intr.vector, NULL);
269                 napi_disable(&qcq->napi);
270         }
271
272         return ionic_adminq_post_wait(lif, &ctx);
273 }
274
275 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
276 {
277         struct ionic_dev *idev = &lif->ionic->idev;
278
279         if (!qcq)
280                 return;
281
282         if (!(qcq->flags & IONIC_QCQ_F_INITED))
283                 return;
284
285         if (qcq->flags & IONIC_QCQ_F_INTR) {
286                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
287                                 IONIC_INTR_MASK_SET);
288                 netif_napi_del(&qcq->napi);
289         }
290
291         qcq->flags &= ~IONIC_QCQ_F_INITED;
292 }
293
294 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
295 {
296         struct device *dev = lif->ionic->dev;
297
298         if (!qcq)
299                 return;
300
301         ionic_debugfs_del_qcq(qcq);
302
303         dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
304         qcq->base = NULL;
305         qcq->base_pa = 0;
306
307         if (qcq->flags & IONIC_QCQ_F_INTR) {
308                 irq_set_affinity_hint(qcq->intr.vector, NULL);
309                 devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
310                 qcq->intr.vector = 0;
311                 ionic_intr_free(lif->ionic, qcq->intr.index);
312         }
313
314         devm_kfree(dev, qcq->cq.info);
315         qcq->cq.info = NULL;
316         devm_kfree(dev, qcq->q.info);
317         qcq->q.info = NULL;
318         devm_kfree(dev, qcq);
319 }
320
321 static void ionic_qcqs_free(struct ionic_lif *lif)
322 {
323         struct device *dev = lif->ionic->dev;
324         unsigned int i;
325
326         if (lif->notifyqcq) {
327                 ionic_qcq_free(lif, lif->notifyqcq);
328                 lif->notifyqcq = NULL;
329         }
330
331         if (lif->adminqcq) {
332                 ionic_qcq_free(lif, lif->adminqcq);
333                 lif->adminqcq = NULL;
334         }
335
336         if (lif->rxqcqs) {
337                 for (i = 0; i < lif->nxqs; i++)
338                         if (lif->rxqcqs[i].stats)
339                                 devm_kfree(dev, lif->rxqcqs[i].stats);
340                 devm_kfree(dev, lif->rxqcqs);
341                 lif->rxqcqs = NULL;
342         }
343
344         if (lif->txqcqs) {
345                 for (i = 0; i < lif->nxqs; i++)
346                         if (lif->txqcqs[i].stats)
347                                 devm_kfree(dev, lif->txqcqs[i].stats);
348                 devm_kfree(dev, lif->txqcqs);
349                 lif->txqcqs = NULL;
350         }
351 }
352
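/* Share src_qcq's interrupt vector and index with n_qcq (e.g. the notifyq
 * riding on the adminq interrupt) rather than allocating a separate one.
 */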
353 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
354                                       struct ionic_qcq *n_qcq)
355 {
356         if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
357                 ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
358                 n_qcq->flags &= ~IONIC_QCQ_F_INTR;
359         }
360
361         n_qcq->intr.vector = src_qcq->intr.vector;
362         n_qcq->intr.index = src_qcq->intr.index;
363 }
364
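/* Allocate a queue/completion-queue pair: the descriptor ring, the
 * completion ring, and (optionally) the SG descriptor ring are all carved
 * out of a single coherent DMA allocation, each aligned up to a page.
 */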
365 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
366                            unsigned int index,
367                            const char *name, unsigned int flags,
368                            unsigned int num_descs, unsigned int desc_size,
369                            unsigned int cq_desc_size,
370                            unsigned int sg_desc_size,
371                            unsigned int pid, struct ionic_qcq **qcq)
372 {
373         struct ionic_dev *idev = &lif->ionic->idev;
374         u32 q_size, cq_size, sg_size, total_size;
375         struct device *dev = lif->ionic->dev;
376         void *q_base, *cq_base, *sg_base;
377         dma_addr_t cq_base_pa = 0;
378         dma_addr_t sg_base_pa = 0;
379         dma_addr_t q_base_pa = 0;
380         struct ionic_qcq *new;
381         int err;
382
383         *qcq = NULL;
384
385         q_size  = num_descs * desc_size;
386         cq_size = num_descs * cq_desc_size;
387         sg_size = num_descs * sg_desc_size;
388
389         total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
390         /* Note: aligning q_size and cq_size separately is not enough,
391          * because cq_base is aligned up inside the allocation and q_base
392          * itself might not be page aligned, so add a PAGE_SIZE of slack.
393          */
394         total_size += PAGE_SIZE;
395         if (flags & IONIC_QCQ_F_SG) {
396                 total_size += ALIGN(sg_size, PAGE_SIZE);
397                 total_size += PAGE_SIZE;
398         }
399
400         new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
401         if (!new) {
402                 netdev_err(lif->netdev, "Cannot allocate queue structure\n");
403                 err = -ENOMEM;
404                 goto err_out;
405         }
406
407         new->flags = flags;
408
409         new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
410                                    GFP_KERNEL);
411         if (!new->q.info) {
412                 netdev_err(lif->netdev, "Cannot allocate queue info\n");
413                 err = -ENOMEM;
414                 goto err_out;
415         }
416
417         new->q.type = type;
418
419         err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
420                            desc_size, sg_desc_size, pid);
421         if (err) {
422                 netdev_err(lif->netdev, "Cannot initialize queue\n");
423                 goto err_out;
424         }
425
426         if (flags & IONIC_QCQ_F_INTR) {
427                 err = ionic_intr_alloc(lif, &new->intr);
428                 if (err) {
429                         netdev_warn(lif->netdev, "no intr for %s: %d\n",
430                                     name, err);
431                         goto err_out;
432                 }
433
434                 err = ionic_bus_get_irq(lif->ionic, new->intr.index);
435                 if (err < 0) {
436                         netdev_warn(lif->netdev, "no vector for %s: %d\n",
437                                     name, err);
438                         goto err_out_free_intr;
439                 }
440                 new->intr.vector = err;
441                 ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
442                                        IONIC_INTR_MASK_SET);
443
444                 err = ionic_request_irq(lif, new);
445                 if (err) {
446                         netdev_warn(lif->netdev, "irq request failed %d\n", err);
447                         goto err_out_free_intr;
448                 }
449
450                 new->intr.cpu = cpumask_local_spread(new->intr.index,
451                                                      dev_to_node(dev));
452                 if (new->intr.cpu != -1)
453                         cpumask_set_cpu(new->intr.cpu,
454                                         &new->intr.affinity_mask);
455         } else {
456                 new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
457         }
458
459         new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
460                                     GFP_KERNEL);
461         if (!new->cq.info) {
462                 netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
463                 err = -ENOMEM;
464                 goto err_out_free_irq;
465         }
466
467         err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
468         if (err) {
469                 netdev_err(lif->netdev, "Cannot initialize completion queue\n");
470                 goto err_out_free_irq;
471         }
472
473         new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
474                                        GFP_KERNEL);
475         if (!new->base) {
476                 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
477                 err = -ENOMEM;
478                 goto err_out_free_irq;
479         }
480
481         new->total_size = total_size;
482
483         q_base = new->base;
484         q_base_pa = new->base_pa;
485
486         cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
487         cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
488
489         if (flags & IONIC_QCQ_F_SG) {
490                 sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
491                                         PAGE_SIZE);
492                 sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
493                 ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
494         }
495
496         ionic_q_map(&new->q, q_base, q_base_pa);
497         ionic_cq_map(&new->cq, cq_base, cq_base_pa);
498         ionic_cq_bind(&new->cq, &new->q);
499
500         *qcq = new;
501
502         return 0;
503
504 err_out_free_irq:
505         if (flags & IONIC_QCQ_F_INTR)
506                 devm_free_irq(dev, new->intr.vector, &new->napi);
507 err_out_free_intr:
508         if (flags & IONIC_QCQ_F_INTR)
509                 ionic_intr_free(lif->ionic, new->intr.index);
510 err_out:
511         dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
512         return err;
513 }
514
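/* Allocate the lif's control queues (the adminq, and the notifyq riding
 * on the adminq interrupt when supported) plus the per-queue stats for
 * the tx/rx qcq arrays; the tx/rx qcqs themselves are created later when
 * the data queues are set up.
 */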
515 static int ionic_qcqs_alloc(struct ionic_lif *lif)
516 {
517         struct device *dev = lif->ionic->dev;
518         unsigned int q_list_size;
519         unsigned int flags;
520         int err;
521         int i;
522
523         flags = IONIC_QCQ_F_INTR;
524         err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
525                               IONIC_ADMINQ_LENGTH,
526                               sizeof(struct ionic_admin_cmd),
527                               sizeof(struct ionic_admin_comp),
528                               0, lif->kern_pid, &lif->adminqcq);
529         if (err)
530                 return err;
531         ionic_debugfs_add_qcq(lif, lif->adminqcq);
532
533         if (lif->ionic->nnqs_per_lif) {
534                 flags = IONIC_QCQ_F_NOTIFYQ;
535                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
536                                       flags, IONIC_NOTIFYQ_LENGTH,
537                                       sizeof(struct ionic_notifyq_cmd),
538                                       sizeof(union ionic_notifyq_comp),
539                                       0, lif->kern_pid, &lif->notifyqcq);
540                 if (err)
541                         goto err_out_free_adminqcq;
542                 ionic_debugfs_add_qcq(lif, lif->notifyqcq);
543
544                 /* Let the notifyq ride on the adminq interrupt */
545                 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
546         }
547
548         q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
549         err = -ENOMEM;
550         lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
551         if (!lif->txqcqs)
552                 goto err_out_free_notifyqcq;
553         for (i = 0; i < lif->nxqs; i++) {
554                 lif->txqcqs[i].stats = devm_kzalloc(dev,
555                                                     sizeof(struct ionic_q_stats),
556                                                     GFP_KERNEL);
557                 if (!lif->txqcqs[i].stats)
558                         goto err_out_free_tx_stats;
559         }
560
561         lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
562         if (!lif->rxqcqs)
563                 goto err_out_free_tx_stats;
564         for (i = 0; i < lif->nxqs; i++) {
565                 lif->rxqcqs[i].stats = devm_kzalloc(dev,
566                                                     sizeof(struct ionic_q_stats),
567                                                     GFP_KERNEL);
568                 if (!lif->rxqcqs[i].stats)
569                         goto err_out_free_rx_stats;
570         }
571
572         return 0;
573
574 err_out_free_rx_stats:
575         for (i = 0; i < lif->nxqs; i++)
576                 if (lif->rxqcqs[i].stats)
577                         devm_kfree(dev, lif->rxqcqs[i].stats);
578         devm_kfree(dev, lif->rxqcqs);
579         lif->rxqcqs = NULL;
580 err_out_free_tx_stats:
581         for (i = 0; i < lif->nxqs; i++)
582                 if (lif->txqcqs[i].stats)
583                         devm_kfree(dev, lif->txqcqs[i].stats);
584         devm_kfree(dev, lif->txqcqs);
585         lif->txqcqs = NULL;
586 err_out_free_notifyqcq:
587         if (lif->notifyqcq) {
588                 ionic_qcq_free(lif, lif->notifyqcq);
589                 lif->notifyqcq = NULL;
590         }
591 err_out_free_adminqcq:
592         ionic_qcq_free(lif, lif->adminqcq);
593         lif->adminqcq = NULL;
594
595         return err;
596 }
597
598 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
599 {
600         struct device *dev = lif->ionic->dev;
601         struct ionic_queue *q = &qcq->q;
602         struct ionic_cq *cq = &qcq->cq;
603         struct ionic_admin_ctx ctx = {
604                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
605                 .cmd.q_init = {
606                         .opcode = IONIC_CMD_Q_INIT,
607                         .lif_index = cpu_to_le16(lif->index),
608                         .type = q->type,
609                         .ver = lif->qtype_info[q->type].version,
610                         .index = cpu_to_le32(q->index),
611                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
612                                              IONIC_QINIT_F_SG),
613                         .intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
614                         .pid = cpu_to_le16(q->pid),
615                         .ring_size = ilog2(q->num_descs),
616                         .ring_base = cpu_to_le64(q->base_pa),
617                         .cq_ring_base = cpu_to_le64(cq->base_pa),
618                         .sg_ring_base = cpu_to_le64(q->sg_base_pa),
619                 },
620         };
621         int err;
622
623         dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
624         dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
625         dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
626         dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
627         dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
628         dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
629
630         q->tail = q->info;
631         q->head = q->tail;
632         cq->tail = cq->info;
633
634         err = ionic_adminq_post_wait(lif, &ctx);
635         if (err)
636                 return err;
637
638         q->hw_type = ctx.comp.q_init.hw_type;
639         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
640         q->dbval = IONIC_DBELL_QID(q->hw_index);
641
642         dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
643         dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
644
645         qcq->flags |= IONIC_QCQ_F_INITED;
646
647         return 0;
648 }
649
650 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
651 {
652         struct device *dev = lif->ionic->dev;
653         struct ionic_queue *q = &qcq->q;
654         struct ionic_cq *cq = &qcq->cq;
655         struct ionic_admin_ctx ctx = {
656                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
657                 .cmd.q_init = {
658                         .opcode = IONIC_CMD_Q_INIT,
659                         .lif_index = cpu_to_le16(lif->index),
660                         .type = q->type,
661                         .ver = lif->qtype_info[q->type].version,
662                         .index = cpu_to_le32(q->index),
663                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
664                                              IONIC_QINIT_F_SG),
665                         .intr_index = cpu_to_le16(cq->bound_intr->index),
666                         .pid = cpu_to_le16(q->pid),
667                         .ring_size = ilog2(q->num_descs),
668                         .ring_base = cpu_to_le64(q->base_pa),
669                         .cq_ring_base = cpu_to_le64(cq->base_pa),
670                         .sg_ring_base = cpu_to_le64(q->sg_base_pa),
671                 },
672         };
673         int err;
674
675         dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
676         dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
677         dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
678         dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
679         dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
680         dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
681
682         q->tail = q->info;
683         q->head = q->tail;
684         cq->tail = cq->info;
685
686         err = ionic_adminq_post_wait(lif, &ctx);
687         if (err)
688                 return err;
689
690         q->hw_type = ctx.comp.q_init.hw_type;
691         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
692         q->dbval = IONIC_DBELL_QID(q->hw_index);
693
694         dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
695         dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
696
697         netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
698                        NAPI_POLL_WEIGHT);
699
700         qcq->flags |= IONIC_QCQ_F_INITED;
701
702         return 0;
703 }
704
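/* Service one notifyq completion.  NotifyQ events carry a monotonically
 * increasing event id (eid) rather than a color bit, so an event is new
 * only if its eid is greater than the last one we've seen.
 */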
705 static bool ionic_notifyq_service(struct ionic_cq *cq,
706                                   struct ionic_cq_info *cq_info)
707 {
708         union ionic_notifyq_comp *comp = cq_info->cq_desc;
709         struct ionic_deferred_work *work;
710         struct net_device *netdev;
711         struct ionic_queue *q;
712         struct ionic_lif *lif;
713         u64 eid;
714
715         q = cq->bound_q;
716         lif = q->info[0].cb_arg;
717         netdev = lif->netdev;
718         eid = le64_to_cpu(comp->event.eid);
719
720         /* Have we run out of new completions to process? */
721         if (eid <= lif->last_eid)
722                 return false;
723
724         lif->last_eid = eid;
725
726         dev_dbg(lif->ionic->dev, "notifyq event:\n");
727         dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
728                          comp, sizeof(*comp), true);
729
730         switch (le16_to_cpu(comp->event.ecode)) {
731         case IONIC_EVENT_LINK_CHANGE:
732                 ionic_link_status_check_request(lif);
733                 break;
734         case IONIC_EVENT_RESET:
735                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
736                 if (!work) {
737                         netdev_err(lif->netdev, "%s OOM\n", __func__);
738                 } else {
739                         work->type = IONIC_DW_TYPE_LIF_RESET;
740                         ionic_lif_deferred_enqueue(&lif->deferred, work);
741                 }
742                 break;
743         default:
744                 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
745                             le16_to_cpu(comp->event.ecode), eid);
746                 break;
747         }
748
749         return true;
750 }
751
752 static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
753 {
754         struct ionic_dev *idev = &lif->ionic->idev;
755         struct ionic_cq *cq = &lif->notifyqcq->cq;
756         u32 work_done;
757
758         work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
759                                      NULL, NULL);
760         if (work_done)
761                 ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
762                                    work_done, IONIC_INTR_CRED_RESET_COALESCE);
763
764         return work_done;
765 }
766
767 static bool ionic_adminq_service(struct ionic_cq *cq,
768                                  struct ionic_cq_info *cq_info)
769 {
770         struct ionic_admin_comp *comp = cq_info->cq_desc;
771
772         if (!color_match(comp->color, cq->done_color))
773                 return false;
774
775         ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
776
777         return true;
778 }
779
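/* The adminq NAPI context also cleans the notifyq, since the notifyq
 * shares the adminq interrupt; report whichever queue did more work.
 */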
780 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
781 {
782         struct ionic_lif *lif = napi_to_cq(napi)->lif;
783         int n_work = 0;
784         int a_work = 0;
785
786         if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
787                 n_work = ionic_notifyq_clean(lif, budget);
788         a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
789
790         return max(n_work, a_work);
791 }
792
793 void ionic_get_stats64(struct net_device *netdev,
794                        struct rtnl_link_stats64 *ns)
795 {
796         struct ionic_lif *lif = netdev_priv(netdev);
797         struct ionic_lif_stats *ls;
798
799         memset(ns, 0, sizeof(*ns));
800         ls = &lif->info->stats;
801
802         ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
803                          le64_to_cpu(ls->rx_mcast_packets) +
804                          le64_to_cpu(ls->rx_bcast_packets);
805
806         ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
807                          le64_to_cpu(ls->tx_mcast_packets) +
808                          le64_to_cpu(ls->tx_bcast_packets);
809
810         ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
811                        le64_to_cpu(ls->rx_mcast_bytes) +
812                        le64_to_cpu(ls->rx_bcast_bytes);
813
814         ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
815                        le64_to_cpu(ls->tx_mcast_bytes) +
816                        le64_to_cpu(ls->tx_bcast_bytes);
817
818         ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
819                          le64_to_cpu(ls->rx_mcast_drop_packets) +
820                          le64_to_cpu(ls->rx_bcast_drop_packets);
821
822         ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
823                          le64_to_cpu(ls->tx_mcast_drop_packets) +
824                          le64_to_cpu(ls->tx_bcast_drop_packets);
825
826         ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
827
828         ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
829
830         ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
831                                le64_to_cpu(ls->rx_queue_disabled) +
832                                le64_to_cpu(ls->rx_desc_fetch_error) +
833                                le64_to_cpu(ls->rx_desc_data_error);
834
835         ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
836                                 le64_to_cpu(ls->tx_queue_disabled) +
837                                 le64_to_cpu(ls->tx_desc_fetch_error) +
838                                 le64_to_cpu(ls->tx_desc_data_error);
839
840         ns->rx_errors = ns->rx_over_errors +
841                         ns->rx_missed_errors;
842
843         ns->tx_errors = ns->tx_aborted_errors;
844 }
845
846 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
847 {
848         struct ionic_admin_ctx ctx = {
849                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
850                 .cmd.rx_filter_add = {
851                         .opcode = IONIC_CMD_RX_FILTER_ADD,
852                         .lif_index = cpu_to_le16(lif->index),
853                         .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
854                 },
855         };
856         struct ionic_rx_filter *f;
857         int err;
858
859         /* don't bother if we already have it */
860         spin_lock_bh(&lif->rx_filters.lock);
861         f = ionic_rx_filter_by_addr(lif, addr);
862         spin_unlock_bh(&lif->rx_filters.lock);
863         if (f)
864                 return 0;
865
866         netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
867                    ctx.comp.rx_filter_add.filter_id);
868
869         memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
870         err = ionic_adminq_post_wait(lif, &ctx);
871         if (err && err != -EEXIST)
872                 return err;
873
874         return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
875 }
876
877 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
878 {
879         struct ionic_admin_ctx ctx = {
880                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
881                 .cmd.rx_filter_del = {
882                         .opcode = IONIC_CMD_RX_FILTER_DEL,
883                         .lif_index = cpu_to_le16(lif->index),
884                 },
885         };
886         struct ionic_rx_filter *f;
887         int err;
888
889         spin_lock_bh(&lif->rx_filters.lock);
890         f = ionic_rx_filter_by_addr(lif, addr);
891         if (!f) {
892                 spin_unlock_bh(&lif->rx_filters.lock);
893                 return -ENOENT;
894         }
895
896         ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
897         ionic_rx_filter_free(lif, f);
898         spin_unlock_bh(&lif->rx_filters.lock);
899
900         err = ionic_adminq_post_wait(lif, &ctx);
901         if (err && err != -EEXIST)
902                 return err;
903
904         netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
905                    ctx.cmd.rx_filter_del.filter_id);
906
907         return 0;
908 }
909
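/* Add or remove a MAC filter, adjusting the ucast/mcast counters up front
 * so an overflow can be reported synchronously to the stack; the actual
 * adminq command is deferred when called from interrupt context.
 */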
910 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
911 {
912         struct ionic *ionic = lif->ionic;
913         struct ionic_deferred_work *work;
914         unsigned int nmfilters;
915         unsigned int nufilters;
916
917         if (add) {
918                 /* Do we have space for this filter?  We test the counters
919                  * here before checking the need for deferral so that we
920                  * can return an overflow error to the stack.
921                  */
922                 nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
923                 nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
924
925                 if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
926                         lif->nmcast++;
927                 else if (!is_multicast_ether_addr(addr) &&
928                          lif->nucast < nufilters)
929                         lif->nucast++;
930                 else
931                         return -ENOSPC;
932         } else {
933                 if (is_multicast_ether_addr(addr) && lif->nmcast)
934                         lif->nmcast--;
935                 else if (!is_multicast_ether_addr(addr) && lif->nucast)
936                         lif->nucast--;
937         }
938
939         if (in_interrupt()) {
940                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
941                 if (!work) {
942                         netdev_err(lif->netdev, "%s OOM\n", __func__);
943                         return -ENOMEM;
944                 }
945                 work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
946                                    IONIC_DW_TYPE_RX_ADDR_DEL;
947                 memcpy(work->addr, addr, ETH_ALEN);
948                 netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
949                            add ? "add" : "del", addr);
950                 ionic_lif_deferred_enqueue(&lif->deferred, work);
951         } else {
952                 netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
953                            add ? "add" : "del", addr);
954                 if (add)
955                         return ionic_lif_addr_add(lif, addr);
956                 else
957                         return ionic_lif_addr_del(lif, addr);
958         }
959
960         return 0;
961 }
962
963 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
964 {
965         return ionic_lif_addr(netdev_priv(netdev), addr, true);
966 }
967
968 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
969 {
970         return ionic_lif_addr(netdev_priv(netdev), addr, false);
971 }
972
973 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
974 {
975         struct ionic_admin_ctx ctx = {
976                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
977                 .cmd.rx_mode_set = {
978                         .opcode = IONIC_CMD_RX_MODE_SET,
979                         .lif_index = cpu_to_le16(lif->index),
980                         .rx_mode = cpu_to_le16(rx_mode),
981                 },
982         };
983         char buf[128];
984         int err;
985         int i;
986 #define REMAIN(__x) (sizeof(buf) - (__x))
987
988         i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
989                       lif->rx_mode, rx_mode);
990         if (rx_mode & IONIC_RX_MODE_F_UNICAST)
991                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
992         if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
993                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
994         if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
995                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
996         if (rx_mode & IONIC_RX_MODE_F_PROMISC)
997                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
998         if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
999                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
1000         netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
1001
1002         err = ionic_adminq_post_wait(lif, &ctx);
1003         if (err)
1004                 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
1005                             rx_mode, err);
1006         else
1007                 lif->rx_mode = rx_mode;
1008 }
1009
1010 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
1011 {
1012         struct ionic_deferred_work *work;
1013
1014         if (in_interrupt()) {
1015                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1016                 if (!work) {
1017                         netdev_err(lif->netdev, "%s OOM\n", __func__);
1018                         return;
1019                 }
1020                 work->type = IONIC_DW_TYPE_RX_MODE;
1021                 work->rx_mode = rx_mode;
1022                 netdev_dbg(lif->netdev, "deferred: rx_mode\n");
1023                 ionic_lif_deferred_enqueue(&lif->deferred, work);
1024         } else {
1025                 ionic_lif_rx_mode(lif, rx_mode);
1026         }
1027 }
1028
1029 static void ionic_set_rx_mode(struct net_device *netdev)
1030 {
1031         struct ionic_lif *lif = netdev_priv(netdev);
1032         struct ionic_identity *ident;
1033         unsigned int nfilters;
1034         unsigned int rx_mode;
1035
1036         ident = &lif->ionic->ident;
1037
1038         rx_mode = IONIC_RX_MODE_F_UNICAST;
1039         rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1040         rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1041         rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1042         rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1043
1044         /* Sync the unicast addresses, then check for an overflow state:
1045          *    if we've run out of unicast filter slots, note the overflow
1046          *       and enable NIC PROMISC;
1047          *    else if the overflow flag was set but is no longer needed,
1048          *       clear it and check the netdev flags to see whether
1049          *       NIC PROMISC can be disabled again.
1050          */
1051         __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1052         nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
1053         if (netdev_uc_count(netdev) + 1 > nfilters) {
1054                 rx_mode |= IONIC_RX_MODE_F_PROMISC;
1055                 lif->uc_overflow = true;
1056         } else if (lif->uc_overflow) {
1057                 lif->uc_overflow = false;
1058                 if (!(netdev->flags & IFF_PROMISC))
1059                         rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1060         }
1061
1062         /* same for multicast */
1063         __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1064         nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
1065         if (netdev_mc_count(netdev) > nfilters) {
1066                 rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1067                 lif->mc_overflow = true;
1068         } else if (lif->mc_overflow) {
1069                 lif->mc_overflow = false;
1070                 if (!(netdev->flags & IFF_ALLMULTI))
1071                         rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1072         }
1073
1074         if (lif->rx_mode != rx_mode)
1075                 _ionic_lif_rx_mode(lif, rx_mode);
1076 }
1077
1078 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1079 {
1080         u64 wanted = 0;
1081
1082         if (features & NETIF_F_HW_VLAN_CTAG_TX)
1083                 wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1084         if (features & NETIF_F_HW_VLAN_CTAG_RX)
1085                 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1086         if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1087                 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1088         if (features & NETIF_F_RXHASH)
1089                 wanted |= IONIC_ETH_HW_RX_HASH;
1090         if (features & NETIF_F_RXCSUM)
1091                 wanted |= IONIC_ETH_HW_RX_CSUM;
1092         if (features & NETIF_F_SG)
1093                 wanted |= IONIC_ETH_HW_TX_SG;
1094         if (features & NETIF_F_HW_CSUM)
1095                 wanted |= IONIC_ETH_HW_TX_CSUM;
1096         if (features & NETIF_F_TSO)
1097                 wanted |= IONIC_ETH_HW_TSO;
1098         if (features & NETIF_F_TSO6)
1099                 wanted |= IONIC_ETH_HW_TSO_IPV6;
1100         if (features & NETIF_F_TSO_ECN)
1101                 wanted |= IONIC_ETH_HW_TSO_ECN;
1102         if (features & NETIF_F_GSO_GRE)
1103                 wanted |= IONIC_ETH_HW_TSO_GRE;
1104         if (features & NETIF_F_GSO_GRE_CSUM)
1105                 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1106         if (features & NETIF_F_GSO_IPXIP4)
1107                 wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1108         if (features & NETIF_F_GSO_IPXIP6)
1109                 wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1110         if (features & NETIF_F_GSO_UDP_TUNNEL)
1111                 wanted |= IONIC_ETH_HW_TSO_UDP;
1112         if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1113                 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1114
1115         return cpu_to_le64(wanted);
1116 }
1117
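/* Push the requested feature set to the device with LIF_SETATTR and
 * record the subset the device actually accepted in lif->hw_features.
 */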
1118 static int ionic_set_nic_features(struct ionic_lif *lif,
1119                                   netdev_features_t features)
1120 {
1121         struct device *dev = lif->ionic->dev;
1122         struct ionic_admin_ctx ctx = {
1123                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1124                 .cmd.lif_setattr = {
1125                         .opcode = IONIC_CMD_LIF_SETATTR,
1126                         .index = cpu_to_le16(lif->index),
1127                         .attr = IONIC_LIF_ATTR_FEATURES,
1128                 },
1129         };
1130         u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1131                          IONIC_ETH_HW_VLAN_RX_STRIP |
1132                          IONIC_ETH_HW_VLAN_RX_FILTER;
1133         u64 old_hw_features;
1134         int err;
1135
1136         ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1137         err = ionic_adminq_post_wait(lif, &ctx);
1138         if (err)
1139                 return err;
1140
1141         old_hw_features = lif->hw_features;
1142         lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1143                                        ctx.comp.lif_setattr.features);
1144
1145         if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
1146                 ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1147
1148         if ((vlan_flags & features) &&
1149             !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
1150                 dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
1151
1152         if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1153                 dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1154         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1155                 dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1156         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1157                 dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1158         if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1159                 dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1160         if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1161                 dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1162         if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1163                 dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1164         if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1165                 dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1166         if (lif->hw_features & IONIC_ETH_HW_TSO)
1167                 dev_dbg(dev, "feature ETH_HW_TSO\n");
1168         if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1169                 dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1170         if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1171                 dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1172         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1173                 dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1174         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1175                 dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1176         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1177                 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1178         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1179                 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1180         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1181                 dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1182         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1183                 dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1184
1185         return 0;
1186 }
1187
1188 static int ionic_init_nic_features(struct ionic_lif *lif)
1189 {
1190         struct net_device *netdev = lif->netdev;
1191         netdev_features_t features;
1192         int err;
1193
1194         /* set up what we expect to support by default */
1195         features = NETIF_F_HW_VLAN_CTAG_TX |
1196                    NETIF_F_HW_VLAN_CTAG_RX |
1197                    NETIF_F_HW_VLAN_CTAG_FILTER |
1198                    NETIF_F_RXHASH |
1199                    NETIF_F_SG |
1200                    NETIF_F_HW_CSUM |
1201                    NETIF_F_RXCSUM |
1202                    NETIF_F_TSO |
1203                    NETIF_F_TSO6 |
1204                    NETIF_F_TSO_ECN;
1205
1206         err = ionic_set_nic_features(lif, features);
1207         if (err)
1208                 return err;
1209
1210         /* tell the netdev what we actually can support */
1211         netdev->features |= NETIF_F_HIGHDMA;
1212
1213         if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1214                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1215         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1216                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1217         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1218                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1219         if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1220                 netdev->hw_features |= NETIF_F_RXHASH;
1221         if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1222                 netdev->hw_features |= NETIF_F_SG;
1223
1224         if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1225                 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1226         if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1227                 netdev->hw_enc_features |= NETIF_F_RXCSUM;
1228         if (lif->hw_features & IONIC_ETH_HW_TSO)
1229                 netdev->hw_enc_features |= NETIF_F_TSO;
1230         if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1231                 netdev->hw_enc_features |= NETIF_F_TSO6;
1232         if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1233                 netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1234         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1235                 netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1236         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1237                 netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1238         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1239                 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1240         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1241                 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1242         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1243                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1244         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1245                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1246
1247         netdev->hw_features |= netdev->hw_enc_features;
1248         netdev->features |= netdev->hw_features;
1249
1250         netdev->priv_flags |= IFF_UNICAST_FLT |
1251                               IFF_LIVE_ADDR_CHANGE;
1252
1253         return 0;
1254 }
1255
1256 static int ionic_set_features(struct net_device *netdev,
1257                               netdev_features_t features)
1258 {
1259         struct ionic_lif *lif = netdev_priv(netdev);
1260         int err;
1261
1262         netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1263                    __func__, (u64)lif->netdev->features, (u64)features);
1264
1265         err = ionic_set_nic_features(lif, features);
1266
1267         return err;
1268 }
1269
1270 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1271 {
1272         struct sockaddr *addr = sa;
1273         u8 *mac;
1274         int err;
1275
1276         mac = (u8 *)addr->sa_data;
1277         if (ether_addr_equal(netdev->dev_addr, mac))
1278                 return 0;
1279
1280         err = eth_prepare_mac_addr_change(netdev, addr);
1281         if (err)
1282                 return err;
1283
1284         if (!is_zero_ether_addr(netdev->dev_addr)) {
1285                 netdev_info(netdev, "deleting mac addr %pM\n",
1286                             netdev->dev_addr);
1287                 ionic_addr_del(netdev, netdev->dev_addr);
1288         }
1289
1290         eth_commit_mac_addr_change(netdev, addr);
1291         netdev_info(netdev, "updating mac addr %pM\n", mac);
1292
1293         return ionic_addr_add(netdev, mac);
1294 }
1295
1296 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1297 {
1298         struct ionic_lif *lif = netdev_priv(netdev);
1299         struct ionic_admin_ctx ctx = {
1300                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1301                 .cmd.lif_setattr = {
1302                         .opcode = IONIC_CMD_LIF_SETATTR,
1303                         .index = cpu_to_le16(lif->index),
1304                         .attr = IONIC_LIF_ATTR_MTU,
1305                         .mtu = cpu_to_le32(new_mtu),
1306                 },
1307         };
1308         int err;
1309
1310         err = ionic_adminq_post_wait(lif, &ctx);
1311         if (err)
1312                 return err;
1313
1314         netdev->mtu = new_mtu;
1315         err = ionic_reset_queues(lif);
1316
1317         return err;
1318 }
1319
1320 static void ionic_tx_timeout_work(struct work_struct *ws)
1321 {
1322         struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1323
1324         netdev_info(lif->netdev, "Tx Timeout recovery\n");
1325
1326         rtnl_lock();
1327         ionic_reset_queues(lif);
1328         rtnl_unlock();
1329 }
1330
1331 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1332 {
1333         struct ionic_lif *lif = netdev_priv(netdev);
1334
1335         schedule_work(&lif->tx_timeout_work);
1336 }
1337
1338 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1339                                  u16 vid)
1340 {
1341         struct ionic_lif *lif = netdev_priv(netdev);
1342         struct ionic_admin_ctx ctx = {
1343                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1344                 .cmd.rx_filter_add = {
1345                         .opcode = IONIC_CMD_RX_FILTER_ADD,
1346                         .lif_index = cpu_to_le16(lif->index),
1347                         .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
1348                         .vlan.vlan = cpu_to_le16(vid),
1349                 },
1350         };
1351         int err;
1352
1353         err = ionic_adminq_post_wait(lif, &ctx);
1354         if (err)
1355                 return err;
1356
1357         netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
1358                    ctx.comp.rx_filter_add.filter_id);
1359
1360         return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
1361 }
1362
1363 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1364                                   u16 vid)
1365 {
1366         struct ionic_lif *lif = netdev_priv(netdev);
1367         struct ionic_admin_ctx ctx = {
1368                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1369                 .cmd.rx_filter_del = {
1370                         .opcode = IONIC_CMD_RX_FILTER_DEL,
1371                         .lif_index = cpu_to_le16(lif->index),
1372                 },
1373         };
1374         struct ionic_rx_filter *f;
1375
1376         spin_lock_bh(&lif->rx_filters.lock);
1377
1378         f = ionic_rx_filter_by_vlan(lif, vid);
1379         if (!f) {
1380                 spin_unlock_bh(&lif->rx_filters.lock);
1381                 return -ENOENT;
1382         }
1383
1384         netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
1385                    f->filter_id);
1386
1387         ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1388         ionic_rx_filter_free(lif, f);
1389         spin_unlock_bh(&lif->rx_filters.lock);
1390
1391         return ionic_adminq_post_wait(lif, &ctx);
1392 }
1393
1394 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1395                          const u8 *key, const u32 *indir)
1396 {
1397         struct ionic_admin_ctx ctx = {
1398                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1399                 .cmd.lif_setattr = {
1400                         .opcode = IONIC_CMD_LIF_SETATTR,
1401                         .attr = IONIC_LIF_ATTR_RSS,
1402                         .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1403                 },
1404         };
1405         unsigned int i, tbl_sz;
1406
1407         if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1408                 lif->rss_types = types;
1409                 ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1410         }
1411
1412         if (key)
1413                 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1414
1415         if (indir) {
1416                 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1417                 for (i = 0; i < tbl_sz; i++)
1418                         lif->rss_ind_tbl[i] = indir[i];
1419         }
1420
1421         memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1422                IONIC_RSS_HASH_KEY_SIZE);
1423
1424         return ionic_adminq_post_wait(lif, &ctx);
1425 }
1426
1427 static int ionic_lif_rss_init(struct ionic_lif *lif)
1428 {
1429         unsigned int tbl_sz;
1430         unsigned int i;
1431
1432         lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1433                          IONIC_RSS_TYPE_IPV4_TCP |
1434                          IONIC_RSS_TYPE_IPV4_UDP |
1435                          IONIC_RSS_TYPE_IPV6     |
1436                          IONIC_RSS_TYPE_IPV6_TCP |
1437                          IONIC_RSS_TYPE_IPV6_UDP;
1438
1439         /* Fill indirection table with 'default' values */
1440         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1441         for (i = 0; i < tbl_sz; i++)
1442                 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1443
1444         return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1445 }
1446
1447 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1448 {
1449         int tbl_sz;
1450
1451         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1452         memset(lif->rss_ind_tbl, 0, tbl_sz);
1453         memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1454
1455         ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1456 }
1457
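/* Disable all Tx and Rx queues.  A timeout on a disable command suggests
 * the device has stopped responding, so don't bother with the rest.
 */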
1458 static void ionic_txrx_disable(struct ionic_lif *lif)
1459 {
1460         unsigned int i;
1461         int err;
1462
1463         if (lif->txqcqs) {
1464                 for (i = 0; i < lif->nxqs; i++) {
1465                         err = ionic_qcq_disable(lif->txqcqs[i].qcq);
1466                         if (err == -ETIMEDOUT)
1467                                 break;
1468                 }
1469         }
1470
1471         if (lif->rxqcqs) {
1472                 for (i = 0; i < lif->nxqs; i++) {
1473                         err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
1474                         if (err == -ETIMEDOUT)
1475                                 break;
1476                 }
1477         }
1478 }
1479
1480 static void ionic_txrx_deinit(struct ionic_lif *lif)
1481 {
1482         unsigned int i;
1483
1484         if (lif->txqcqs) {
1485                 for (i = 0; i < lif->nxqs; i++) {
1486                         ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1487                         ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
1488                         ionic_tx_empty(&lif->txqcqs[i].qcq->q);
1489                 }
1490         }
1491
1492         if (lif->rxqcqs) {
1493                 for (i = 0; i < lif->nxqs; i++) {
1494                         ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1495                         ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
1496                         ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
1497                 }
1498         }
1499         lif->rx_mode = 0;
1500 }
1501
1502 static void ionic_txrx_free(struct ionic_lif *lif)
1503 {
1504         unsigned int i;
1505
1506         if (lif->txqcqs) {
1507                 for (i = 0; i < lif->nxqs; i++) {
1508                         ionic_qcq_free(lif, lif->txqcqs[i].qcq);
1509                         lif->txqcqs[i].qcq = NULL;
1510                 }
1511         }
1512
1513         if (lif->rxqcqs) {
1514                 for (i = 0; i < lif->nxqs; i++) {
1515                         ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
1516                         lif->rxqcqs[i].qcq = NULL;
1517                 }
1518         }
1519 }
1520
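/* Allocate the Tx and Rx queue/completion pairs.  The Tx SG descriptor
 * size depends on the TXQ version the device reported, and each Rx queue
 * gets an interrupt that is shared with its partner Tx queue.
 */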
1521 static int ionic_txrx_alloc(struct ionic_lif *lif)
1522 {
1523         unsigned int sg_desc_sz;
1524         unsigned int flags;
1525         unsigned int i;
1526         int err = 0;
1527
1528         if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
1529             lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
1530                                           sizeof(struct ionic_txq_sg_desc_v1))
1531                 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
1532         else
1533                 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
1534
1535         flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
1536         for (i = 0; i < lif->nxqs; i++) {
1537                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
1538                                       lif->ntxq_descs,
1539                                       sizeof(struct ionic_txq_desc),
1540                                       sizeof(struct ionic_txq_comp),
1541                                       sg_desc_sz,
1542                                       lif->kern_pid, &lif->txqcqs[i].qcq);
1543                 if (err)
1544                         goto err_out;
1545
1546                 lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
1547                 ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
1548         }
1549
1550         flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
1551         for (i = 0; i < lif->nxqs; i++) {
1552                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
1553                                       lif->nrxq_descs,
1554                                       sizeof(struct ionic_rxq_desc),
1555                                       sizeof(struct ionic_rxq_comp),
1556                                       sizeof(struct ionic_rxq_sg_desc),
1557                                       lif->kern_pid, &lif->rxqcqs[i].qcq);
1558                 if (err)
1559                         goto err_out;
1560
1561                 lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
1562
1563                 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
1564                                      lif->rxqcqs[i].qcq->intr.index,
1565                                      lif->rx_coalesce_hw);
1566                 ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
1567                                           lif->txqcqs[i].qcq);
1568                 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
1569         }
1570
1571         return 0;
1572
1573 err_out:
1574         ionic_txrx_free(lif);
1575
1576         return err;
1577 }
1578
1579 static int ionic_txrx_init(struct ionic_lif *lif)
1580 {
1581         unsigned int i;
1582         int err;
1583
1584         for (i = 0; i < lif->nxqs; i++) {
1585                 err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
1586                 if (err)
1587                         goto err_out;
1588
1589                 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
1590                 if (err) {
1591                         ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1592                         goto err_out;
1593                 }
1594         }
1595
1596         if (lif->netdev->features & NETIF_F_RXHASH)
1597                 ionic_lif_rss_init(lif);
1598
1599         ionic_set_rx_mode(lif->netdev);
1600
1601         return 0;
1602
1603 err_out:
1604         while (i--) {
1605                 ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
1606                 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
1607         }
1608
1609         return err;
1610 }
1611
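/* Fill and enable each Rx queue, then enable its partner Tx queue; on
 * failure, disable whatever pairs were already enabled.
 */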
1612 static int ionic_txrx_enable(struct ionic_lif *lif)
1613 {
1614         int i, err;
1615
1616         for (i = 0; i < lif->nxqs; i++) {
1617                 ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
1618                 err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
1619                 if (err)
1620                         goto err_out;
1621
1622                 err = ionic_qcq_enable(lif->txqcqs[i].qcq);
1623                 if (err) {
1624                         if (err != -ETIMEDOUT)
1625                                 ionic_qcq_disable(lif->rxqcqs[i].qcq);
1626                         goto err_out;
1627                 }
1628         }
1629
1630         return 0;
1631
1632 err_out:
1633         while (i--) {
1634                 err = ionic_qcq_disable(lif->txqcqs[i].qcq);
1635                 if (err == -ETIMEDOUT)
1636                         break;
1637                 err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
1638                 if (err == -ETIMEDOUT)
1639                         break;
1640         }
1641
1642         return err;
1643 }
1644
1645 static int ionic_start_queues(struct ionic_lif *lif)
1646 {
1647         int err;
1648
1649         if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
1650                 return 0;
1651
1652         err = ionic_txrx_enable(lif);
1653         if (err) {
1654                 clear_bit(IONIC_LIF_F_UP, lif->state);
1655                 return err;
1656         }
1657         netif_tx_wake_all_queues(lif->netdev);
1658
1659         return 0;
1660 }
1661
1662 int ionic_open(struct net_device *netdev)
1663 {
1664         struct ionic_lif *lif = netdev_priv(netdev);
1665         int err;
1666
1667         err = ionic_txrx_alloc(lif);
1668         if (err)
1669                 return err;
1670
1671         err = ionic_txrx_init(lif);
1672         if (err)
1673                 goto err_out;
1674
1675         /* don't start the queues until we have link */
1676         if (netif_carrier_ok(netdev)) {
1677                 err = ionic_start_queues(lif);
1678                 if (err)
1679                         goto err_txrx_deinit;
1680         }
1681
1682         return 0;
1683
1684 err_txrx_deinit:
1685         ionic_txrx_deinit(lif);
1686 err_out:
1687         ionic_txrx_free(lif);
1688         return err;
1689 }
1690
1691 static void ionic_stop_queues(struct ionic_lif *lif)
1692 {
1693         if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
1694                 return;
1695
1696         ionic_txrx_disable(lif);
1697         netif_tx_disable(lif->netdev);
1698 }
1699
1700 int ionic_stop(struct net_device *netdev)
1701 {
1702         struct ionic_lif *lif = netdev_priv(netdev);
1703
1704         if (!netif_device_present(netdev))
1705                 return 0;
1706
1707         ionic_stop_queues(lif);
1708         ionic_txrx_deinit(lif);
1709         ionic_txrx_free(lif);
1710
1711         return 0;
1712 }
1713
1714 static int ionic_get_vf_config(struct net_device *netdev,
1715                                int vf, struct ifla_vf_info *ivf)
1716 {
1717         struct ionic_lif *lif = netdev_priv(netdev);
1718         struct ionic *ionic = lif->ionic;
1719         int ret = 0;
1720
1721         if (!netif_device_present(netdev))
1722                 return -EBUSY;
1723
1724         down_read(&ionic->vf_op_lock);
1725
1726         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1727                 ret = -EINVAL;
1728         } else {
1729                 ivf->vf           = vf;
1730                 ivf->vlan         = ionic->vfs[vf].vlanid;
1731                 ivf->qos          = 0;
1732                 ivf->spoofchk     = ionic->vfs[vf].spoofchk;
1733                 ivf->linkstate    = ionic->vfs[vf].linkstate;
1734                 ivf->max_tx_rate  = ionic->vfs[vf].maxrate;
1735                 ivf->trusted      = ionic->vfs[vf].trusted;
1736                 ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
1737         }
1738
1739         up_read(&ionic->vf_op_lock);
1740         return ret;
1741 }
1742
1743 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
1744                               struct ifla_vf_stats *vf_stats)
1745 {
1746         struct ionic_lif *lif = netdev_priv(netdev);
1747         struct ionic *ionic = lif->ionic;
1748         struct ionic_lif_stats *vs;
1749         int ret = 0;
1750
1751         if (!netif_device_present(netdev))
1752                 return -EBUSY;
1753
1754         down_read(&ionic->vf_op_lock);
1755
1756         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1757                 ret = -EINVAL;
1758         } else {
1759                 memset(vf_stats, 0, sizeof(*vf_stats));
1760                 vs = &ionic->vfs[vf].stats;
1761
1762                 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
1763                 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
1764                 vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
1765                 vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
1766                 vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
1767                 vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
1768                 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
1769                                        le64_to_cpu(vs->rx_mcast_drop_packets) +
1770                                        le64_to_cpu(vs->rx_bcast_drop_packets);
1771                 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
1772                                        le64_to_cpu(vs->tx_mcast_drop_packets) +
1773                                        le64_to_cpu(vs->tx_bcast_drop_packets);
1774         }
1775
1776         up_read(&ionic->vf_op_lock);
1777         return ret;
1778 }
1779
1780 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1781 {
1782         struct ionic_lif *lif = netdev_priv(netdev);
1783         struct ionic *ionic = lif->ionic;
1784         int ret;
1785
1786         if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
1787                 return -EINVAL;
1788
1789         if (!netif_device_present(netdev))
1790                 return -EBUSY;
1791
1792         down_write(&ionic->vf_op_lock);
1793
1794         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1795                 ret = -EINVAL;
1796         } else {
1797                 ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
1798                 if (!ret)
1799                         ether_addr_copy(ionic->vfs[vf].macaddr, mac);
1800         }
1801
1802         up_write(&ionic->vf_op_lock);
1803         return ret;
1804 }
1805
1806 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1807                              u8 qos, __be16 proto)
1808 {
1809         struct ionic_lif *lif = netdev_priv(netdev);
1810         struct ionic *ionic = lif->ionic;
1811         int ret;
1812
1813         /* until someday when we support qos */
1814         if (qos)
1815                 return -EINVAL;
1816
1817         if (vlan > 4095)
1818                 return -EINVAL;
1819
1820         if (proto != htons(ETH_P_8021Q))
1821                 return -EPROTONOSUPPORT;
1822
1823         if (!netif_device_present(netdev))
1824                 return -EBUSY;
1825
1826         down_write(&ionic->vf_op_lock);
1827
1828         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1829                 ret = -EINVAL;
1830         } else {
1831                 ret = ionic_set_vf_config(ionic, vf,
1832                                           IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
1833                 if (!ret)
1834                         ionic->vfs[vf].vlanid = vlan;
1835         }
1836
1837         up_write(&ionic->vf_op_lock);
1838         return ret;
1839 }
1840
1841 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
1842                              int tx_min, int tx_max)
1843 {
1844         struct ionic_lif *lif = netdev_priv(netdev);
1845         struct ionic *ionic = lif->ionic;
1846         int ret;
1847
1848         /* setting the min just seems silly */
1849         if (tx_min)
1850                 return -EINVAL;
1851
1852         if (!netif_device_present(netdev))
1853                 return -EBUSY;
1854
1855         down_write(&ionic->vf_op_lock);
1856
1857         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1858                 ret = -EINVAL;
1859         } else {
1860                 ret = ionic_set_vf_config(ionic, vf,
1861                                           IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
1862                 if (!ret)
1863                         lif->ionic->vfs[vf].maxrate = tx_max;
1864         }
1865
1866         up_write(&ionic->vf_op_lock);
1867         return ret;
1868 }
1869
1870 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
1871 {
1872         struct ionic_lif *lif = netdev_priv(netdev);
1873         struct ionic *ionic = lif->ionic;
1874         u8 data = set;  /* convert to u8 for config */
1875         int ret;
1876
1877         if (!netif_device_present(netdev))
1878                 return -EBUSY;
1879
1880         down_write(&ionic->vf_op_lock);
1881
1882         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1883                 ret = -EINVAL;
1884         } else {
1885                 ret = ionic_set_vf_config(ionic, vf,
1886                                           IONIC_VF_ATTR_SPOOFCHK, &data);
1887                 if (!ret)
1888                         ionic->vfs[vf].spoofchk = data;
1889         }
1890
1891         up_write(&ionic->vf_op_lock);
1892         return ret;
1893 }
1894
1895 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
1896 {
1897         struct ionic_lif *lif = netdev_priv(netdev);
1898         struct ionic *ionic = lif->ionic;
1899         u8 data = set;  /* convert to u8 for config */
1900         int ret;
1901
1902         if (!netif_device_present(netdev))
1903                 return -EBUSY;
1904
1905         down_write(&ionic->vf_op_lock);
1906
1907         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1908                 ret = -EINVAL;
1909         } else {
1910                 ret = ionic_set_vf_config(ionic, vf,
1911                                           IONIC_VF_ATTR_TRUST, &data);
1912                 if (!ret)
1913                         ionic->vfs[vf].trusted = data;
1914         }
1915
1916         up_write(&ionic->vf_op_lock);
1917         return ret;
1918 }
1919
1920 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
1921 {
1922         struct ionic_lif *lif = netdev_priv(netdev);
1923         struct ionic *ionic = lif->ionic;
1924         u8 data;
1925         int ret;
1926
1927         switch (set) {
1928         case IFLA_VF_LINK_STATE_ENABLE:
1929                 data = IONIC_VF_LINK_STATUS_UP;
1930                 break;
1931         case IFLA_VF_LINK_STATE_DISABLE:
1932                 data = IONIC_VF_LINK_STATUS_DOWN;
1933                 break;
1934         case IFLA_VF_LINK_STATE_AUTO:
1935                 data = IONIC_VF_LINK_STATUS_AUTO;
1936                 break;
1937         default:
1938                 return -EINVAL;
1939         }
1940
1941         if (!netif_device_present(netdev))
1942                 return -EBUSY;
1943
1944         down_write(&ionic->vf_op_lock);
1945
1946         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1947                 ret = -EINVAL;
1948         } else {
1949                 ret = ionic_set_vf_config(ionic, vf,
1950                                           IONIC_VF_ATTR_LINKSTATE, &data);
1951                 if (!ret)
1952                         ionic->vfs[vf].linkstate = set;
1953         }
1954
1955         up_write(&ionic->vf_op_lock);
1956         return ret;
1957 }
1958
1959 static const struct net_device_ops ionic_netdev_ops = {
1960         .ndo_open               = ionic_open,
1961         .ndo_stop               = ionic_stop,
1962         .ndo_start_xmit         = ionic_start_xmit,
1963         .ndo_get_stats64        = ionic_get_stats64,
1964         .ndo_set_rx_mode        = ionic_set_rx_mode,
1965         .ndo_set_features       = ionic_set_features,
1966         .ndo_set_mac_address    = ionic_set_mac_address,
1967         .ndo_validate_addr      = eth_validate_addr,
1968         .ndo_tx_timeout         = ionic_tx_timeout,
1969         .ndo_change_mtu         = ionic_change_mtu,
1970         .ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
1971         .ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
1972         .ndo_set_vf_vlan        = ionic_set_vf_vlan,
1973         .ndo_set_vf_trust       = ionic_set_vf_trust,
1974         .ndo_set_vf_mac         = ionic_set_vf_mac,
1975         .ndo_set_vf_rate        = ionic_set_vf_rate,
1976         .ndo_set_vf_spoofchk    = ionic_set_vf_spoofchk,
1977         .ndo_get_vf_config      = ionic_get_vf_config,
1978         .ndo_set_vf_link_state  = ionic_set_vf_link_state,
1979         .ndo_get_vf_stats       = ionic_get_vf_stats,
1980 };
1981
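/* Stop and restart the queues while holding the QUEUE_RESET bit, so that
 * other paths (e.g. the link status check) leave the queues alone until
 * the reset is finished.
 */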
1982 int ionic_reset_queues(struct ionic_lif *lif)
1983 {
1984         bool running;
1985         int err = 0;
1986
1987         /* Put off the next watchdog timeout */
1988         netif_trans_update(lif->netdev);
1989
1990         err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
1991         if (err)
1992                 return err;
1993
1994         running = netif_running(lif->netdev);
1995         if (running)
1996                 err = ionic_stop(lif->netdev);
1997         if (!err && running)
1998                 ionic_open(lif->netdev);
1999
2000         clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
2001
2002         return err;
2003 }
2004
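/* Allocate the netdev and LIF, the LIF info region, the queue structures
 * and the RSS indirection table.
 */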
2005 static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
2006 {
2007         struct device *dev = ionic->dev;
2008         struct net_device *netdev;
2009         struct ionic_lif *lif;
2010         int tbl_sz;
2011         int err;
2012
2013         netdev = alloc_etherdev_mqs(sizeof(*lif),
2014                                     ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
2015         if (!netdev) {
2016                 dev_err(dev, "Cannot allocate netdev, aborting\n");
2017                 return ERR_PTR(-ENOMEM);
2018         }
2019
2020         SET_NETDEV_DEV(netdev, dev);
2021
2022         lif = netdev_priv(netdev);
2023         lif->netdev = netdev;
2024         ionic->master_lif = lif;
2025         netdev->netdev_ops = &ionic_netdev_ops;
2026         ionic_ethtool_set_ops(netdev);
2027
2028         netdev->watchdog_timeo = 2 * HZ;
2029         netif_carrier_off(netdev);
2030
2031         netdev->min_mtu = IONIC_MIN_MTU;
2032         netdev->max_mtu = IONIC_MAX_MTU;
2033
2034         lif->neqs = ionic->neqs_per_lif;
2035         lif->nxqs = ionic->ntxqs_per_lif;
2036
2037         lif->ionic = ionic;
2038         lif->index = index;
2039         lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
2040         lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
2041
2042         /* Convert the default coalesce value to actual hw resolution */
2043         lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
2044         lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
2045                                                     lif->rx_coalesce_usecs);
2046
2047         snprintf(lif->name, sizeof(lif->name), "lif%u", index);
2048
2049         spin_lock_init(&lif->adminq_lock);
2050
2051         spin_lock_init(&lif->deferred.lock);
2052         INIT_LIST_HEAD(&lif->deferred.list);
2053         INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
2054
2055         /* allocate lif info */
2056         lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
2057         lif->info = dma_alloc_coherent(dev, lif->info_sz,
2058                                        &lif->info_pa, GFP_KERNEL);
2059         if (!lif->info) {
2060                 dev_err(dev, "Failed to allocate lif info, aborting\n");
2061                 err = -ENOMEM;
2062                 goto err_out_free_netdev;
2063         }
2064
2065         ionic_debugfs_add_lif(lif);
2066
2067         /* allocate queues */
2068         err = ionic_qcqs_alloc(lif);
2069         if (err)
2070                 goto err_out_free_lif_info;
2071
2072         /* allocate rss indirection table */
2073         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
2074         lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
2075         lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
2076                                               &lif->rss_ind_tbl_pa,
2077                                               GFP_KERNEL);
2078
2079         if (!lif->rss_ind_tbl) {
2080                 err = -ENOMEM;
2081                 dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
2082                 goto err_out_free_qcqs;
2083         }
2084         netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
2085
2086         list_add_tail(&lif->list, &ionic->lifs);
2087
2088         return lif;
2089
2090 err_out_free_qcqs:
2091         ionic_qcqs_free(lif);
2092 err_out_free_lif_info:
2093         dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2094         lif->info = NULL;
2095         lif->info_pa = 0;
2096 err_out_free_netdev:
2097         free_netdev(lif->netdev);
2098         lif = NULL;
2099
2100         return ERR_PTR(err);
2101 }
2102
2103 int ionic_lifs_alloc(struct ionic *ionic)
2104 {
2105         struct ionic_lif *lif;
2106
2107         INIT_LIST_HEAD(&ionic->lifs);
2108
2109         /* only build the first lif, others are for later features */
2110         set_bit(0, ionic->lifbits);
2111
2112         lif = ionic_lif_alloc(ionic, 0);
2113         if (IS_ERR_OR_NULL(lif)) {
2114                 clear_bit(0, ionic->lifbits);
2115                 return -ENOMEM;
2116         }
2117
2118         lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
2119         ionic_lif_queue_identify(lif);
2120
2121         return 0;
2122 }
2123
2124 static void ionic_lif_reset(struct ionic_lif *lif)
2125 {
2126         struct ionic_dev *idev = &lif->ionic->idev;
2127
2128         mutex_lock(&lif->ionic->dev_cmd_lock);
2129         ionic_dev_cmd_lif_reset(idev, lif->index);
2130         ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2131         mutex_unlock(&lif->ionic->dev_cmd_lock);
2132 }
2133
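/* The FW has stopped: detach the netdev and tear down the queues so they
 * can be rebuilt when the FW comes back up.
 */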
2134 static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
2135 {
2136         struct ionic *ionic = lif->ionic;
2137
2138         if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
2139                 return;
2140
2141         dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
2142
2143         netif_device_detach(lif->netdev);
2144
2145         if (test_bit(IONIC_LIF_F_UP, lif->state)) {
2146                 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
2147                 ionic_stop_queues(lif);
2148         }
2149
2150         if (netif_running(lif->netdev)) {
2151                 ionic_txrx_deinit(lif);
2152                 ionic_txrx_free(lif);
2153         }
2154         ionic_lifs_deinit(ionic);
2155         ionic_reset(ionic);
2156         ionic_qcqs_free(lif);
2157
2158         dev_info(ionic->dev, "FW Down: LIFs stopped\n");
2159 }
2160
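/* The FW is back: re-init the device and LIFs, replay the Rx filters,
 * and, if the interface was running, rebuild the queues; the link check
 * requested at the end can then restart them.
 */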
2161 static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
2162 {
2163         struct ionic *ionic = lif->ionic;
2164         int err;
2165
2166         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2167                 return;
2168
2169         dev_info(ionic->dev, "FW Up: restarting LIFs\n");
2170
2171         ionic_init_devinfo(ionic);
2172         ionic_port_init(ionic);
2173         err = ionic_qcqs_alloc(lif);
2174         if (err)
2175                 goto err_out;
2176
2177         err = ionic_lifs_init(ionic);
2178         if (err)
2179                 goto err_qcqs_free;
2180
2181         if (lif->registered)
2182                 ionic_lif_set_netdev_info(lif);
2183
2184         ionic_rx_filter_replay(lif);
2185
2186         if (netif_running(lif->netdev)) {
2187                 err = ionic_txrx_alloc(lif);
2188                 if (err)
2189                         goto err_lifs_deinit;
2190
2191                 err = ionic_txrx_init(lif);
2192                 if (err)
2193                         goto err_txrx_free;
2194         }
2195
2196         clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
2197         ionic_link_status_check_request(lif);
2198         netif_device_attach(lif->netdev);
2199         dev_info(ionic->dev, "FW Up: LIFs restarted\n");
2200
2201         return;
2202
2203 err_txrx_free:
2204         ionic_txrx_free(lif);
2205 err_lifs_deinit:
2206         ionic_lifs_deinit(ionic);
2207 err_qcqs_free:
2208         ionic_qcqs_free(lif);
2209 err_out:
2210         dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
2211 }
2212
2213 static void ionic_lif_free(struct ionic_lif *lif)
2214 {
2215         struct device *dev = lif->ionic->dev;
2216
2217         /* free rss indirection table */
2218         dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
2219                           lif->rss_ind_tbl_pa);
2220         lif->rss_ind_tbl = NULL;
2221         lif->rss_ind_tbl_pa = 0;
2222
2223         /* free queues */
2224         ionic_qcqs_free(lif);
2225         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2226                 ionic_lif_reset(lif);
2227
2228         /* free lif info */
2229         dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2230         lif->info = NULL;
2231         lif->info_pa = 0;
2232
2233         /* unmap doorbell page */
2234         ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2235         lif->kern_dbpage = NULL;
2236         kfree(lif->dbid_inuse);
2237         lif->dbid_inuse = NULL;
2238
2239         /* free netdev & lif */
2240         ionic_debugfs_del_lif(lif);
2241         list_del(&lif->list);
2242         free_netdev(lif->netdev);
2243 }
2244
2245 void ionic_lifs_free(struct ionic *ionic)
2246 {
2247         struct list_head *cur, *tmp;
2248         struct ionic_lif *lif;
2249
2250         list_for_each_safe(cur, tmp, &ionic->lifs) {
2251                 lif = list_entry(cur, struct ionic_lif, list);
2252
2253                 ionic_lif_free(lif);
2254         }
2255 }
2256
2257 static void ionic_lif_deinit(struct ionic_lif *lif)
2258 {
2259         if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
2260                 return;
2261
2262         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2263                 cancel_work_sync(&lif->deferred.work);
2264                 cancel_work_sync(&lif->tx_timeout_work);
2265                 ionic_rx_filters_deinit(lif);
2266         }
2267
2268         if (lif->netdev->features & NETIF_F_RXHASH)
2269                 ionic_lif_rss_deinit(lif);
2270
2271         napi_disable(&lif->adminqcq->napi);
2272         ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2273         ionic_lif_qcq_deinit(lif, lif->adminqcq);
2274
2275         ionic_lif_reset(lif);
2276 }
2277
2278 void ionic_lifs_deinit(struct ionic *ionic)
2279 {
2280         struct list_head *cur, *tmp;
2281         struct ionic_lif *lif;
2282
2283         list_for_each_safe(cur, tmp, &ionic->lifs) {
2284                 lif = list_entry(cur, struct ionic_lif, list);
2285                 ionic_lif_deinit(lif);
2286         }
2287 }
2288
2289 static int ionic_lif_adminq_init(struct ionic_lif *lif)
2290 {
2291         struct device *dev = lif->ionic->dev;
2292         struct ionic_q_init_comp comp;
2293         struct ionic_dev *idev;
2294         struct ionic_qcq *qcq;
2295         struct ionic_queue *q;
2296         int err;
2297
2298         idev = &lif->ionic->idev;
2299         qcq = lif->adminqcq;
2300         q = &qcq->q;
2301
2302         mutex_lock(&lif->ionic->dev_cmd_lock);
2303         ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
2304         err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2305         ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2306         mutex_unlock(&lif->ionic->dev_cmd_lock);
2307         if (err) {
2308                 netdev_err(lif->netdev, "adminq init failed %d\n", err);
2309                 return err;
2310         }
2311
2312         q->hw_type = comp.hw_type;
2313         q->hw_index = le32_to_cpu(comp.hw_index);
2314         q->dbval = IONIC_DBELL_QID(q->hw_index);
2315
2316         dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
2317         dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
2318
2319         netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
2320                        NAPI_POLL_WEIGHT);
2321
2322         napi_enable(&qcq->napi);
2323
2324         if (qcq->flags & IONIC_QCQ_F_INTR)
2325                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
2326                                 IONIC_INTR_MASK_CLEAR);
2327
2328         qcq->flags |= IONIC_QCQ_F_INITED;
2329
2330         return 0;
2331 }
2332
2333 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
2334 {
2335         struct ionic_qcq *qcq = lif->notifyqcq;
2336         struct device *dev = lif->ionic->dev;
2337         struct ionic_queue *q = &qcq->q;
2338         int err;
2339
2340         struct ionic_admin_ctx ctx = {
2341                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2342                 .cmd.q_init = {
2343                         .opcode = IONIC_CMD_Q_INIT,
2344                         .lif_index = cpu_to_le16(lif->index),
2345                         .type = q->type,
2346                         .ver = lif->qtype_info[q->type].version,
2347                         .index = cpu_to_le32(q->index),
2348                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
2349                                              IONIC_QINIT_F_ENA),
2350                         .intr_index = cpu_to_le16(lif->adminqcq->intr.index),
2351                         .pid = cpu_to_le16(q->pid),
2352                         .ring_size = ilog2(q->num_descs),
2353                         .ring_base = cpu_to_le64(q->base_pa),
2354                 }
2355         };
2356
2357         dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
2358         dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
2359         dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
2360         dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
2361
2362         err = ionic_adminq_post_wait(lif, &ctx);
2363         if (err)
2364                 return err;
2365
2366         lif->last_eid = 0;
2367         q->hw_type = ctx.comp.q_init.hw_type;
2368         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
2369         q->dbval = IONIC_DBELL_QID(q->hw_index);
2370
2371         dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
2372         dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
2373
2374         /* preset the callback info */
2375         q->info[0].cb_arg = lif;
2376
2377         qcq->flags |= IONIC_QCQ_F_INITED;
2378
2379         return 0;
2380 }
2381
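/* Read the device's MAC address and reconcile it with the netdev: adopt
 * the device MAC if the netdev has none yet, otherwise make sure the
 * existing netdev MAC is in the filter list.
 */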
2382 static int ionic_station_set(struct ionic_lif *lif)
2383 {
2384         struct net_device *netdev = lif->netdev;
2385         struct ionic_admin_ctx ctx = {
2386                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2387                 .cmd.lif_getattr = {
2388                         .opcode = IONIC_CMD_LIF_GETATTR,
2389                         .index = cpu_to_le16(lif->index),
2390                         .attr = IONIC_LIF_ATTR_MAC,
2391                 },
2392         };
2393         struct sockaddr addr;
2394         int err;
2395
2396         err = ionic_adminq_post_wait(lif, &ctx);
2397         if (err)
2398                 return err;
2399         netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
2400                    ctx.comp.lif_getattr.mac);
2401         if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
2402                 return 0;
2403
2404         if (!is_zero_ether_addr(netdev->dev_addr)) {
2405                 /* If the netdev mac is non-zero and doesn't match the default
2406                  * device address, it was set by something earlier and we're
2407                  * likely here again after a fw-upgrade reset.  We need to be
2408                  * sure the netdev mac is in our filter list.
2409                  */
2410                 if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
2411                                       netdev->dev_addr))
2412                         ionic_lif_addr(lif, netdev->dev_addr, true);
2413         } else {
2414                 /* Update the netdev mac with the device's mac */
2415                 memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
2416                 addr.sa_family = AF_INET;
2417                 err = eth_prepare_mac_addr_change(netdev, &addr);
2418                 if (err) {
2419                         netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
2420                                     addr.sa_data, err);
2421                         return 0;
2422                 }
2423
2424                 eth_commit_mac_addr_change(netdev, &addr);
2425         }
2426
2427         netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
2428                    netdev->dev_addr);
2429         ionic_lif_addr(lif, netdev->dev_addr, true);
2430
2431         return 0;
2432 }
2433
2434 static int ionic_lif_init(struct ionic_lif *lif)
2435 {
2436         struct ionic_dev *idev = &lif->ionic->idev;
2437         struct device *dev = lif->ionic->dev;
2438         struct ionic_lif_init_comp comp;
2439         int dbpage_num;
2440         int err;
2441
2442         mutex_lock(&lif->ionic->dev_cmd_lock);
2443         ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
2444         err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2445         ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2446         mutex_unlock(&lif->ionic->dev_cmd_lock);
2447         if (err)
2448                 return err;
2449
2450         lif->hw_index = le16_to_cpu(comp.hw_index);
2451
2452         /* now that we have the hw_index we can figure out our doorbell page */
2453         lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
2454         if (!lif->dbid_count) {
2455                 dev_err(dev, "No doorbell pages, aborting\n");
2456                 return -EINVAL;
2457         }
2458
2459         lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
2460         if (!lif->dbid_inuse) {
2461                 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
2462                 return -ENOMEM;
2463         }
2464
2465         /* first doorbell id reserved for kernel (dbid aka pid == zero) */
2466         set_bit(0, lif->dbid_inuse);
2467         lif->kern_pid = 0;
2468
2469         dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
2470         lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
2471         if (!lif->kern_dbpage) {
2472                 dev_err(dev, "Cannot map dbpage, aborting\n");
2473                 err = -ENOMEM;
2474                 goto err_out_free_dbid;
2475         }
2476
2477         err = ionic_lif_adminq_init(lif);
2478         if (err)
2479                 goto err_out_adminq_deinit;
2480
2481         if (lif->ionic->nnqs_per_lif) {
2482                 err = ionic_lif_notifyq_init(lif);
2483                 if (err)
2484                         goto err_out_notifyq_deinit;
2485         }
2486
2487         err = ionic_init_nic_features(lif);
2488         if (err)
2489                 goto err_out_notifyq_deinit;
2490
2491         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2492                 err = ionic_rx_filters_init(lif);
2493                 if (err)
2494                         goto err_out_notifyq_deinit;
2495         }
2496
2497         err = ionic_station_set(lif);
2498         if (err)
2499                 goto err_out_notifyq_deinit;
2500
2501         lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
2502
2503         set_bit(IONIC_LIF_F_INITED, lif->state);
2504
2505         INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
2506
2507         return 0;
2508
2509 err_out_notifyq_deinit:
2510         ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2511 err_out_adminq_deinit:
2512         ionic_lif_qcq_deinit(lif, lif->adminqcq);
2513         ionic_lif_reset(lif);
2514         ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2515         lif->kern_dbpage = NULL;
2516 err_out_free_dbid:
2517         kfree(lif->dbid_inuse);
2518         lif->dbid_inuse = NULL;
2519
2520         return err;
2521 }
2522
2523 int ionic_lifs_init(struct ionic *ionic)
2524 {
2525         struct list_head *cur, *tmp;
2526         struct ionic_lif *lif;
2527         int err;
2528
2529         list_for_each_safe(cur, tmp, &ionic->lifs) {
2530                 lif = list_entry(cur, struct ionic_lif, list);
2531                 err = ionic_lif_init(lif);
2532                 if (err)
2533                         return err;
2534         }
2535
2536         return 0;
2537 }
2538
2539 static void ionic_lif_notify_work(struct work_struct *ws)
2540 {
2541 }
2542
2543 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
2544 {
2545         struct ionic_admin_ctx ctx = {
2546                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2547                 .cmd.lif_setattr = {
2548                         .opcode = IONIC_CMD_LIF_SETATTR,
2549                         .index = cpu_to_le16(lif->index),
2550                         .attr = IONIC_LIF_ATTR_NAME,
2551                 },
2552         };
2553
2554         strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
2555                 sizeof(ctx.cmd.lif_setattr.name));
2556
2557         ionic_adminq_post_wait(lif, &ctx);
2558 }
2559
2560 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
2561 {
2562         if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
2563                 return NULL;
2564
2565         return netdev_priv(netdev);
2566 }
2567
2568 static int ionic_lif_notify(struct notifier_block *nb,
2569                             unsigned long event, void *info)
2570 {
2571         struct net_device *ndev = netdev_notifier_info_to_dev(info);
2572         struct ionic *ionic = container_of(nb, struct ionic, nb);
2573         struct ionic_lif *lif = ionic_netdev_lif(ndev);
2574
2575         if (!lif || lif->ionic != ionic)
2576                 return NOTIFY_DONE;
2577
2578         switch (event) {
2579         case NETDEV_CHANGENAME:
2580                 ionic_lif_set_netdev_info(lif);
2581                 break;
2582         }
2583
2584         return NOTIFY_DONE;
2585 }
2586
2587 int ionic_lifs_register(struct ionic *ionic)
2588 {
2589         int err;
2590
2591         INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
2592
2593         ionic->nb.notifier_call = ionic_lif_notify;
2594
2595         err = register_netdevice_notifier(&ionic->nb);
2596         if (err)
2597                 ionic->nb.notifier_call = NULL;
2598
2599         /* only register LIF0 for now */
2600         err = register_netdev(ionic->master_lif->netdev);
2601         if (err) {
2602                 dev_err(ionic->dev, "Cannot register net device, aborting\n");
2603                 return err;
2604         }
2605         ionic->master_lif->registered = true;
2606
2607         return 0;
2608 }
2609
2610 void ionic_lifs_unregister(struct ionic *ionic)
2611 {
2612         if (ionic->nb.notifier_call) {
2613                 unregister_netdevice_notifier(&ionic->nb);
2614                 cancel_work_sync(&ionic->nb_work);
2615                 ionic->nb.notifier_call = NULL;
2616         }
2617
2618         /* There is only one lif ever registered in the
2619          * current model, so don't bother searching the
2620          * ionic->lifs list for candidates to unregister
2621          */
2622         if (ionic->master_lif &&
2623             ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
2624                 unregister_netdev(ionic->master_lif->netdev);
2625 }
2626
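/* Ask the device which version and descriptor sizes it supports for each
 * queue type we know about; older FW without queue identify support
 * leaves the zeroed qtype_info defaults in place.
 */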
2627 static void ionic_lif_queue_identify(struct ionic_lif *lif)
2628 {
2629         struct ionic *ionic = lif->ionic;
2630         union ionic_q_identity *q_ident;
2631         struct ionic_dev *idev;
2632         int qtype;
2633         int err;
2634
2635         idev = &lif->ionic->idev;
2636         q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
2637
2638         for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
2639                 struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
2640
2641                 /* only identify the queue types we know about */
2642                 switch (qtype) {
2643                 case IONIC_QTYPE_ADMINQ:
2644                 case IONIC_QTYPE_NOTIFYQ:
2645                 case IONIC_QTYPE_RXQ:
2646                 case IONIC_QTYPE_TXQ:
2647                         break;
2648                 default:
2649                         continue;
2650                 }
2651
2652                 memset(qti, 0, sizeof(*qti));
2653
2654                 mutex_lock(&ionic->dev_cmd_lock);
2655                 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
2656                                              ionic_qtype_versions[qtype]);
2657                 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2658                 if (!err) {
2659                         qti->version   = q_ident->version;
2660                         qti->supported = q_ident->supported;
2661                         qti->features  = le64_to_cpu(q_ident->features);
2662                         qti->desc_sz   = le16_to_cpu(q_ident->desc_sz);
2663                         qti->comp_sz   = le16_to_cpu(q_ident->comp_sz);
2664                         qti->sg_desc_sz   = le16_to_cpu(q_ident->sg_desc_sz);
2665                         qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
2666                         qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
2667                 }
2668                 mutex_unlock(&ionic->dev_cmd_lock);
2669
2670                 if (err == -EINVAL) {
2671                         dev_err(ionic->dev, "qtype %d not supported\n", qtype);
2672                         continue;
2673                 } else if (err == -EIO) {
2674                         dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
2675                         return;
2676                 } else if (err) {
2677                         dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
2678                                 qtype, err);
2679                         return;
2680                 }
2681
2682                 dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
2683                         qtype, qti->version);
2684                 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
2685                         qtype, qti->supported);
2686                 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
2687                         qtype, qti->features);
2688                 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
2689                         qtype, qti->desc_sz);
2690                 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
2691                         qtype, qti->comp_sz);
2692                 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
2693                         qtype, qti->sg_desc_sz);
2694                 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
2695                         qtype, qti->max_sg_elems);
2696                 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
2697                         qtype, qti->sg_desc_stride);
2698         }
2699 }
2700
2701 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
2702                        union ionic_lif_identity *lid)
2703 {
2704         struct ionic_dev *idev = &ionic->idev;
2705         size_t sz;
2706         int err;
2707
2708         sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
2709
2710         mutex_lock(&ionic->dev_cmd_lock);
2711         ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
2712         err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2713         memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
2714         mutex_unlock(&ionic->dev_cmd_lock);
2715         if (err)
2716                 return err;
2717
2718         dev_dbg(ionic->dev, "capabilities 0x%llx\n",
2719                 le64_to_cpu(lid->capabilities));
2720
2721         dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
2722                 le32_to_cpu(lid->eth.max_ucast_filters));
2723         dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
2724                 le32_to_cpu(lid->eth.max_mcast_filters));
2725         dev_dbg(ionic->dev, "eth.features 0x%llx\n",
2726                 le64_to_cpu(lid->eth.config.features));
2727         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
2728                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
2729         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
2730                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
2731         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
2732                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
2733         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
2734                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
2735         dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
2736         dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
2737         dev_dbg(ionic->dev, "eth.config.mtu %d\n",
2738                 le32_to_cpu(lid->eth.config.mtu));
2739
2740         return 0;
2741 }
2742
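/* Size the LIF: ask for one adminq/notifyq interrupt plus one per TxRx
 * queue pair (capped at the number of online CPUs) plus the RDMA EQs,
 * then keep halving the counts until the OS grants enough vectors.
 */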
2743 int ionic_lifs_size(struct ionic *ionic)
2744 {
2745         struct ionic_identity *ident = &ionic->ident;
2746         unsigned int nintrs, dev_nintrs;
2747         union ionic_lif_config *lc;
2748         unsigned int ntxqs_per_lif;
2749         unsigned int nrxqs_per_lif;
2750         unsigned int neqs_per_lif;
2751         unsigned int nnqs_per_lif;
2752         unsigned int nxqs, neqs;
2753         unsigned int min_intrs;
2754         int err;
2755
2756         lc = &ident->lif.eth.config;
2757         dev_nintrs = le32_to_cpu(ident->dev.nintrs);
2758         neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
2759         nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
2760         ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
2761         nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
2762
2763         nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
2764         nxqs = min(nxqs, num_online_cpus());
2765         neqs = min(neqs_per_lif, num_online_cpus());
2766
2767 try_again:
2768         /* interrupt usage:
2769          *    1 for master lif adminq/notifyq
2770          *    1 for each CPU for master lif TxRx queue pairs
2771          *    whatever's left is for RDMA queues
2772          */
2773         nintrs = 1 + nxqs + neqs;
2774         min_intrs = 2;  /* adminq + 1 TxRx queue pair */
2775
2776         if (nintrs > dev_nintrs)
2777                 goto try_fewer;
2778
2779         err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
2780         if (err < 0 && err != -ENOSPC) {
2781                 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
2782                 return err;
2783         }
2784         if (err == -ENOSPC)
2785                 goto try_fewer;
2786
2787         if (err != nintrs) {
2788                 ionic_bus_free_irq_vectors(ionic);
2789                 goto try_fewer;
2790         }
2791
2792         ionic->nnqs_per_lif = nnqs_per_lif;
2793         ionic->neqs_per_lif = neqs;
2794         ionic->ntxqs_per_lif = nxqs;
2795         ionic->nrxqs_per_lif = nxqs;
2796         ionic->nintrs = nintrs;
2797
2798         ionic_debugfs_add_sizes(ionic);
2799
2800         return 0;
2801
2802 try_fewer:
2803         if (nnqs_per_lif > 1) {
2804                 nnqs_per_lif >>= 1;
2805                 goto try_again;
2806         }
2807         if (neqs > 1) {
2808                 neqs >>= 1;
2809                 goto try_again;
2810         }
2811         if (nxqs > 1) {
2812                 nxqs >>= 1;
2813                 goto try_again;
2814         }
2815         dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
2816         return -ENOSPC;
2817 }