Merge tag 'pwm/for-5.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/thierry...
[linux-2.6-microblaze.git] / drivers / net / ethernet / pensando / ionic / ionic_lif.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
4 #include <linux/printk.h>
5 #include <linux/dynamic_debug.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/if_vlan.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/interrupt.h>
11 #include <linux/pci.h>
12 #include <linux/cpumask.h>
13
14 #include "ionic.h"
15 #include "ionic_bus.h"
16 #include "ionic_lif.h"
17 #include "ionic_txrx.h"
18 #include "ionic_ethtool.h"
19 #include "ionic_debugfs.h"
20
21 /* queuetype support level */
22 static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
23         [IONIC_QTYPE_ADMINQ]  = 0,   /* 0 = Base version with CQ support */
24         [IONIC_QTYPE_NOTIFYQ] = 0,   /* 0 = Base version */
25         [IONIC_QTYPE_RXQ]     = 0,   /* 0 = Base version with CQ+SG support */
26         [IONIC_QTYPE_TXQ]     = 1,   /* 0 = Base version with CQ+SG support
27                                       * 1 =   ... with Tx SG version 1
28                                       */
29 };
30
31 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
32 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
33 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
34 static void ionic_link_status_check(struct ionic_lif *lif);
35 static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
36 static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
37 static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
38
39 static void ionic_txrx_deinit(struct ionic_lif *lif);
40 static int ionic_txrx_init(struct ionic_lif *lif);
41 static int ionic_start_queues(struct ionic_lif *lif);
42 static void ionic_stop_queues(struct ionic_lif *lif);
43 static void ionic_lif_queue_identify(struct ionic_lif *lif);
44
45 static void ionic_dim_work(struct work_struct *work)
46 {
47         struct dim *dim = container_of(work, struct dim, work);
48         struct dim_cq_moder cur_moder;
49         struct ionic_qcq *qcq;
50         u32 new_coal;
51
52         cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
53         qcq = container_of(dim, struct ionic_qcq, dim);
54         new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
55         qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
56         dim->state = DIM_START_MEASURE;
57 }
58
59 static void ionic_lif_deferred_work(struct work_struct *work)
60 {
61         struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
62         struct ionic_deferred *def = &lif->deferred;
63         struct ionic_deferred_work *w = NULL;
64
65         do {
66                 spin_lock_bh(&def->lock);
67                 if (!list_empty(&def->list)) {
68                         w = list_first_entry(&def->list,
69                                              struct ionic_deferred_work, list);
70                         list_del(&w->list);
71                 }
72                 spin_unlock_bh(&def->lock);
73
74                 if (!w)
75                         break;
76
77                 switch (w->type) {
78                 case IONIC_DW_TYPE_RX_MODE:
79                         ionic_lif_rx_mode(lif, w->rx_mode);
80                         break;
81                 case IONIC_DW_TYPE_RX_ADDR_ADD:
82                         ionic_lif_addr_add(lif, w->addr);
83                         break;
84                 case IONIC_DW_TYPE_RX_ADDR_DEL:
85                         ionic_lif_addr_del(lif, w->addr);
86                         break;
87                 case IONIC_DW_TYPE_LINK_STATUS:
88                         ionic_link_status_check(lif);
89                         break;
90                 case IONIC_DW_TYPE_LIF_RESET:
91                         if (w->fw_status)
92                                 ionic_lif_handle_fw_up(lif);
93                         else
94                                 ionic_lif_handle_fw_down(lif);
95                         break;
96                 default:
97                         break;
98                 }
99                 kfree(w);
100                 w = NULL;
101         } while (true);
102 }
103
104 void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
105                                 struct ionic_deferred_work *work)
106 {
107         spin_lock_bh(&def->lock);
108         list_add_tail(&work->list, &def->list);
109         spin_unlock_bh(&def->lock);
110         schedule_work(&def->work);
111 }
112
113 static void ionic_link_status_check(struct ionic_lif *lif)
114 {
115         struct net_device *netdev = lif->netdev;
116         u16 link_status;
117         bool link_up;
118
119         if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
120                 return;
121
122         link_status = le16_to_cpu(lif->info->status.link_status);
123         link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
124
125         if (link_up) {
126                 if (!netif_carrier_ok(netdev)) {
127                         u32 link_speed;
128
129                         ionic_port_identify(lif->ionic);
130                         link_speed = le32_to_cpu(lif->info->status.link_speed);
131                         netdev_info(netdev, "Link up - %d Gbps\n",
132                                     link_speed / 1000);
133                         netif_carrier_on(netdev);
134                 }
135
136                 if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
137                         mutex_lock(&lif->queue_lock);
138                         ionic_start_queues(lif);
139                         mutex_unlock(&lif->queue_lock);
140                 }
141         } else {
142                 if (netif_carrier_ok(netdev)) {
143                         netdev_info(netdev, "Link down\n");
144                         netif_carrier_off(netdev);
145                 }
146
147                 if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
148                         mutex_lock(&lif->queue_lock);
149                         ionic_stop_queues(lif);
150                         mutex_unlock(&lif->queue_lock);
151                 }
152         }
153
154         clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
155 }
156
157 void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
158 {
159         struct ionic_deferred_work *work;
160
161         /* we only need one request outstanding at a time */
162         if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
163                 return;
164
165         if (!can_sleep) {
166                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
167                 if (!work) {
168                         clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
169                         return;
170                 }
171
172                 work->type = IONIC_DW_TYPE_LINK_STATUS;
173                 ionic_lif_deferred_enqueue(&lif->deferred, work);
174         } else {
175                 ionic_link_status_check(lif);
176         }
177 }
178
179 static irqreturn_t ionic_isr(int irq, void *data)
180 {
181         struct napi_struct *napi = data;
182
183         napi_schedule_irqoff(napi);
184
185         return IRQ_HANDLED;
186 }
187
188 static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
189 {
190         struct ionic_intr_info *intr = &qcq->intr;
191         struct device *dev = lif->ionic->dev;
192         struct ionic_queue *q = &qcq->q;
193         const char *name;
194
195         if (lif->registered)
196                 name = lif->netdev->name;
197         else
198                 name = dev_name(dev);
199
200         snprintf(intr->name, sizeof(intr->name),
201                  "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
202
203         return devm_request_irq(dev, intr->vector, ionic_isr,
204                                 0, intr->name, &qcq->napi);
205 }
206
207 static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
208 {
209         struct ionic *ionic = lif->ionic;
210         int index;
211
212         index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
213         if (index == ionic->nintrs) {
214                 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
215                             __func__, index, ionic->nintrs);
216                 return -ENOSPC;
217         }
218
219         set_bit(index, ionic->intrs);
220         ionic_intr_init(&ionic->idev, intr, index);
221
222         return 0;
223 }
224
225 static void ionic_intr_free(struct ionic *ionic, int index)
226 {
227         if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
228                 clear_bit(index, ionic->intrs);
229 }
230
231 static int ionic_qcq_enable(struct ionic_qcq *qcq)
232 {
233         struct ionic_queue *q = &qcq->q;
234         struct ionic_lif *lif = q->lif;
235         struct ionic_dev *idev;
236         struct device *dev;
237
238         struct ionic_admin_ctx ctx = {
239                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
240                 .cmd.q_control = {
241                         .opcode = IONIC_CMD_Q_CONTROL,
242                         .lif_index = cpu_to_le16(lif->index),
243                         .type = q->type,
244                         .index = cpu_to_le32(q->index),
245                         .oper = IONIC_Q_ENABLE,
246                 },
247         };
248
249         idev = &lif->ionic->idev;
250         dev = lif->ionic->dev;
251
252         dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
253                 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
254
255         if (qcq->flags & IONIC_QCQ_F_INTR) {
256                 irq_set_affinity_hint(qcq->intr.vector,
257                                       &qcq->intr.affinity_mask);
258                 napi_enable(&qcq->napi);
259                 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
260                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
261                                 IONIC_INTR_MASK_CLEAR);
262         }
263
264         return ionic_adminq_post_wait(lif, &ctx);
265 }
266
267 static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
268 {
269         struct ionic_queue *q;
270         struct ionic_lif *lif;
271         int err = 0;
272
273         struct ionic_admin_ctx ctx = {
274                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
275                 .cmd.q_control = {
276                         .opcode = IONIC_CMD_Q_CONTROL,
277                         .oper = IONIC_Q_DISABLE,
278                 },
279         };
280
281         if (!qcq)
282                 return -ENXIO;
283
284         q = &qcq->q;
285         lif = q->lif;
286
287         if (qcq->flags & IONIC_QCQ_F_INTR) {
288                 struct ionic_dev *idev = &lif->ionic->idev;
289
290                 cancel_work_sync(&qcq->dim.work);
291                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
292                                 IONIC_INTR_MASK_SET);
293                 synchronize_irq(qcq->intr.vector);
294                 irq_set_affinity_hint(qcq->intr.vector, NULL);
295                 napi_disable(&qcq->napi);
296         }
297
298         if (send_to_hw) {
299                 ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
300                 ctx.cmd.q_control.type = q->type;
301                 ctx.cmd.q_control.index = cpu_to_le32(q->index);
302                 dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
303                         ctx.cmd.q_control.index, ctx.cmd.q_control.type);
304
305                 err = ionic_adminq_post_wait(lif, &ctx);
306         }
307
308         return err;
309 }
310
311 static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
312 {
313         struct ionic_dev *idev = &lif->ionic->idev;
314
315         if (!qcq)
316                 return;
317
318         if (!(qcq->flags & IONIC_QCQ_F_INITED))
319                 return;
320
321         if (qcq->flags & IONIC_QCQ_F_INTR) {
322                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
323                                 IONIC_INTR_MASK_SET);
324                 netif_napi_del(&qcq->napi);
325         }
326
327         qcq->flags &= ~IONIC_QCQ_F_INITED;
328 }
329
330 static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
331 {
332         if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
333                 return;
334
335         irq_set_affinity_hint(qcq->intr.vector, NULL);
336         devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
337         qcq->intr.vector = 0;
338         ionic_intr_free(lif->ionic, qcq->intr.index);
339         qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
340 }
341
342 static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
343 {
344         struct device *dev = lif->ionic->dev;
345
346         if (!qcq)
347                 return;
348
349         ionic_debugfs_del_qcq(qcq);
350
351         if (qcq->q_base) {
352                 dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
353                 qcq->q_base = NULL;
354                 qcq->q_base_pa = 0;
355         }
356
357         if (qcq->cq_base) {
358                 dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
359                 qcq->cq_base = NULL;
360                 qcq->cq_base_pa = 0;
361         }
362
363         if (qcq->sg_base) {
364                 dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
365                 qcq->sg_base = NULL;
366                 qcq->sg_base_pa = 0;
367         }
368
369         ionic_qcq_intr_free(lif, qcq);
370
371         if (qcq->cq.info) {
372                 devm_kfree(dev, qcq->cq.info);
373                 qcq->cq.info = NULL;
374         }
375         if (qcq->q.info) {
376                 devm_kfree(dev, qcq->q.info);
377                 qcq->q.info = NULL;
378         }
379 }
380
381 static void ionic_qcqs_free(struct ionic_lif *lif)
382 {
383         struct device *dev = lif->ionic->dev;
384
385         if (lif->notifyqcq) {
386                 ionic_qcq_free(lif, lif->notifyqcq);
387                 devm_kfree(dev, lif->notifyqcq);
388                 lif->notifyqcq = NULL;
389         }
390
391         if (lif->adminqcq) {
392                 ionic_qcq_free(lif, lif->adminqcq);
393                 devm_kfree(dev, lif->adminqcq);
394                 lif->adminqcq = NULL;
395         }
396
397         if (lif->rxqcqs) {
398                 devm_kfree(dev, lif->rxqstats);
399                 lif->rxqstats = NULL;
400                 devm_kfree(dev, lif->rxqcqs);
401                 lif->rxqcqs = NULL;
402         }
403
404         if (lif->txqcqs) {
405                 devm_kfree(dev, lif->txqstats);
406                 lif->txqstats = NULL;
407                 devm_kfree(dev, lif->txqcqs);
408                 lif->txqcqs = NULL;
409         }
410 }
411
412 static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
413                                       struct ionic_qcq *n_qcq)
414 {
415         if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
416                 ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
417                 n_qcq->flags &= ~IONIC_QCQ_F_INTR;
418         }
419
420         n_qcq->intr.vector = src_qcq->intr.vector;
421         n_qcq->intr.index = src_qcq->intr.index;
422 }
423
424 static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
425 {
426         int err;
427
428         if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
429                 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
430                 return 0;
431         }
432
433         err = ionic_intr_alloc(lif, &qcq->intr);
434         if (err) {
435                 netdev_warn(lif->netdev, "no intr for %s: %d\n",
436                             qcq->q.name, err);
437                 goto err_out;
438         }
439
440         err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
441         if (err < 0) {
442                 netdev_warn(lif->netdev, "no vector for %s: %d\n",
443                             qcq->q.name, err);
444                 goto err_out_free_intr;
445         }
446         qcq->intr.vector = err;
447         ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
448                                IONIC_INTR_MASK_SET);
449
450         err = ionic_request_irq(lif, qcq);
451         if (err) {
452                 netdev_warn(lif->netdev, "irq request failed %d\n", err);
453                 goto err_out_free_intr;
454         }
455
456         /* try to get the irq on the local numa node first */
457         qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
458                                              dev_to_node(lif->ionic->dev));
459         if (qcq->intr.cpu != -1)
460                 cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
461
462         netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
463         return 0;
464
465 err_out_free_intr:
466         ionic_intr_free(lif->ionic, qcq->intr.index);
467 err_out:
468         return err;
469 }
470
471 static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
472                            unsigned int index,
473                            const char *name, unsigned int flags,
474                            unsigned int num_descs, unsigned int desc_size,
475                            unsigned int cq_desc_size,
476                            unsigned int sg_desc_size,
477                            unsigned int pid, struct ionic_qcq **qcq)
478 {
479         struct ionic_dev *idev = &lif->ionic->idev;
480         struct device *dev = lif->ionic->dev;
481         void *q_base, *cq_base, *sg_base;
482         dma_addr_t cq_base_pa = 0;
483         dma_addr_t sg_base_pa = 0;
484         dma_addr_t q_base_pa = 0;
485         struct ionic_qcq *new;
486         int err;
487
488         *qcq = NULL;
489
490         new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
491         if (!new) {
492                 netdev_err(lif->netdev, "Cannot allocate queue structure\n");
493                 err = -ENOMEM;
494                 goto err_out;
495         }
496
497         new->flags = flags;
498
499         new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
500                                    GFP_KERNEL);
501         if (!new->q.info) {
502                 netdev_err(lif->netdev, "Cannot allocate queue info\n");
503                 err = -ENOMEM;
504                 goto err_out_free_qcq;
505         }
506
507         new->q.type = type;
508
509         err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
510                            desc_size, sg_desc_size, pid);
511         if (err) {
512                 netdev_err(lif->netdev, "Cannot initialize queue\n");
513                 goto err_out_free_q_info;
514         }
515
516         err = ionic_alloc_qcq_interrupt(lif, new);
517         if (err)
518                 goto err_out;
519
520         new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
521                                     GFP_KERNEL);
522         if (!new->cq.info) {
523                 netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
524                 err = -ENOMEM;
525                 goto err_out_free_irq;
526         }
527
528         err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
529         if (err) {
530                 netdev_err(lif->netdev, "Cannot initialize completion queue\n");
531                 goto err_out_free_cq_info;
532         }
533
534         if (flags & IONIC_QCQ_F_NOTIFYQ) {
535                 int q_size, cq_size;
536
537                 /* q & cq need to be contiguous in case of notifyq */
538                 q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
539                 cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
540
541                 new->q_size = PAGE_SIZE + q_size + cq_size;
542                 new->q_base = dma_alloc_coherent(dev, new->q_size,
543                                                  &new->q_base_pa, GFP_KERNEL);
544                 if (!new->q_base) {
545                         netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
546                         err = -ENOMEM;
547                         goto err_out_free_cq_info;
548                 }
549                 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
550                 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
551                 ionic_q_map(&new->q, q_base, q_base_pa);
552
553                 cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
554                 cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
555                 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
556                 ionic_cq_bind(&new->cq, &new->q);
557         } else {
558                 new->q_size = PAGE_SIZE + (num_descs * desc_size);
559                 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
560                                                  GFP_KERNEL);
561                 if (!new->q_base) {
562                         netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
563                         err = -ENOMEM;
564                         goto err_out_free_cq_info;
565                 }
566                 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
567                 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
568                 ionic_q_map(&new->q, q_base, q_base_pa);
569
570                 new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
571                 new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
572                                                   GFP_KERNEL);
573                 if (!new->cq_base) {
574                         netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
575                         err = -ENOMEM;
576                         goto err_out_free_q;
577                 }
578                 cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
579                 cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
580                 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
581                 ionic_cq_bind(&new->cq, &new->q);
582         }
583
584         if (flags & IONIC_QCQ_F_SG) {
585                 new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
586                 new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
587                                                   GFP_KERNEL);
588                 if (!new->sg_base) {
589                         netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
590                         err = -ENOMEM;
591                         goto err_out_free_cq;
592                 }
593                 sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
594                 sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
595                 ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
596         }
597
598         INIT_WORK(&new->dim.work, ionic_dim_work);
599         new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
600
601         *qcq = new;
602
603         return 0;
604
605 err_out_free_cq:
606         dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
607 err_out_free_q:
608         dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
609 err_out_free_cq_info:
610         devm_kfree(dev, new->cq.info);
611 err_out_free_irq:
612         if (flags & IONIC_QCQ_F_INTR) {
613                 devm_free_irq(dev, new->intr.vector, &new->napi);
614                 ionic_intr_free(lif->ionic, new->intr.index);
615         }
616 err_out_free_q_info:
617         devm_kfree(dev, new->q.info);
618 err_out_free_qcq:
619         devm_kfree(dev, new);
620 err_out:
621         dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
622         return err;
623 }
624
625 static int ionic_qcqs_alloc(struct ionic_lif *lif)
626 {
627         struct device *dev = lif->ionic->dev;
628         unsigned int flags;
629         int err;
630
631         flags = IONIC_QCQ_F_INTR;
632         err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
633                               IONIC_ADMINQ_LENGTH,
634                               sizeof(struct ionic_admin_cmd),
635                               sizeof(struct ionic_admin_comp),
636                               0, lif->kern_pid, &lif->adminqcq);
637         if (err)
638                 return err;
639         ionic_debugfs_add_qcq(lif, lif->adminqcq);
640
641         if (lif->ionic->nnqs_per_lif) {
642                 flags = IONIC_QCQ_F_NOTIFYQ;
643                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
644                                       flags, IONIC_NOTIFYQ_LENGTH,
645                                       sizeof(struct ionic_notifyq_cmd),
646                                       sizeof(union ionic_notifyq_comp),
647                                       0, lif->kern_pid, &lif->notifyqcq);
648                 if (err)
649                         goto err_out;
650                 ionic_debugfs_add_qcq(lif, lif->notifyqcq);
651
652                 /* Let the notifyq ride on the adminq interrupt */
653                 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
654         }
655
656         err = -ENOMEM;
657         lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
658                                    sizeof(struct ionic_qcq *), GFP_KERNEL);
659         if (!lif->txqcqs)
660                 goto err_out;
661         lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
662                                    sizeof(struct ionic_qcq *), GFP_KERNEL);
663         if (!lif->rxqcqs)
664                 goto err_out;
665
666         lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
667                                      sizeof(struct ionic_tx_stats), GFP_KERNEL);
668         if (!lif->txqstats)
669                 goto err_out;
670         lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
671                                      sizeof(struct ionic_rx_stats), GFP_KERNEL);
672         if (!lif->rxqstats)
673                 goto err_out;
674
675         return 0;
676
677 err_out:
678         ionic_qcqs_free(lif);
679         return err;
680 }
681
682 static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
683 {
684         qcq->q.tail_idx = 0;
685         qcq->q.head_idx = 0;
686         qcq->cq.tail_idx = 0;
687         qcq->cq.done_color = 1;
688         memset(qcq->q_base, 0, qcq->q_size);
689         memset(qcq->cq_base, 0, qcq->cq_size);
690         memset(qcq->sg_base, 0, qcq->sg_size);
691 }
692
693 static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
694 {
695         struct device *dev = lif->ionic->dev;
696         struct ionic_queue *q = &qcq->q;
697         struct ionic_cq *cq = &qcq->cq;
698         struct ionic_admin_ctx ctx = {
699                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
700                 .cmd.q_init = {
701                         .opcode = IONIC_CMD_Q_INIT,
702                         .lif_index = cpu_to_le16(lif->index),
703                         .type = q->type,
704                         .ver = lif->qtype_info[q->type].version,
705                         .index = cpu_to_le32(q->index),
706                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
707                                              IONIC_QINIT_F_SG),
708                         .pid = cpu_to_le16(q->pid),
709                         .ring_size = ilog2(q->num_descs),
710                         .ring_base = cpu_to_le64(q->base_pa),
711                         .cq_ring_base = cpu_to_le64(cq->base_pa),
712                         .sg_ring_base = cpu_to_le64(q->sg_base_pa),
713                 },
714         };
715         unsigned int intr_index;
716         int err;
717
718         if (qcq->flags & IONIC_QCQ_F_INTR)
719                 intr_index = qcq->intr.index;
720         else
721                 intr_index = lif->rxqcqs[q->index]->intr.index;
722         ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
723
724         dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
725         dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
726         dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
727         dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
728         dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
729         dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
730         dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
731
732         ionic_qcq_sanitize(qcq);
733
734         err = ionic_adminq_post_wait(lif, &ctx);
735         if (err)
736                 return err;
737
738         q->hw_type = ctx.comp.q_init.hw_type;
739         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
740         q->dbval = IONIC_DBELL_QID(q->hw_index);
741
742         dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
743         dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
744
745         if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
746                 netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
747                                NAPI_POLL_WEIGHT);
748
749         qcq->flags |= IONIC_QCQ_F_INITED;
750
751         return 0;
752 }
753
754 static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
755 {
756         struct device *dev = lif->ionic->dev;
757         struct ionic_queue *q = &qcq->q;
758         struct ionic_cq *cq = &qcq->cq;
759         struct ionic_admin_ctx ctx = {
760                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
761                 .cmd.q_init = {
762                         .opcode = IONIC_CMD_Q_INIT,
763                         .lif_index = cpu_to_le16(lif->index),
764                         .type = q->type,
765                         .ver = lif->qtype_info[q->type].version,
766                         .index = cpu_to_le32(q->index),
767                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
768                                              IONIC_QINIT_F_SG),
769                         .intr_index = cpu_to_le16(cq->bound_intr->index),
770                         .pid = cpu_to_le16(q->pid),
771                         .ring_size = ilog2(q->num_descs),
772                         .ring_base = cpu_to_le64(q->base_pa),
773                         .cq_ring_base = cpu_to_le64(cq->base_pa),
774                         .sg_ring_base = cpu_to_le64(q->sg_base_pa),
775                 },
776         };
777         int err;
778
779         dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
780         dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
781         dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
782         dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
783         dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
784         dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
785         dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
786
787         ionic_qcq_sanitize(qcq);
788
789         err = ionic_adminq_post_wait(lif, &ctx);
790         if (err)
791                 return err;
792
793         q->hw_type = ctx.comp.q_init.hw_type;
794         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
795         q->dbval = IONIC_DBELL_QID(q->hw_index);
796
797         dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
798         dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
799
800         if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
801                 netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
802                                NAPI_POLL_WEIGHT);
803         else
804                 netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
805                                NAPI_POLL_WEIGHT);
806
807         qcq->flags |= IONIC_QCQ_F_INITED;
808
809         return 0;
810 }
811
812 static bool ionic_notifyq_service(struct ionic_cq *cq,
813                                   struct ionic_cq_info *cq_info)
814 {
815         union ionic_notifyq_comp *comp = cq_info->cq_desc;
816         struct ionic_deferred_work *work;
817         struct net_device *netdev;
818         struct ionic_queue *q;
819         struct ionic_lif *lif;
820         u64 eid;
821
822         q = cq->bound_q;
823         lif = q->info[0].cb_arg;
824         netdev = lif->netdev;
825         eid = le64_to_cpu(comp->event.eid);
826
827         /* Have we run out of new completions to process? */
828         if ((s64)(eid - lif->last_eid) <= 0)
829                 return false;
830
831         lif->last_eid = eid;
832
833         dev_dbg(lif->ionic->dev, "notifyq event:\n");
834         dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
835                          comp, sizeof(*comp), true);
836
837         switch (le16_to_cpu(comp->event.ecode)) {
838         case IONIC_EVENT_LINK_CHANGE:
839                 ionic_link_status_check_request(lif, false);
840                 break;
841         case IONIC_EVENT_RESET:
842                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
843                 if (!work) {
844                         netdev_err(lif->netdev, "%s OOM\n", __func__);
845                 } else {
846                         work->type = IONIC_DW_TYPE_LIF_RESET;
847                         ionic_lif_deferred_enqueue(&lif->deferred, work);
848                 }
849                 break;
850         default:
851                 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
852                             comp->event.ecode, eid);
853                 break;
854         }
855
856         return true;
857 }
858
859 static bool ionic_adminq_service(struct ionic_cq *cq,
860                                  struct ionic_cq_info *cq_info)
861 {
862         struct ionic_admin_comp *comp = cq_info->cq_desc;
863
864         if (!color_match(comp->color, cq->done_color))
865                 return false;
866
867         ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
868
869         return true;
870 }
871
872 static int ionic_adminq_napi(struct napi_struct *napi, int budget)
873 {
874         struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
875         struct ionic_lif *lif = napi_to_cq(napi)->lif;
876         struct ionic_dev *idev = &lif->ionic->idev;
877         unsigned int flags = 0;
878         int n_work = 0;
879         int a_work = 0;
880         int work_done;
881
882         if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
883                 n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
884                                           ionic_notifyq_service, NULL, NULL);
885
886         if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
887                 a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
888                                           ionic_adminq_service, NULL, NULL);
889
890         work_done = max(n_work, a_work);
891         if (work_done < budget && napi_complete_done(napi, work_done)) {
892                 flags |= IONIC_INTR_CRED_UNMASK;
893                 lif->adminqcq->cq.bound_intr->rearm_count++;
894         }
895
896         if (work_done || flags) {
897                 flags |= IONIC_INTR_CRED_RESET_COALESCE;
898                 ionic_intr_credits(idev->intr_ctrl,
899                                    intr->index,
900                                    n_work + a_work, flags);
901         }
902
903         return work_done;
904 }
905
906 void ionic_get_stats64(struct net_device *netdev,
907                        struct rtnl_link_stats64 *ns)
908 {
909         struct ionic_lif *lif = netdev_priv(netdev);
910         struct ionic_lif_stats *ls;
911
912         memset(ns, 0, sizeof(*ns));
913         ls = &lif->info->stats;
914
915         ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
916                          le64_to_cpu(ls->rx_mcast_packets) +
917                          le64_to_cpu(ls->rx_bcast_packets);
918
919         ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
920                          le64_to_cpu(ls->tx_mcast_packets) +
921                          le64_to_cpu(ls->tx_bcast_packets);
922
923         ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
924                        le64_to_cpu(ls->rx_mcast_bytes) +
925                        le64_to_cpu(ls->rx_bcast_bytes);
926
927         ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
928                        le64_to_cpu(ls->tx_mcast_bytes) +
929                        le64_to_cpu(ls->tx_bcast_bytes);
930
931         ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
932                          le64_to_cpu(ls->rx_mcast_drop_packets) +
933                          le64_to_cpu(ls->rx_bcast_drop_packets);
934
935         ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
936                          le64_to_cpu(ls->tx_mcast_drop_packets) +
937                          le64_to_cpu(ls->tx_bcast_drop_packets);
938
939         ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
940
941         ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
942
943         ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
944                                le64_to_cpu(ls->rx_queue_disabled) +
945                                le64_to_cpu(ls->rx_desc_fetch_error) +
946                                le64_to_cpu(ls->rx_desc_data_error);
947
948         ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
949                                 le64_to_cpu(ls->tx_queue_disabled) +
950                                 le64_to_cpu(ls->tx_desc_fetch_error) +
951                                 le64_to_cpu(ls->tx_desc_data_error);
952
953         ns->rx_errors = ns->rx_over_errors +
954                         ns->rx_missed_errors;
955
956         ns->tx_errors = ns->tx_aborted_errors;
957 }
958
959 static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
960 {
961         struct ionic_admin_ctx ctx = {
962                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
963                 .cmd.rx_filter_add = {
964                         .opcode = IONIC_CMD_RX_FILTER_ADD,
965                         .lif_index = cpu_to_le16(lif->index),
966                         .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
967                 },
968         };
969         struct ionic_rx_filter *f;
970         int err;
971
972         /* don't bother if we already have it */
973         spin_lock_bh(&lif->rx_filters.lock);
974         f = ionic_rx_filter_by_addr(lif, addr);
975         spin_unlock_bh(&lif->rx_filters.lock);
976         if (f)
977                 return 0;
978
979         netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);
980
981         memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
982         err = ionic_adminq_post_wait(lif, &ctx);
983         if (err && err != -EEXIST)
984                 return err;
985
986         return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
987 }
988
989 static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
990 {
991         struct ionic_admin_ctx ctx = {
992                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
993                 .cmd.rx_filter_del = {
994                         .opcode = IONIC_CMD_RX_FILTER_DEL,
995                         .lif_index = cpu_to_le16(lif->index),
996                 },
997         };
998         struct ionic_rx_filter *f;
999         int err;
1000
1001         spin_lock_bh(&lif->rx_filters.lock);
1002         f = ionic_rx_filter_by_addr(lif, addr);
1003         if (!f) {
1004                 spin_unlock_bh(&lif->rx_filters.lock);
1005                 return -ENOENT;
1006         }
1007
1008         netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
1009                    addr, f->filter_id);
1010
1011         ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1012         ionic_rx_filter_free(lif, f);
1013         spin_unlock_bh(&lif->rx_filters.lock);
1014
1015         err = ionic_adminq_post_wait(lif, &ctx);
1016         if (err && err != -EEXIST)
1017                 return err;
1018
1019         return 0;
1020 }
1021
1022 static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
1023                           bool can_sleep)
1024 {
1025         struct ionic_deferred_work *work;
1026         unsigned int nmfilters;
1027         unsigned int nufilters;
1028
1029         if (add) {
1030                 /* Do we have space for this filter?  We test the counters
1031                  * here before checking the need for deferral so that we
1032                  * can return an overflow error to the stack.
1033                  */
1034                 nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
1035                 nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
1036
1037                 if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
1038                         lif->nmcast++;
1039                 else if (!is_multicast_ether_addr(addr) &&
1040                          lif->nucast < nufilters)
1041                         lif->nucast++;
1042                 else
1043                         return -ENOSPC;
1044         } else {
1045                 if (is_multicast_ether_addr(addr) && lif->nmcast)
1046                         lif->nmcast--;
1047                 else if (!is_multicast_ether_addr(addr) && lif->nucast)
1048                         lif->nucast--;
1049         }
1050
1051         if (!can_sleep) {
1052                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1053                 if (!work) {
1054                         netdev_err(lif->netdev, "%s OOM\n", __func__);
1055                         return -ENOMEM;
1056                 }
1057                 work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
1058                                    IONIC_DW_TYPE_RX_ADDR_DEL;
1059                 memcpy(work->addr, addr, ETH_ALEN);
1060                 netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
1061                            add ? "add" : "del", addr);
1062                 ionic_lif_deferred_enqueue(&lif->deferred, work);
1063         } else {
1064                 netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
1065                            add ? "add" : "del", addr);
1066                 if (add)
1067                         return ionic_lif_addr_add(lif, addr);
1068                 else
1069                         return ionic_lif_addr_del(lif, addr);
1070         }
1071
1072         return 0;
1073 }
1074
1075 static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
1076 {
1077         return ionic_lif_addr(netdev_priv(netdev), addr, true, true);
1078 }
1079
1080 static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
1081 {
1082         return ionic_lif_addr(netdev_priv(netdev), addr, true, false);
1083 }
1084
1085 static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
1086 {
1087         return ionic_lif_addr(netdev_priv(netdev), addr, false, true);
1088 }
1089
1090 static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
1091 {
1092         return ionic_lif_addr(netdev_priv(netdev), addr, false, false);
1093 }
1094
1095 static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
1096 {
1097         struct ionic_admin_ctx ctx = {
1098                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1099                 .cmd.rx_mode_set = {
1100                         .opcode = IONIC_CMD_RX_MODE_SET,
1101                         .lif_index = cpu_to_le16(lif->index),
1102                         .rx_mode = cpu_to_le16(rx_mode),
1103                 },
1104         };
1105         char buf[128];
1106         int err;
1107         int i;
1108 #define REMAIN(__x) (sizeof(buf) - (__x))
1109
1110         i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
1111                       lif->rx_mode, rx_mode);
1112         if (rx_mode & IONIC_RX_MODE_F_UNICAST)
1113                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
1114         if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
1115                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
1116         if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
1117                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
1118         if (rx_mode & IONIC_RX_MODE_F_PROMISC)
1119                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
1120         if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
1121                 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
1122         netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
1123
1124         err = ionic_adminq_post_wait(lif, &ctx);
1125         if (err)
1126                 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
1127                             rx_mode, err);
1128         else
1129                 lif->rx_mode = rx_mode;
1130 }
1131
1132 static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode,
1133                                bool from_ndo)
1134 {
1135         struct ionic_deferred_work *work;
1136
1137         if (from_ndo) {
1138                 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1139                 if (!work) {
1140                         netdev_err(lif->netdev, "%s OOM\n", __func__);
1141                         return;
1142                 }
1143                 work->type = IONIC_DW_TYPE_RX_MODE;
1144                 work->rx_mode = rx_mode;
1145                 netdev_dbg(lif->netdev, "deferred: rx_mode\n");
1146                 ionic_lif_deferred_enqueue(&lif->deferred, work);
1147         } else {
1148                 ionic_lif_rx_mode(lif, rx_mode);
1149         }
1150 }
1151
1152 static void ionic_dev_uc_sync(struct net_device *netdev, bool from_ndo)
1153 {
1154         if (from_ndo)
1155                 __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
1156         else
1157                 __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1158
1159 }
1160
1161 static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
1162 {
1163         struct ionic_lif *lif = netdev_priv(netdev);
1164         unsigned int nfilters;
1165         unsigned int rx_mode;
1166
1167         rx_mode = IONIC_RX_MODE_F_UNICAST;
1168         rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1169         rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1170         rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1171         rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1172
1173         /* sync unicast addresses
1174          * next check to see if we're in an overflow state
1175          *    if so, we track that we overflowed and enable NIC PROMISC
1176          *    else if the overflow is set and not needed
1177          *       we remove our overflow flag and check the netdev flags
1178          *       to see if we can disable NIC PROMISC
1179          */
1180         ionic_dev_uc_sync(netdev, from_ndo);
1181         nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
1182         if (netdev_uc_count(netdev) + 1 > nfilters) {
1183                 rx_mode |= IONIC_RX_MODE_F_PROMISC;
1184                 lif->uc_overflow = true;
1185         } else if (lif->uc_overflow) {
1186                 lif->uc_overflow = false;
1187                 if (!(netdev->flags & IFF_PROMISC))
1188                         rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1189         }
1190
1191         /* same for multicast */
1192         ionic_dev_uc_sync(netdev, from_ndo);
1193         nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
1194         if (netdev_mc_count(netdev) > nfilters) {
1195                 rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1196                 lif->mc_overflow = true;
1197         } else if (lif->mc_overflow) {
1198                 lif->mc_overflow = false;
1199                 if (!(netdev->flags & IFF_ALLMULTI))
1200                         rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1201         }
1202
1203         if (lif->rx_mode != rx_mode)
1204                 _ionic_lif_rx_mode(lif, rx_mode, from_ndo);
1205 }
1206
1207 static void ionic_ndo_set_rx_mode(struct net_device *netdev)
1208 {
1209         ionic_set_rx_mode(netdev, true);
1210 }
1211
1212 static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1213 {
1214         u64 wanted = 0;
1215
1216         if (features & NETIF_F_HW_VLAN_CTAG_TX)
1217                 wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1218         if (features & NETIF_F_HW_VLAN_CTAG_RX)
1219                 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1220         if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1221                 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1222         if (features & NETIF_F_RXHASH)
1223                 wanted |= IONIC_ETH_HW_RX_HASH;
1224         if (features & NETIF_F_RXCSUM)
1225                 wanted |= IONIC_ETH_HW_RX_CSUM;
1226         if (features & NETIF_F_SG)
1227                 wanted |= IONIC_ETH_HW_TX_SG;
1228         if (features & NETIF_F_HW_CSUM)
1229                 wanted |= IONIC_ETH_HW_TX_CSUM;
1230         if (features & NETIF_F_TSO)
1231                 wanted |= IONIC_ETH_HW_TSO;
1232         if (features & NETIF_F_TSO6)
1233                 wanted |= IONIC_ETH_HW_TSO_IPV6;
1234         if (features & NETIF_F_TSO_ECN)
1235                 wanted |= IONIC_ETH_HW_TSO_ECN;
1236         if (features & NETIF_F_GSO_GRE)
1237                 wanted |= IONIC_ETH_HW_TSO_GRE;
1238         if (features & NETIF_F_GSO_GRE_CSUM)
1239                 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1240         if (features & NETIF_F_GSO_IPXIP4)
1241                 wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1242         if (features & NETIF_F_GSO_IPXIP6)
1243                 wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1244         if (features & NETIF_F_GSO_UDP_TUNNEL)
1245                 wanted |= IONIC_ETH_HW_TSO_UDP;
1246         if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1247                 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1248
1249         return cpu_to_le64(wanted);
1250 }
1251
1252 static int ionic_set_nic_features(struct ionic_lif *lif,
1253                                   netdev_features_t features)
1254 {
1255         struct device *dev = lif->ionic->dev;
1256         struct ionic_admin_ctx ctx = {
1257                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1258                 .cmd.lif_setattr = {
1259                         .opcode = IONIC_CMD_LIF_SETATTR,
1260                         .index = cpu_to_le16(lif->index),
1261                         .attr = IONIC_LIF_ATTR_FEATURES,
1262                 },
1263         };
1264         u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1265                          IONIC_ETH_HW_VLAN_RX_STRIP |
1266                          IONIC_ETH_HW_VLAN_RX_FILTER;
1267         u64 old_hw_features;
1268         int err;
1269
1270         ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1271         err = ionic_adminq_post_wait(lif, &ctx);
1272         if (err)
1273                 return err;
1274
1275         old_hw_features = lif->hw_features;
1276         lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1277                                        ctx.comp.lif_setattr.features);
1278
1279         if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
1280                 ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1281
1282         if ((vlan_flags & features) &&
1283             !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
1284                 dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
1285
1286         if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1287                 dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1288         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1289                 dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1290         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1291                 dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1292         if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1293                 dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1294         if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1295                 dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1296         if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1297                 dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1298         if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1299                 dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1300         if (lif->hw_features & IONIC_ETH_HW_TSO)
1301                 dev_dbg(dev, "feature ETH_HW_TSO\n");
1302         if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1303                 dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1304         if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1305                 dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1306         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1307                 dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1308         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1309                 dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1310         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1311                 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1312         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1313                 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1314         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1315                 dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1316         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1317                 dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1318
1319         return 0;
1320 }
1321
1322 static int ionic_init_nic_features(struct ionic_lif *lif)
1323 {
1324         struct net_device *netdev = lif->netdev;
1325         netdev_features_t features;
1326         int err;
1327
1328         /* set up what we expect to support by default */
1329         features = NETIF_F_HW_VLAN_CTAG_TX |
1330                    NETIF_F_HW_VLAN_CTAG_RX |
1331                    NETIF_F_HW_VLAN_CTAG_FILTER |
1332                    NETIF_F_RXHASH |
1333                    NETIF_F_SG |
1334                    NETIF_F_HW_CSUM |
1335                    NETIF_F_RXCSUM |
1336                    NETIF_F_TSO |
1337                    NETIF_F_TSO6 |
1338                    NETIF_F_TSO_ECN;
1339
1340         err = ionic_set_nic_features(lif, features);
1341         if (err)
1342                 return err;
1343
1344         /* tell the netdev what we actually can support */
1345         netdev->features |= NETIF_F_HIGHDMA;
1346
1347         if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1348                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1349         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1350                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1351         if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1352                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1353         if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1354                 netdev->hw_features |= NETIF_F_RXHASH;
1355         if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1356                 netdev->hw_features |= NETIF_F_SG;
1357
1358         if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1359                 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1360         if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1361                 netdev->hw_enc_features |= NETIF_F_RXCSUM;
1362         if (lif->hw_features & IONIC_ETH_HW_TSO)
1363                 netdev->hw_enc_features |= NETIF_F_TSO;
1364         if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1365                 netdev->hw_enc_features |= NETIF_F_TSO6;
1366         if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1367                 netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1368         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1369                 netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1370         if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1371                 netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1372         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1373                 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1374         if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1375                 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1376         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1377                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1378         if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1379                 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1380
1381         netdev->hw_features |= netdev->hw_enc_features;
1382         netdev->features |= netdev->hw_features;
1383         netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
1384
1385         netdev->priv_flags |= IFF_UNICAST_FLT |
1386                               IFF_LIVE_ADDR_CHANGE;
1387
1388         return 0;
1389 }
1390
1391 static int ionic_set_features(struct net_device *netdev,
1392                               netdev_features_t features)
1393 {
1394         struct ionic_lif *lif = netdev_priv(netdev);
1395         int err;
1396
1397         netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1398                    __func__, (u64)lif->netdev->features, (u64)features);
1399
1400         err = ionic_set_nic_features(lif, features);
1401
1402         return err;
1403 }
1404
1405 static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1406 {
1407         struct sockaddr *addr = sa;
1408         u8 *mac;
1409         int err;
1410
1411         mac = (u8 *)addr->sa_data;
1412         if (ether_addr_equal(netdev->dev_addr, mac))
1413                 return 0;
1414
1415         err = eth_prepare_mac_addr_change(netdev, addr);
1416         if (err)
1417                 return err;
1418
1419         if (!is_zero_ether_addr(netdev->dev_addr)) {
1420                 netdev_info(netdev, "deleting mac addr %pM\n",
1421                             netdev->dev_addr);
1422                 ionic_addr_del(netdev, netdev->dev_addr);
1423         }
1424
1425         eth_commit_mac_addr_change(netdev, addr);
1426         netdev_info(netdev, "updating mac addr %pM\n", mac);
1427
1428         return ionic_addr_add(netdev, mac);
1429 }
1430
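/* Note: ionic_stop_queues_reconfig() takes lif->queue_lock and leaves it
 * held; the matching ionic_start_queues_reconfig() is expected to release it.
 */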
1431 static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
1432 {
1433         /* Stop and clean the queues before reconfiguration */
1434         mutex_lock(&lif->queue_lock);
1435         netif_device_detach(lif->netdev);
1436         ionic_stop_queues(lif);
1437         ionic_txrx_deinit(lif);
1438 }
1439
1440 static int ionic_start_queues_reconfig(struct ionic_lif *lif)
1441 {
1442         int err;
1443
1444         /* Re-init the queues after reconfiguration */
1445
1446         /* The only way txrx_init can fail here is if communication
1447          * with FW is suddenly broken.  There's not much we can do
1448          * at this point - error messages have already been printed,
1449          * so we can continue on and the user can eventually do a
1450          * DOWN and UP to try to reset and clear the issue.
1451          */
1452         err = ionic_txrx_init(lif);
1453         mutex_unlock(&lif->queue_lock);
1454         ionic_link_status_check_request(lif, true);
1455         netif_device_attach(lif->netdev);
1456
1457         return err;
1458 }
1459
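/* Push the new MTU to the NIC with a LIF_SETATTR adminq command, then,
 * if the interface is running, stop and re-init the queues so they are
 * rebuilt for the new size.
 */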
1460 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1461 {
1462         struct ionic_lif *lif = netdev_priv(netdev);
1463         struct ionic_admin_ctx ctx = {
1464                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1465                 .cmd.lif_setattr = {
1466                         .opcode = IONIC_CMD_LIF_SETATTR,
1467                         .index = cpu_to_le16(lif->index),
1468                         .attr = IONIC_LIF_ATTR_MTU,
1469                         .mtu = cpu_to_le32(new_mtu),
1470                 },
1471         };
1472         int err;
1473
1474         err = ionic_adminq_post_wait(lif, &ctx);
1475         if (err)
1476                 return err;
1477
1478         netdev->mtu = new_mtu;
1479         /* if we're not running, nothing more to do */
1480         if (!netif_running(netdev))
1481                 return 0;
1482
1483         ionic_stop_queues_reconfig(lif);
1484         return ionic_start_queues_reconfig(lif);
1485 }
1486
1487 static void ionic_tx_timeout_work(struct work_struct *ws)
1488 {
1489         struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1490
1491         netdev_info(lif->netdev, "Tx Timeout recovery\n");
1492
1493         /* if we were stopped before this scheduled job was launched,
1494          * don't bother the queues as they are already stopped.
1495          */
1496         if (!netif_running(lif->netdev))
1497                 return;
1498
1499         ionic_stop_queues_reconfig(lif);
1500         ionic_start_queues_reconfig(lif);
1501 }
1502
1503 static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1504 {
1505         struct ionic_lif *lif = netdev_priv(netdev);
1506
1507         schedule_work(&lif->tx_timeout_work);
1508 }
1509
1510 static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1511                                  u16 vid)
1512 {
1513         struct ionic_lif *lif = netdev_priv(netdev);
1514         struct ionic_admin_ctx ctx = {
1515                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1516                 .cmd.rx_filter_add = {
1517                         .opcode = IONIC_CMD_RX_FILTER_ADD,
1518                         .lif_index = cpu_to_le16(lif->index),
1519                         .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
1520                         .vlan.vlan = cpu_to_le16(vid),
1521                 },
1522         };
1523         int err;
1524
1525         netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
1526         err = ionic_adminq_post_wait(lif, &ctx);
1527         if (err)
1528                 return err;
1529
1530         return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
1531 }
1532
1533 static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1534                                   u16 vid)
1535 {
1536         struct ionic_lif *lif = netdev_priv(netdev);
1537         struct ionic_admin_ctx ctx = {
1538                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1539                 .cmd.rx_filter_del = {
1540                         .opcode = IONIC_CMD_RX_FILTER_DEL,
1541                         .lif_index = cpu_to_le16(lif->index),
1542                 },
1543         };
1544         struct ionic_rx_filter *f;
1545
1546         spin_lock_bh(&lif->rx_filters.lock);
1547
1548         f = ionic_rx_filter_by_vlan(lif, vid);
1549         if (!f) {
1550                 spin_unlock_bh(&lif->rx_filters.lock);
1551                 return -ENOENT;
1552         }
1553
1554         netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
1555                    vid, f->filter_id);
1556
1557         ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1558         ionic_rx_filter_free(lif, f);
1559         spin_unlock_bh(&lif->rx_filters.lock);
1560
1561         return ionic_adminq_post_wait(lif, &ctx);
1562 }
1563
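/* Update the LIF's RSS state - hash types, optional hash key, and
 * optional indirection table - and push the new settings to the NIC
 * with a LIF_SETATTR(RSS) adminq command.
 */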
1564 int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1565                          const u8 *key, const u32 *indir)
1566 {
1567         struct ionic_admin_ctx ctx = {
1568                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1569                 .cmd.lif_setattr = {
1570                         .opcode = IONIC_CMD_LIF_SETATTR,
1571                         .attr = IONIC_LIF_ATTR_RSS,
1572                         .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1573                 },
1574         };
1575         unsigned int i, tbl_sz;
1576
1577         if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1578                 lif->rss_types = types;
1579                 ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1580         }
1581
1582         if (key)
1583                 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1584
1585         if (indir) {
1586                 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1587                 for (i = 0; i < tbl_sz; i++)
1588                         lif->rss_ind_tbl[i] = indir[i];
1589         }
1590
1591         memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1592                IONIC_RSS_HASH_KEY_SIZE);
1593
1594         return ionic_adminq_post_wait(lif, &ctx);
1595 }
1596
1597 static int ionic_lif_rss_init(struct ionic_lif *lif)
1598 {
1599         unsigned int tbl_sz;
1600         unsigned int i;
1601
1602         lif->rss_types = IONIC_RSS_TYPE_IPV4     |
1603                          IONIC_RSS_TYPE_IPV4_TCP |
1604                          IONIC_RSS_TYPE_IPV4_UDP |
1605                          IONIC_RSS_TYPE_IPV6     |
1606                          IONIC_RSS_TYPE_IPV6_TCP |
1607                          IONIC_RSS_TYPE_IPV6_UDP;
1608
1609         /* Fill indirection table with 'default' values */
1610         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1611         for (i = 0; i < tbl_sz; i++)
1612                 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1613
1614         return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1615 }
1616
1617 static void ionic_lif_rss_deinit(struct ionic_lif *lif)
1618 {
1619         int tbl_sz;
1620
1621         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1622         memset(lif->rss_ind_tbl, 0, tbl_sz);
1623         memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1624
1625         ionic_lif_rss_config(lif, 0x0, NULL, NULL);
1626 }
1627
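/* Disable all tx and rx queues.  Each result is fed into the next
 * disable call so the helper can back off from the hardware once a
 * command has timed out.
 */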
1628 static void ionic_txrx_disable(struct ionic_lif *lif)
1629 {
1630         unsigned int i;
1631         int err = 0;
1632
1633         if (lif->txqcqs) {
1634                 for (i = 0; i < lif->nxqs; i++)
1635                         err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
1636         }
1637
1638         if (lif->rxqcqs) {
1639                 for (i = 0; i < lif->nxqs; i++)
1640                         err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
1641         }
1642 }
1643
1644 static void ionic_txrx_deinit(struct ionic_lif *lif)
1645 {
1646         unsigned int i;
1647
1648         if (lif->txqcqs) {
1649                 for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
1650                         ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
1651                         ionic_tx_flush(&lif->txqcqs[i]->cq);
1652                         ionic_tx_empty(&lif->txqcqs[i]->q);
1653                 }
1654         }
1655
1656         if (lif->rxqcqs) {
1657                 for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
1658                         ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
1659                         ionic_rx_flush(&lif->rxqcqs[i]->cq);
1660                         ionic_rx_empty(&lif->rxqcqs[i]->q);
1661                 }
1662         }
1663         lif->rx_mode = 0;
1664 }
1665
1666 static void ionic_txrx_free(struct ionic_lif *lif)
1667 {
1668         unsigned int i;
1669
1670         if (lif->txqcqs) {
1671                 for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
1672                         ionic_qcq_free(lif, lif->txqcqs[i]);
1673                         devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
1674                         lif->txqcqs[i] = NULL;
1675                 }
1676         }
1677
1678         if (lif->rxqcqs) {
1679                 for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
1680                         ionic_qcq_free(lif, lif->rxqcqs[i]);
1681                         devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
1682                         lif->rxqcqs[i] = NULL;
1683                 }
1684         }
1685 }
1686
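/* Allocate the tx and rx qcqs.  The tx SG descriptor size depends on
 * the TXQ qtype version reported by the device.  Tx queues get their
 * own interrupts only in split-interrupt mode; otherwise each tx queue
 * shares its partner rx queue's interrupt.
 */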
1687 static int ionic_txrx_alloc(struct ionic_lif *lif)
1688 {
1689         unsigned int sg_desc_sz;
1690         unsigned int flags;
1691         unsigned int i;
1692         int err = 0;
1693
1694         if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
1695             lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
1696                                           sizeof(struct ionic_txq_sg_desc_v1))
1697                 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
1698         else
1699                 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
1700
1701         flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
1702         if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
1703                 flags |= IONIC_QCQ_F_INTR;
1704         for (i = 0; i < lif->nxqs; i++) {
1705                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
1706                                       lif->ntxq_descs,
1707                                       sizeof(struct ionic_txq_desc),
1708                                       sizeof(struct ionic_txq_comp),
1709                                       sg_desc_sz,
1710                                       lif->kern_pid, &lif->txqcqs[i]);
1711                 if (err)
1712                         goto err_out;
1713
1714                 if (flags & IONIC_QCQ_F_INTR) {
1715                         ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
1716                                              lif->txqcqs[i]->intr.index,
1717                                              lif->tx_coalesce_hw);
1718                         if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
1719                                 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
1720                 }
1721
1722                 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
1723         }
1724
1725         flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
1726         for (i = 0; i < lif->nxqs; i++) {
1727                 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
1728                                       lif->nrxq_descs,
1729                                       sizeof(struct ionic_rxq_desc),
1730                                       sizeof(struct ionic_rxq_comp),
1731                                       sizeof(struct ionic_rxq_sg_desc),
1732                                       lif->kern_pid, &lif->rxqcqs[i]);
1733                 if (err)
1734                         goto err_out;
1735
1736                 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
1737                                      lif->rxqcqs[i]->intr.index,
1738                                      lif->rx_coalesce_hw);
1739                 if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
1740                         lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;
1741
1742                 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
1743                         ionic_link_qcq_interrupts(lif->rxqcqs[i],
1744                                                   lif->txqcqs[i]);
1745
1746                 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
1747         }
1748
1749         return 0;
1750
1751 err_out:
1752         ionic_txrx_free(lif);
1753
1754         return err;
1755 }
1756
1757 static int ionic_txrx_init(struct ionic_lif *lif)
1758 {
1759         unsigned int i;
1760         int err;
1761
1762         for (i = 0; i < lif->nxqs; i++) {
1763                 err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
1764                 if (err)
1765                         goto err_out;
1766
1767                 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
1768                 if (err) {
1769                         ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
1770                         goto err_out;
1771                 }
1772         }
1773
1774         if (lif->netdev->features & NETIF_F_RXHASH)
1775                 ionic_lif_rss_init(lif);
1776
1777         ionic_set_rx_mode(lif->netdev, false);
1778
1779         return 0;
1780
1781 err_out:
1782         while (i--) {
1783                 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
1784                 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
1785         }
1786
1787         return err;
1788 }
1789
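/* Fill and enable each rx queue, then enable its partner tx queue.
 * On failure, walk back and disable anything already enabled.
 */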
1790 static int ionic_txrx_enable(struct ionic_lif *lif)
1791 {
1792         int derr = 0;
1793         int i, err;
1794
1795         for (i = 0; i < lif->nxqs; i++) {
1796                 if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
1797                         dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
1798                         err = -ENXIO;
1799                         goto err_out;
1800                 }
1801
1802                 ionic_rx_fill(&lif->rxqcqs[i]->q);
1803                 err = ionic_qcq_enable(lif->rxqcqs[i]);
1804                 if (err)
1805                         goto err_out;
1806
1807                 err = ionic_qcq_enable(lif->txqcqs[i]);
1808                 if (err) {
1809                         derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
1810                         goto err_out;
1811                 }
1812         }
1813
1814         return 0;
1815
1816 err_out:
1817         while (i--) {
1818                 derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
1819                 derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
1820         }
1821
1822         return err;
1823 }
1824
1825 static int ionic_start_queues(struct ionic_lif *lif)
1826 {
1827         int err;
1828
1829         if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
1830                 return 0;
1831
1832         err = ionic_txrx_enable(lif);
1833         if (err) {
1834                 clear_bit(IONIC_LIF_F_UP, lif->state);
1835                 return err;
1836         }
1837         netif_tx_wake_all_queues(lif->netdev);
1838
1839         return 0;
1840 }
1841
1842 static int ionic_open(struct net_device *netdev)
1843 {
1844         struct ionic_lif *lif = netdev_priv(netdev);
1845         int err;
1846
1847         err = ionic_txrx_alloc(lif);
1848         if (err)
1849                 return err;
1850
1851         err = ionic_txrx_init(lif);
1852         if (err)
1853                 goto err_out;
1854
1855         err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
1856         if (err)
1857                 goto err_txrx_deinit;
1858
1859         err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
1860         if (err)
1861                 goto err_txrx_deinit;
1862
1863         /* don't start the queues until we have link */
1864         if (netif_carrier_ok(netdev)) {
1865                 err = ionic_start_queues(lif);
1866                 if (err)
1867                         goto err_txrx_deinit;
1868         }
1869
1870         return 0;
1871
1872 err_txrx_deinit:
1873         ionic_txrx_deinit(lif);
1874 err_out:
1875         ionic_txrx_free(lif);
1876         return err;
1877 }
1878
1879 static void ionic_stop_queues(struct ionic_lif *lif)
1880 {
1881         if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
1882                 return;
1883
1884         netif_tx_disable(lif->netdev);
1885         ionic_txrx_disable(lif);
1886 }
1887
1888 static int ionic_stop(struct net_device *netdev)
1889 {
1890         struct ionic_lif *lif = netdev_priv(netdev);
1891
1892         if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
1893                 return 0;
1894
1895         ionic_stop_queues(lif);
1896         ionic_txrx_deinit(lif);
1897         ionic_txrx_free(lif);
1898
1899         return 0;
1900 }
1901
1902 static int ionic_get_vf_config(struct net_device *netdev,
1903                                int vf, struct ifla_vf_info *ivf)
1904 {
1905         struct ionic_lif *lif = netdev_priv(netdev);
1906         struct ionic *ionic = lif->ionic;
1907         int ret = 0;
1908
1909         if (!netif_device_present(netdev))
1910                 return -EBUSY;
1911
1912         down_read(&ionic->vf_op_lock);
1913
1914         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1915                 ret = -EINVAL;
1916         } else {
1917                 ivf->vf           = vf;
1918                 ivf->vlan         = ionic->vfs[vf].vlanid;
1919                 ivf->qos          = 0;
1920                 ivf->spoofchk     = ionic->vfs[vf].spoofchk;
1921                 ivf->linkstate    = ionic->vfs[vf].linkstate;
1922                 ivf->max_tx_rate  = ionic->vfs[vf].maxrate;
1923                 ivf->trusted      = ionic->vfs[vf].trusted;
1924                 ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
1925         }
1926
1927         up_read(&ionic->vf_op_lock);
1928         return ret;
1929 }
1930
1931 static int ionic_get_vf_stats(struct net_device *netdev, int vf,
1932                               struct ifla_vf_stats *vf_stats)
1933 {
1934         struct ionic_lif *lif = netdev_priv(netdev);
1935         struct ionic *ionic = lif->ionic;
1936         struct ionic_lif_stats *vs;
1937         int ret = 0;
1938
1939         if (!netif_device_present(netdev))
1940                 return -EBUSY;
1941
1942         down_read(&ionic->vf_op_lock);
1943
1944         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1945                 ret = -EINVAL;
1946         } else {
1947                 memset(vf_stats, 0, sizeof(*vf_stats));
1948                 vs = &ionic->vfs[vf].stats;
1949
1950                 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
1951                 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
1952                 vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
1953                 vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
1954                 vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
1955                 vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
1956                 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
1957                                        le64_to_cpu(vs->rx_mcast_drop_packets) +
1958                                        le64_to_cpu(vs->rx_bcast_drop_packets);
1959                 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
1960                                        le64_to_cpu(vs->tx_mcast_drop_packets) +
1961                                        le64_to_cpu(vs->tx_bcast_drop_packets);
1962         }
1963
1964         up_read(&ionic->vf_op_lock);
1965         return ret;
1966 }
1967
1968 static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1969 {
1970         struct ionic_lif *lif = netdev_priv(netdev);
1971         struct ionic *ionic = lif->ionic;
1972         int ret;
1973
1974         if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
1975                 return -EINVAL;
1976
1977         if (!netif_device_present(netdev))
1978                 return -EBUSY;
1979
1980         down_write(&ionic->vf_op_lock);
1981
1982         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1983                 ret = -EINVAL;
1984         } else {
1985                 ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
1986                 if (!ret)
1987                         ether_addr_copy(ionic->vfs[vf].macaddr, mac);
1988         }
1989
1990         up_write(&ionic->vf_op_lock);
1991         return ret;
1992 }
1993
1994 static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1995                              u8 qos, __be16 proto)
1996 {
1997         struct ionic_lif *lif = netdev_priv(netdev);
1998         struct ionic *ionic = lif->ionic;
1999         int ret;
2000
2001         /* until someday when we support qos */
2002         if (qos)
2003                 return -EINVAL;
2004
2005         if (vlan > 4095)
2006                 return -EINVAL;
2007
2008         if (proto != htons(ETH_P_8021Q))
2009                 return -EPROTONOSUPPORT;
2010
2011         if (!netif_device_present(netdev))
2012                 return -EBUSY;
2013
2014         down_write(&ionic->vf_op_lock);
2015
2016         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2017                 ret = -EINVAL;
2018         } else {
2019                 ret = ionic_set_vf_config(ionic, vf,
2020                                           IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
2021                 if (!ret)
2022                         ionic->vfs[vf].vlanid = vlan;
2023         }
2024
2025         up_write(&ionic->vf_op_lock);
2026         return ret;
2027 }
2028
2029 static int ionic_set_vf_rate(struct net_device *netdev, int vf,
2030                              int tx_min, int tx_max)
2031 {
2032         struct ionic_lif *lif = netdev_priv(netdev);
2033         struct ionic *ionic = lif->ionic;
2034         int ret;
2035
2036         /* setting a minimum tx rate is not supported */
2037         if (tx_min)
2038                 return -EINVAL;
2039
2040         if (!netif_device_present(netdev))
2041                 return -EBUSY;
2042
2043         down_write(&ionic->vf_op_lock);
2044
2045         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2046                 ret = -EINVAL;
2047         } else {
2048                 ret = ionic_set_vf_config(ionic, vf,
2049                                           IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
2050                 if (!ret)
2051                         lif->ionic->vfs[vf].maxrate = tx_max;
2052         }
2053
2054         up_write(&ionic->vf_op_lock);
2055         return ret;
2056 }
2057
2058 static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
2059 {
2060         struct ionic_lif *lif = netdev_priv(netdev);
2061         struct ionic *ionic = lif->ionic;
2062         u8 data = set;  /* convert to u8 for config */
2063         int ret;
2064
2065         if (!netif_device_present(netdev))
2066                 return -EBUSY;
2067
2068         down_write(&ionic->vf_op_lock);
2069
2070         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2071                 ret = -EINVAL;
2072         } else {
2073                 ret = ionic_set_vf_config(ionic, vf,
2074                                           IONIC_VF_ATTR_SPOOFCHK, &data);
2075                 if (!ret)
2076                         ionic->vfs[vf].spoofchk = data;
2077         }
2078
2079         up_write(&ionic->vf_op_lock);
2080         return ret;
2081 }
2082
2083 static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
2084 {
2085         struct ionic_lif *lif = netdev_priv(netdev);
2086         struct ionic *ionic = lif->ionic;
2087         u8 data = set;  /* convert to u8 for config */
2088         int ret;
2089
2090         if (!netif_device_present(netdev))
2091                 return -EBUSY;
2092
2093         down_write(&ionic->vf_op_lock);
2094
2095         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2096                 ret = -EINVAL;
2097         } else {
2098                 ret = ionic_set_vf_config(ionic, vf,
2099                                           IONIC_VF_ATTR_TRUST, &data);
2100                 if (!ret)
2101                         ionic->vfs[vf].trusted = data;
2102         }
2103
2104         up_write(&ionic->vf_op_lock);
2105         return ret;
2106 }
2107
2108 static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
2109 {
2110         struct ionic_lif *lif = netdev_priv(netdev);
2111         struct ionic *ionic = lif->ionic;
2112         u8 data;
2113         int ret;
2114
2115         switch (set) {
2116         case IFLA_VF_LINK_STATE_ENABLE:
2117                 data = IONIC_VF_LINK_STATUS_UP;
2118                 break;
2119         case IFLA_VF_LINK_STATE_DISABLE:
2120                 data = IONIC_VF_LINK_STATUS_DOWN;
2121                 break;
2122         case IFLA_VF_LINK_STATE_AUTO:
2123                 data = IONIC_VF_LINK_STATUS_AUTO;
2124                 break;
2125         default:
2126                 return -EINVAL;
2127         }
2128
2129         if (!netif_device_present(netdev))
2130                 return -EBUSY;
2131
2132         down_write(&ionic->vf_op_lock);
2133
2134         if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2135                 ret = -EINVAL;
2136         } else {
2137                 ret = ionic_set_vf_config(ionic, vf,
2138                                           IONIC_VF_ATTR_LINKSTATE, &data);
2139                 if (!ret)
2140                         ionic->vfs[vf].linkstate = set;
2141         }
2142
2143         up_write(&ionic->vf_op_lock);
2144         return ret;
2145 }
2146
2147 static const struct net_device_ops ionic_netdev_ops = {
2148         .ndo_open               = ionic_open,
2149         .ndo_stop               = ionic_stop,
2150         .ndo_start_xmit         = ionic_start_xmit,
2151         .ndo_get_stats64        = ionic_get_stats64,
2152         .ndo_set_rx_mode        = ionic_ndo_set_rx_mode,
2153         .ndo_set_features       = ionic_set_features,
2154         .ndo_set_mac_address    = ionic_set_mac_address,
2155         .ndo_validate_addr      = eth_validate_addr,
2156         .ndo_tx_timeout         = ionic_tx_timeout,
2157         .ndo_change_mtu         = ionic_change_mtu,
2158         .ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
2159         .ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
2160         .ndo_set_vf_vlan        = ionic_set_vf_vlan,
2161         .ndo_set_vf_trust       = ionic_set_vf_trust,
2162         .ndo_set_vf_mac         = ionic_set_vf_mac,
2163         .ndo_set_vf_rate        = ionic_set_vf_rate,
2164         .ndo_set_vf_spoofchk    = ionic_set_vf_spoofchk,
2165         .ndo_get_vf_config      = ionic_get_vf_config,
2166         .ndo_set_vf_link_state  = ionic_set_vf_link_state,
2167         .ndo_get_vf_stats       = ionic_get_vf_stats,
2168 };
2169
2170 static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
2171 {
2172         /* only swapping the queue and cq ring state, not the napi, flags, or intr setup */
2173         swap(a->q.num_descs,  b->q.num_descs);
2174         swap(a->q.base,       b->q.base);
2175         swap(a->q.base_pa,    b->q.base_pa);
2176         swap(a->q.info,       b->q.info);
2177         swap(a->q_base,       b->q_base);
2178         swap(a->q_base_pa,    b->q_base_pa);
2179         swap(a->q_size,       b->q_size);
2180
2181         swap(a->q.sg_base,    b->q.sg_base);
2182         swap(a->q.sg_base_pa, b->q.sg_base_pa);
2183         swap(a->sg_base,      b->sg_base);
2184         swap(a->sg_base_pa,   b->sg_base_pa);
2185         swap(a->sg_size,      b->sg_size);
2186
2187         swap(a->cq.num_descs, b->cq.num_descs);
2188         swap(a->cq.base,      b->cq.base);
2189         swap(a->cq.base_pa,   b->cq.base_pa);
2190         swap(a->cq.info,      b->cq.info);
2191         swap(a->cq_base,      b->cq_base);
2192         swap(a->cq_base_pa,   b->cq_base_pa);
2193         swap(a->cq_size,      b->cq_size);
2194 }
2195
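/* Reconfigure the queue count and/or descriptor counts.  New rings are
 * allocated up front while traffic is still running, then the queues
 * are stopped, the new rings are swapped into the existing qcqs, the
 * interrupt layout is reworked if needed, the queues are restarted,
 * and finally the old rings are freed.
 */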
2196 int ionic_reconfigure_queues(struct ionic_lif *lif,
2197                              struct ionic_queue_params *qparam)
2198 {
2199         struct ionic_qcq **tx_qcqs = NULL;
2200         struct ionic_qcq **rx_qcqs = NULL;
2201         unsigned int sg_desc_sz;
2202         unsigned int flags;
2203         int err = -ENOMEM;
2204         unsigned int i;
2205
2206         /* allocate temporary qcq arrays to hold new queue structs */
2207         if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
2208                 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
2209                                        sizeof(struct ionic_qcq *), GFP_KERNEL);
2210                 if (!tx_qcqs)
2211                         goto err_out;
2212         }
2213         if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) {
2214                 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
2215                                        sizeof(struct ionic_qcq *), GFP_KERNEL);
2216                 if (!rx_qcqs)
2217                         goto err_out;
2218         }
2219
2220         /* allocate new desc_info and rings, but leave the interrupt setup
2221          * until later so as not to disturb the still-running queues
2222          */
2223         if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2224             lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2225                                           sizeof(struct ionic_txq_sg_desc_v1))
2226                 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
2227         else
2228                 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
2229
2230         if (tx_qcqs) {
2231                 for (i = 0; i < qparam->nxqs; i++) {
2232                         flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2233                         err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2234                                               qparam->ntxq_descs,
2235                                               sizeof(struct ionic_txq_desc),
2236                                               sizeof(struct ionic_txq_comp),
2237                                               sg_desc_sz,
2238                                               lif->kern_pid, &tx_qcqs[i]);
2239                         if (err)
2240                                 goto err_out;
2241                 }
2242         }
2243
2244         if (rx_qcqs) {
2245                 for (i = 0; i < qparam->nxqs; i++) {
2246                         flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2247                         err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2248                                               qparam->nrxq_descs,
2249                                               sizeof(struct ionic_rxq_desc),
2250                                               sizeof(struct ionic_rxq_comp),
2251                                               sizeof(struct ionic_rxq_sg_desc),
2252                                               lif->kern_pid, &rx_qcqs[i]);
2253                         if (err)
2254                                 goto err_out;
2255                 }
2256         }
2257
2258         /* stop and clean the queues */
2259         ionic_stop_queues_reconfig(lif);
2260
2261         if (qparam->nxqs != lif->nxqs) {
2262                 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
2263                 if (err)
2264                         goto err_out_reinit_unlock;
2265                 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
2266                 if (err) {
2267                         netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
2268                         goto err_out_reinit_unlock;
2269                 }
2270         }
2271
2272         /* swap new desc_info and rings, keeping existing interrupt config */
2273         if (tx_qcqs) {
2274                 lif->ntxq_descs = qparam->ntxq_descs;
2275                 for (i = 0; i < qparam->nxqs; i++)
2276                         ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
2277         }
2278
2279         if (rx_qcqs) {
2280                 lif->nrxq_descs = qparam->nrxq_descs;
2281                 for (i = 0; i < qparam->nxqs; i++)
2282                         ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
2283         }
2284
2285         /* if we need to change the interrupt layout, this is the time */
2286         if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
2287             qparam->nxqs != lif->nxqs) {
2288                 if (qparam->intr_split) {
2289                         set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
2290                 } else {
2291                         clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
2292                         lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
2293                         lif->tx_coalesce_hw = lif->rx_coalesce_hw;
2294                 }
2295
2296                 /* clear existing interrupt assignments */
2297                 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
2298                         ionic_qcq_intr_free(lif, lif->txqcqs[i]);
2299                         ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
2300                 }
2301
2302                 /* re-assign the interrupts */
2303                 for (i = 0; i < qparam->nxqs; i++) {
2304                         lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
2305                         err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
2306                         ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2307                                              lif->rxqcqs[i]->intr.index,
2308                                              lif->rx_coalesce_hw);
2309
2310                         if (qparam->intr_split) {
2311                                 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
2312                                 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
2313                                 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2314                                                      lif->txqcqs[i]->intr.index,
2315                                                      lif->tx_coalesce_hw);
2316                                 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
2317                                         lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
2318                         } else {
2319                                 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2320                                 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
2321                         }
2322                 }
2323         }
2324
2325         /* now we can rework the debugfs mappings */
2326         if (tx_qcqs) {
2327                 for (i = 0; i < qparam->nxqs; i++) {
2328                         ionic_debugfs_del_qcq(lif->txqcqs[i]);
2329                         ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
2330                 }
2331         }
2332
2333         if (rx_qcqs) {
2334                 for (i = 0; i < qparam->nxqs; i++) {
2335                         ionic_debugfs_del_qcq(lif->rxqcqs[i]);
2336                         ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
2337                 }
2338         }
2339
2340         swap(lif->nxqs, qparam->nxqs);
2341
2342 err_out_reinit_unlock:
2343         /* re-init the queues, but don't lose an error code */
2344         if (err)
2345                 ionic_start_queues_reconfig(lif);
2346         else
2347                 err = ionic_start_queues_reconfig(lif);
2348
2349 err_out:
2350         /* free the old allocs, leaving the interrupt setup to the live queues */
2351         for (i = 0; i < qparam->nxqs; i++) {
2352                 if (tx_qcqs && tx_qcqs[i]) {
2353                         tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2354                         ionic_qcq_free(lif, tx_qcqs[i]);
2355                         devm_kfree(lif->ionic->dev, tx_qcqs[i]);
2356                         tx_qcqs[i] = NULL;
2357                 }
2358                 if (rx_qcqs && rx_qcqs[i]) {
2359                         rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2360                         ionic_qcq_free(lif, rx_qcqs[i]);
2361                         devm_kfree(lif->ionic->dev, rx_qcqs[i]);
2362                         rx_qcqs[i] = NULL;
2363                 }
2364         }
2365
2366         /* free the temporary qcq pointer arrays */
2367         if (rx_qcqs) {
2368                 devm_kfree(lif->ionic->dev, rx_qcqs);
2369                 rx_qcqs = NULL;
2370         }
2371         if (tx_qcqs) {
2372                 devm_kfree(lif->ionic->dev, tx_qcqs);
2373                 tx_qcqs = NULL;
2374         }
2375
2376         /* clean the unused dma and info allocations when new set is smaller
2377          * than the full array, but leave the qcq shells in place
2378          */
2379         for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
2380                 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2381                 ionic_qcq_free(lif, lif->txqcqs[i]);
2382
2383                 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2384                 ionic_qcq_free(lif, lif->rxqcqs[i]);
2385         }
2386
2387         return err;
2388 }
2389
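/* Allocate and set up the netdev and LIF structure: identify the LIF,
 * set the MTU limits and default coalesce/descriptor values, and
 * allocate the lif info block, control queues, and RSS indirection table.
 */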
2390 int ionic_lif_alloc(struct ionic *ionic)
2391 {
2392         struct device *dev = ionic->dev;
2393         union ionic_lif_identity *lid;
2394         struct net_device *netdev;
2395         struct ionic_lif *lif;
2396         int tbl_sz;
2397         int err;
2398
2399         lid = kzalloc(sizeof(*lid), GFP_KERNEL);
2400         if (!lid)
2401                 return -ENOMEM;
2402
2403         netdev = alloc_etherdev_mqs(sizeof(*lif),
2404                                     ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
2405         if (!netdev) {
2406                 dev_err(dev, "Cannot allocate netdev, aborting\n");
2407                 err = -ENOMEM;
2408                 goto err_out_free_lid;
2409         }
2410
2411         SET_NETDEV_DEV(netdev, dev);
2412
2413         lif = netdev_priv(netdev);
2414         lif->netdev = netdev;
2415         ionic->lif = lif;
2416         netdev->netdev_ops = &ionic_netdev_ops;
2417         ionic_ethtool_set_ops(netdev);
2418
2419         netdev->watchdog_timeo = 2 * HZ;
2420         netif_carrier_off(netdev);
2421
2422         lif->identity = lid;
2423         lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
2424         err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
2425         if (err) {
2426                 dev_err(ionic->dev, "Cannot identify type %d: %d\n",
2427                         lif->lif_type, err);
2428                 goto err_out_free_netdev;
2429         }
2430         lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
2431                                      le32_to_cpu(lif->identity->eth.min_frame_size));
2432         lif->netdev->max_mtu =
2433                 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
2434
2435         lif->neqs = ionic->neqs_per_lif;
2436         lif->nxqs = ionic->ntxqs_per_lif;
2437
2438         lif->ionic = ionic;
2439         lif->index = 0;
2440         lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
2441         lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
2442         lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;
2443
2444         /* Convert the default coalesce value to actual hw resolution */
2445         lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
2446         lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
2447                                                     lif->rx_coalesce_usecs);
2448         lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
2449         lif->tx_coalesce_hw = lif->rx_coalesce_hw;
2450         set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
2451         set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
2452
2453         snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
2454
2455         spin_lock_init(&lif->adminq_lock);
2456
2457         spin_lock_init(&lif->deferred.lock);
2458         INIT_LIST_HEAD(&lif->deferred.list);
2459         INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
2460
2461         /* allocate lif info */
2462         lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
2463         lif->info = dma_alloc_coherent(dev, lif->info_sz,
2464                                        &lif->info_pa, GFP_KERNEL);
2465         if (!lif->info) {
2466                 dev_err(dev, "Failed to allocate lif info, aborting\n");
2467                 err = -ENOMEM;
2468                 goto err_out_free_netdev;
2469         }
2470
2471         ionic_debugfs_add_lif(lif);
2472
2473         /* allocate control queues and txrx queue arrays */
2474         ionic_lif_queue_identify(lif);
2475         err = ionic_qcqs_alloc(lif);
2476         if (err)
2477                 goto err_out_free_lif_info;
2478
2479         /* allocate rss indirection table */
2480         tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
2481         lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
2482         lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
2483                                               &lif->rss_ind_tbl_pa,
2484                                               GFP_KERNEL);
2485
2486         if (!lif->rss_ind_tbl) {
2487                 err = -ENOMEM;
2488                 dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
2489                 goto err_out_free_qcqs;
2490         }
2491         netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
2492
2493         return 0;
2494
2495 err_out_free_qcqs:
2496         ionic_qcqs_free(lif);
2497 err_out_free_lif_info:
2498         dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2499         lif->info = NULL;
2500         lif->info_pa = 0;
2501 err_out_free_netdev:
2502         free_netdev(lif->netdev);
2503         lif = NULL;
2504 err_out_free_lid:
2505         kfree(lid);
2506
2507         return err;
2508 }
2509
2510 static void ionic_lif_reset(struct ionic_lif *lif)
2511 {
2512         struct ionic_dev *idev = &lif->ionic->idev;
2513
2514         mutex_lock(&lif->ionic->dev_cmd_lock);
2515         ionic_dev_cmd_lif_reset(idev, lif->index);
2516         ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2517         mutex_unlock(&lif->ionic->dev_cmd_lock);
2518 }
2519
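/* The FW has gone away: detach the netdev, stop and free the queues,
 * and tear down the LIF so it can be rebuilt when the FW returns.
 */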
2520 static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
2521 {
2522         struct ionic *ionic = lif->ionic;
2523
2524         if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
2525                 return;
2526
2527         dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
2528
2529         netif_device_detach(lif->netdev);
2530
2531         if (test_bit(IONIC_LIF_F_UP, lif->state)) {
2532                 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
2533                 mutex_lock(&lif->queue_lock);
2534                 ionic_stop_queues(lif);
2535                 mutex_unlock(&lif->queue_lock);
2536         }
2537
2538         if (netif_running(lif->netdev)) {
2539                 ionic_txrx_deinit(lif);
2540                 ionic_txrx_free(lif);
2541         }
2542         ionic_lif_deinit(lif);
2543         ionic_reset(ionic);
2544         ionic_qcqs_free(lif);
2545
2546         dev_info(ionic->dev, "FW Down: LIFs stopped\n");
2547 }
2548
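/* The FW is back: re-identify the device and port, rebuild the LIF,
 * replay the rx filters, and restart the queues if the netdev is running.
 */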
2549 static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
2550 {
2551         struct ionic *ionic = lif->ionic;
2552         int err;
2553
2554         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2555                 return;
2556
2557         dev_info(ionic->dev, "FW Up: restarting LIFs\n");
2558
2559         ionic_init_devinfo(ionic);
2560         err = ionic_identify(ionic);
2561         if (err)
2562                 goto err_out;
2563         err = ionic_port_identify(ionic);
2564         if (err)
2565                 goto err_out;
2566         err = ionic_port_init(ionic);
2567         if (err)
2568                 goto err_out;
2569         err = ionic_qcqs_alloc(lif);
2570         if (err)
2571                 goto err_out;
2572
2573         err = ionic_lif_init(lif);
2574         if (err)
2575                 goto err_qcqs_free;
2576
2577         if (lif->registered)
2578                 ionic_lif_set_netdev_info(lif);
2579
2580         ionic_rx_filter_replay(lif);
2581
2582         if (netif_running(lif->netdev)) {
2583                 err = ionic_txrx_alloc(lif);
2584                 if (err)
2585                         goto err_lifs_deinit;
2586
2587                 err = ionic_txrx_init(lif);
2588                 if (err)
2589                         goto err_txrx_free;
2590         }
2591
2592         clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
2593         ionic_link_status_check_request(lif, true);
2594         netif_device_attach(lif->netdev);
2595         dev_info(ionic->dev, "FW Up: LIFs restarted\n");
2596
2597         return;
2598
2599 err_txrx_free:
2600         ionic_txrx_free(lif);
2601 err_lifs_deinit:
2602         ionic_lif_deinit(lif);
2603 err_qcqs_free:
2604         ionic_qcqs_free(lif);
2605 err_out:
2606         dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
2607 }
2608
2609 void ionic_lif_free(struct ionic_lif *lif)
2610 {
2611         struct device *dev = lif->ionic->dev;
2612
2613         /* free rss indirection table */
2614         dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
2615                           lif->rss_ind_tbl_pa);
2616         lif->rss_ind_tbl = NULL;
2617         lif->rss_ind_tbl_pa = 0;
2618
2619         /* free queues */
2620         ionic_qcqs_free(lif);
2621         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2622                 ionic_lif_reset(lif);
2623
2624         /* free lif info */
2625         kfree(lif->identity);
2626         dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2627         lif->info = NULL;
2628         lif->info_pa = 0;
2629
2630         /* unmap doorbell page */
2631         ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2632         lif->kern_dbpage = NULL;
2633         kfree(lif->dbid_inuse);
2634         lif->dbid_inuse = NULL;
2635
2636         /* free netdev & lif */
2637         ionic_debugfs_del_lif(lif);
2638         free_netdev(lif->netdev);
2639 }
2640
2641 void ionic_lif_deinit(struct ionic_lif *lif)
2642 {
2643         if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
2644                 return;
2645
2646         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2647                 cancel_work_sync(&lif->deferred.work);
2648                 cancel_work_sync(&lif->tx_timeout_work);
2649                 ionic_rx_filters_deinit(lif);
2650                 if (lif->netdev->features & NETIF_F_RXHASH)
2651                         ionic_lif_rss_deinit(lif);
2652         }
2653
2654         napi_disable(&lif->adminqcq->napi);
2655         ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2656         ionic_lif_qcq_deinit(lif, lif->adminqcq);
2657
2658         mutex_destroy(&lif->queue_lock);
2659         ionic_lif_reset(lif);
2660 }
2661
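/* Bring up the adminq.  This has to go through the dev_cmd interface
 * since the adminq itself isn't usable yet; then its napi is enabled
 * and its interrupt, if present, is unmasked.
 */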
2662 static int ionic_lif_adminq_init(struct ionic_lif *lif)
2663 {
2664         struct device *dev = lif->ionic->dev;
2665         struct ionic_q_init_comp comp;
2666         struct ionic_dev *idev;
2667         struct ionic_qcq *qcq;
2668         struct ionic_queue *q;
2669         int err;
2670
2671         idev = &lif->ionic->idev;
2672         qcq = lif->adminqcq;
2673         q = &qcq->q;
2674
2675         mutex_lock(&lif->ionic->dev_cmd_lock);
2676         ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
2677         err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2678         ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2679         mutex_unlock(&lif->ionic->dev_cmd_lock);
2680         if (err) {
2681                 netdev_err(lif->netdev, "adminq init failed %d\n", err);
2682                 return err;
2683         }
2684
2685         q->hw_type = comp.hw_type;
2686         q->hw_index = le32_to_cpu(comp.hw_index);
2687         q->dbval = IONIC_DBELL_QID(q->hw_index);
2688
2689         dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
2690         dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
2691
2692         netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
2693                        NAPI_POLL_WEIGHT);
2694
2695         napi_enable(&qcq->napi);
2696
2697         if (qcq->flags & IONIC_QCQ_F_INTR)
2698                 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
2699                                 IONIC_INTR_MASK_CLEAR);
2700
2701         qcq->flags |= IONIC_QCQ_F_INITED;
2702
2703         return 0;
2704 }
2705
2706 static int ionic_lif_notifyq_init(struct ionic_lif *lif)
2707 {
2708         struct ionic_qcq *qcq = lif->notifyqcq;
2709         struct device *dev = lif->ionic->dev;
2710         struct ionic_queue *q = &qcq->q;
2711         int err;
2712
2713         struct ionic_admin_ctx ctx = {
2714                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2715                 .cmd.q_init = {
2716                         .opcode = IONIC_CMD_Q_INIT,
2717                         .lif_index = cpu_to_le16(lif->index),
2718                         .type = q->type,
2719                         .ver = lif->qtype_info[q->type].version,
2720                         .index = cpu_to_le32(q->index),
2721                         .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
2722                                              IONIC_QINIT_F_ENA),
2723                         .intr_index = cpu_to_le16(lif->adminqcq->intr.index),
2724                         .pid = cpu_to_le16(q->pid),
2725                         .ring_size = ilog2(q->num_descs),
2726                         .ring_base = cpu_to_le64(q->base_pa),
2727                 }
2728         };
2729
2730         dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
2731         dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
2732         dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
2733         dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
2734
2735         err = ionic_adminq_post_wait(lif, &ctx);
2736         if (err)
2737                 return err;
2738
2739         lif->last_eid = 0;
2740         q->hw_type = ctx.comp.q_init.hw_type;
2741         q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
2742         q->dbval = IONIC_DBELL_QID(q->hw_index);
2743
2744         dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
2745         dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
2746
2747         /* preset the callback info */
2748         q->info[0].cb_arg = lif;
2749
2750         qcq->flags |= IONIC_QCQ_F_INITED;
2751
2752         return 0;
2753 }
2754
2755 static int ionic_station_set(struct ionic_lif *lif)
2756 {
2757         struct net_device *netdev = lif->netdev;
2758         struct ionic_admin_ctx ctx = {
2759                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2760                 .cmd.lif_getattr = {
2761                         .opcode = IONIC_CMD_LIF_GETATTR,
2762                         .index = cpu_to_le16(lif->index),
2763                         .attr = IONIC_LIF_ATTR_MAC,
2764                 },
2765         };
2766         struct sockaddr addr;
2767         int err;
2768
2769         err = ionic_adminq_post_wait(lif, &ctx);
2770         if (err)
2771                 return err;
2772         netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
2773                    ctx.comp.lif_getattr.mac);
2774         if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
2775                 return 0;
2776
2777         if (!is_zero_ether_addr(netdev->dev_addr)) {
2778                 /* If the netdev mac is non-zero and doesn't match the default
2779                  * device address, it was set by something earlier and we're
2780                  * likely here again after a fw-upgrade reset.  We need to be
2781                  * sure the netdev mac is in our filter list.
2782                  */
2783                 if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
2784                                       netdev->dev_addr))
2785                         ionic_lif_addr(lif, netdev->dev_addr, true, true);
2786         } else {
2787                 /* Update the netdev mac with the device's mac */
2788                 memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
2789                 addr.sa_family = AF_INET;
2790                 err = eth_prepare_mac_addr_change(netdev, &addr);
2791                 if (err) {
2792                         netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
2793                                     addr.sa_data, err);
2794                         return 0;
2795                 }
2796
2797                 eth_commit_mac_addr_change(netdev, &addr);
2798         }
2799
2800         netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
2801                    netdev->dev_addr);
2802         ionic_lif_addr(lif, netdev->dev_addr, true, true);
2803
2804         return 0;
2805 }
2806
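/* Initialize the LIF on the device: map the kernel doorbell page,
 * bring up the admin and notify queues, and set the initial nic
 * features, rx filters, and station MAC address.
 */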
2807 int ionic_lif_init(struct ionic_lif *lif)
2808 {
2809         struct ionic_dev *idev = &lif->ionic->idev;
2810         struct device *dev = lif->ionic->dev;
2811         struct ionic_lif_init_comp comp;
2812         int dbpage_num;
2813         int err;
2814
2815         mutex_lock(&lif->ionic->dev_cmd_lock);
2816         ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
2817         err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2818         ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2819         mutex_unlock(&lif->ionic->dev_cmd_lock);
2820         if (err)
2821                 return err;
2822
2823         lif->hw_index = le16_to_cpu(comp.hw_index);
2824         mutex_init(&lif->queue_lock);
2825
2826         /* now that we have the hw_index we can figure out our doorbell page */
2827         lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
2828         if (!lif->dbid_count) {
2829                 dev_err(dev, "No doorbell pages, aborting\n");
2830                 return -EINVAL;
2831         }
2832
2833         lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
2834         if (!lif->dbid_inuse) {
2835                 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
2836                 return -ENOMEM;
2837         }
2838
2839         /* first doorbell id reserved for kernel (dbid aka pid == zero) */
2840         set_bit(0, lif->dbid_inuse);
2841         lif->kern_pid = 0;
2842
2843         dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
2844         lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
2845         if (!lif->kern_dbpage) {
2846                 dev_err(dev, "Cannot map dbpage, aborting\n");
2847                 err = -ENOMEM;
2848                 goto err_out_free_dbid;
2849         }
2850
2851         err = ionic_lif_adminq_init(lif);
2852         if (err)
2853                 goto err_out_adminq_deinit;
2854
2855         if (lif->ionic->nnqs_per_lif) {
2856                 err = ionic_lif_notifyq_init(lif);
2857                 if (err)
2858                         goto err_out_notifyq_deinit;
2859         }
2860
2861         err = ionic_init_nic_features(lif);
2862         if (err)
2863                 goto err_out_notifyq_deinit;
2864
2865         if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2866                 err = ionic_rx_filters_init(lif);
2867                 if (err)
2868                         goto err_out_notifyq_deinit;
2869         }
2870
2871         err = ionic_station_set(lif);
2872         if (err)
2873                 goto err_out_notifyq_deinit;
2874
2875         lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
2876
2877         set_bit(IONIC_LIF_F_INITED, lif->state);
2878
2879         INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
2880
2881         return 0;
2882
2883 err_out_notifyq_deinit:
2884         ionic_lif_qcq_deinit(lif, lif->notifyqcq);
2885 err_out_adminq_deinit:
2886         ionic_lif_qcq_deinit(lif, lif->adminqcq);
2887         ionic_lif_reset(lif);
2888         ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2889         lif->kern_dbpage = NULL;
2890 err_out_free_dbid:
2891         kfree(lif->dbid_inuse);
2892         lif->dbid_inuse = NULL;
2893
2894         return err;
2895 }
2896
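/* Work handler paired with the netdevice notifier; currently a no-op. */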
2897 static void ionic_lif_notify_work(struct work_struct *ws)
2898 {
2899 }
2900
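/* Push the current netdev name to the device via LIF_SETATTR so the
 * firmware can associate this LIF with its interface name.
 */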
2901 static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
2902 {
2903         struct ionic_admin_ctx ctx = {
2904                 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2905                 .cmd.lif_setattr = {
2906                         .opcode = IONIC_CMD_LIF_SETATTR,
2907                         .index = cpu_to_le16(lif->index),
2908                         .attr = IONIC_LIF_ATTR_NAME,
2909                 },
2910         };
2911
2912         strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
2913                 sizeof(ctx.cmd.lif_setattr.name));
2914
2915         ionic_adminq_post_wait(lif, &ctx);
2916 }
2917
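/* Return the ionic_lif behind a netdev, or NULL if the netdev does not
 * belong to this driver (identified by its ndo_start_xmit hook).
 */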
2918 static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
2919 {
2920         if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
2921                 return NULL;
2922
2923         return netdev_priv(netdev);
2924 }
2925
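/* Netdevice notifier callback: when one of our netdevs is renamed,
 * refresh the name the device has on record for the LIF.
 */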
2926 static int ionic_lif_notify(struct notifier_block *nb,
2927                             unsigned long event, void *info)
2928 {
2929         struct net_device *ndev = netdev_notifier_info_to_dev(info);
2930         struct ionic *ionic = container_of(nb, struct ionic, nb);
2931         struct ionic_lif *lif = ionic_netdev_lif(ndev);
2932
2933         if (!lif || lif->ionic != ionic)
2934                 return NOTIFY_DONE;
2935
2936         switch (event) {
2937         case NETDEV_CHANGENAME:
2938                 ionic_lif_set_netdev_info(lif);
2939                 break;
2940         }
2941
2942         return NOTIFY_DONE;
2943 }
2944
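/* Register the netdevice notifier and the netdev itself, then push the
 * initial netdev info down to the device.
 */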
2945 int ionic_lif_register(struct ionic_lif *lif)
2946 {
2947         int err;
2948
2949         INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
2950
2951         lif->ionic->nb.notifier_call = ionic_lif_notify;
2952
2953         err = register_netdevice_notifier(&lif->ionic->nb);
2954         if (err)
2955                 lif->ionic->nb.notifier_call = NULL;
2956
2957         /* only register LIF0 for now */
2958         err = register_netdev(lif->netdev);
2959         if (err) {
2960                 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
2961                 return err;
2962         }
2963         lif->registered = true;
2964         ionic_lif_set_netdev_info(lif);
2965
2966         return 0;
2967 }
2968
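/* Undo ionic_lif_register: remove the notifier, flush its work, and
 * unregister the netdev if it is still registered.
 */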
2969 void ionic_lif_unregister(struct ionic_lif *lif)
2970 {
2971         if (lif->ionic->nb.notifier_call) {
2972                 unregister_netdevice_notifier(&lif->ionic->nb);
2973                 cancel_work_sync(&lif->ionic->nb_work);
2974                 lif->ionic->nb.notifier_call = NULL;
2975         }
2976
2977         if (lif->netdev->reg_state == NETREG_REGISTERED)
2978                 unregister_netdev(lif->netdev);
2979         lif->registered = false;
2980 }
2981
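/* Ask the device what it supports for each queue type we know about and
 * cache the answers in lif->qtype_info[].  An -EINVAL reply marks an
 * unsupported qtype and is skipped; -EIO or any other error means the
 * firmware cannot answer, so probing stops.
 */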
2982 static void ionic_lif_queue_identify(struct ionic_lif *lif)
2983 {
2984         struct ionic *ionic = lif->ionic;
2985         union ionic_q_identity *q_ident;
2986         struct ionic_dev *idev;
2987         int qtype;
2988         int err;
2989
2990         idev = &lif->ionic->idev;
2991         q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
2992
2993         for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
2994                 struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
2995
2996                 /* only check the queue types we know about; skip any others */
2997                 switch (qtype) {
2998                 case IONIC_QTYPE_ADMINQ:
2999                 case IONIC_QTYPE_NOTIFYQ:
3000                 case IONIC_QTYPE_RXQ:
3001                 case IONIC_QTYPE_TXQ:
3002                         break;
3003                 default:
3004                         continue;
3005                 }
3006
3007                 memset(qti, 0, sizeof(*qti));
3008
3009                 mutex_lock(&ionic->dev_cmd_lock);
3010                 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
3011                                              ionic_qtype_versions[qtype]);
3012                 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3013                 if (!err) {
3014                         qti->version   = q_ident->version;
3015                         qti->supported = q_ident->supported;
3016                         qti->features  = le64_to_cpu(q_ident->features);
3017                         qti->desc_sz   = le16_to_cpu(q_ident->desc_sz);
3018                         qti->comp_sz   = le16_to_cpu(q_ident->comp_sz);
3019                         qti->sg_desc_sz   = le16_to_cpu(q_ident->sg_desc_sz);
3020                         qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
3021                         qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
3022                 }
3023                 mutex_unlock(&ionic->dev_cmd_lock);
3024
3025                 if (err == -EINVAL) {
3026                         dev_err(ionic->dev, "qtype %d not supported\n", qtype);
3027                         continue;
3028                 } else if (err == -EIO) {
3029                         dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
3030                         return;
3031                 } else if (err) {
3032                         dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
3033                                 qtype, err);
3034                         return;
3035                 }
3036
3037                 dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
3038                         qtype, qti->version);
3039                 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
3040                         qtype, qti->supported);
3041                 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
3042                         qtype, qti->features);
3043                 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
3044                         qtype, qti->desc_sz);
3045                 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
3046                         qtype, qti->comp_sz);
3047                 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
3048                         qtype, qti->sg_desc_sz);
3049                 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
3050                         qtype, qti->max_sg_elems);
3051                 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
3052                         qtype, qti->sg_desc_stride);
3053         }
3054 }
3055
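/* Fetch the LIF identity data from the device into @lid and log the
 * fields of interest.
 */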
3056 int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
3057                        union ionic_lif_identity *lid)
3058 {
3059         struct ionic_dev *idev = &ionic->idev;
3060         size_t sz;
3061         int err;
3062
3063         sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
3064
3065         mutex_lock(&ionic->dev_cmd_lock);
3066         ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
3067         err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3068         memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
3069         mutex_unlock(&ionic->dev_cmd_lock);
3070         if (err)
3071                 return err;
3072
3073         dev_dbg(ionic->dev, "capabilities 0x%llx\n",
3074                 le64_to_cpu(lid->capabilities));
3075
3076         dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
3077                 le32_to_cpu(lid->eth.max_ucast_filters));
3078         dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
3079                 le32_to_cpu(lid->eth.max_mcast_filters));
3080         dev_dbg(ionic->dev, "eth.features 0x%llx\n",
3081                 le64_to_cpu(lid->eth.config.features));
3082         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
3083                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
3084         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
3085                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
3086         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
3087                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
3088         dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
3089                 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
3090         dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
3091         dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
3092         dev_dbg(ionic->dev, "eth.config.mtu %d\n",
3093                 le32_to_cpu(lid->eth.config.mtu));
3094
3095         return 0;
3096 }
3097
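/* Size the LIF: choose queue and interrupt counts from the device identity
 * and the number of online CPUs, then ask the OS for that many interrupt
 * vectors, scaling the request back (see try_fewer) until it fits.  For
 * example, with 16 online CPUs and ample device resources the first request
 * is 1 + 16 + 16 = 33 vectors (adminq/notifyq + TxRx pairs + RDMA EQs).
 */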
3098 int ionic_lif_size(struct ionic *ionic)
3099 {
3100         struct ionic_identity *ident = &ionic->ident;
3101         unsigned int nintrs, dev_nintrs;
3102         union ionic_lif_config *lc;
3103         unsigned int ntxqs_per_lif;
3104         unsigned int nrxqs_per_lif;
3105         unsigned int neqs_per_lif;
3106         unsigned int nnqs_per_lif;
3107         unsigned int nxqs, neqs;
3108         unsigned int min_intrs;
3109         int err;
3110
3111         lc = &ident->lif.eth.config;
3112         dev_nintrs = le32_to_cpu(ident->dev.nintrs);
3113         neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
3114         nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
3115         ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
3116         nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
3117
3118         nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
3119         nxqs = min(nxqs, num_online_cpus());
3120         neqs = min(neqs_per_lif, num_online_cpus());
3121
3122 try_again:
3123         /* interrupt usage:
3124          *    1 for master lif adminq/notifyq
3125          *    1 for each CPU for master lif TxRx queue pairs
3126          *    whatever's left is for RDMA queues
3127          */
3128         nintrs = 1 + nxqs + neqs;
3129         min_intrs = 2;  /* adminq + 1 TxRx queue pair */
3130
3131         if (nintrs > dev_nintrs)
3132                 goto try_fewer;
3133
3134         err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
3135         if (err < 0 && err != -ENOSPC) {
3136                 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
3137                 return err;
3138         }
3139         if (err == -ENOSPC)
3140                 goto try_fewer;
3141
3142         if (err != nintrs) {
3143                 ionic_bus_free_irq_vectors(ionic);
3144                 goto try_fewer;
3145         }
3146
3147         ionic->nnqs_per_lif = nnqs_per_lif;
3148         ionic->neqs_per_lif = neqs;
3149         ionic->ntxqs_per_lif = nxqs;
3150         ionic->nrxqs_per_lif = nxqs;
3151         ionic->nintrs = nintrs;
3152
3153         ionic_debugfs_add_sizes(ionic);
3154
3155         return 0;
3156
3157 try_fewer:
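        /* Could not get everything requested: halve the notifyq count first,
         * then the RDMA event queues, then the TxRx queue pairs, and retry.
         */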
3158         if (nnqs_per_lif > 1) {
3159                 nnqs_per_lif >>= 1;
3160                 goto try_again;
3161         }
3162         if (neqs > 1) {
3163                 neqs >>= 1;
3164                 goto try_again;
3165         }
3166         if (nxqs > 1) {
3167                 nxqs >>= 1;
3168                 goto try_again;
3169         }
3170         dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
3171         return -ENOSPC;
3172 }