net: hns3: fix VLAN filter restore issue after reset
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/dma-mapping.h>
5 #include <linux/etherdevice.h>
6 #include <linux/interrupt.h>
7 #ifdef CONFIG_RFS_ACCEL
8 #include <linux/cpu_rmap.h>
9 #endif
10 #include <linux/if_vlan.h>
11 #include <linux/ip.h>
12 #include <linux/ipv6.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/aer.h>
16 #include <linux/skbuff.h>
17 #include <linux/sctp.h>
18 #include <linux/vermagic.h>
19 #include <net/gre.h>
20 #include <net/pkt_cls.h>
21 #include <net/tcp.h>
22 #include <net/vxlan.h>
23
24 #include "hnae3.h"
25 #include "hns3_enet.h"
26
27 #define hns3_set_field(origin, shift, val)      ((origin) |= ((val) << (shift)))
28 #define hns3_tx_bd_count(S)     DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
29
30 static void hns3_clear_all_ring(struct hnae3_handle *h);
31 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
32 static void hns3_remove_hw_addr(struct net_device *netdev);
33
34 static const char hns3_driver_name[] = "hns3";
35 const char hns3_driver_version[] = VERMAGIC_STRING;
36 static const char hns3_driver_string[] =
37                         "Hisilicon Ethernet Network Driver for Hip08 Family";
38 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
39 static struct hnae3_client client;
40
41 static int debug = -1;
42 module_param(debug, int, 0);
43 MODULE_PARM_DESC(debug, " Network interface message level setting");
44
45 #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
46                            NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
47
48 /* hns3_pci_tbl - PCI Device ID Table
49  *
50  * Last entry must be all 0s
51  *
52  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
53  *   Class, Class Mask, private data (not used) }
54  */
55 static const struct pci_device_id hns3_pci_tbl[] = {
56         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
57         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
58         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
59          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
60         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
61          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
62         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
63          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
64         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
65          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
66         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
67          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
68         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
69         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
70          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
71         /* required last entry */
72         {0, }
73 };
74 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
75
76 static irqreturn_t hns3_irq_handle(int irq, void *vector)
77 {
78         struct hns3_enet_tqp_vector *tqp_vector = vector;
79
80         napi_schedule_irqoff(&tqp_vector->napi);
81
82         return IRQ_HANDLED;
83 }
84
85 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
86 {
87         struct hns3_enet_tqp_vector *tqp_vectors;
88         unsigned int i;
89
90         for (i = 0; i < priv->vector_num; i++) {
91                 tqp_vectors = &priv->tqp_vector[i];
92
93                 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
94                         continue;
95
 96                 /* clear the affinity hint */
97                 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
98
99                 /* release the irq resource */
100                 free_irq(tqp_vectors->vector_irq, tqp_vectors);
101                 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
102         }
103 }
104
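/* Request an IRQ for every initialized TQP vector: name it after the netdev
 * and its TxRx/Rx/Tx role, set the affinity hint, and mark the vector as
 * inited so hns3_nic_uninit_irq() can release it later.
 */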
105 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
106 {
107         struct hns3_enet_tqp_vector *tqp_vectors;
108         int txrx_int_idx = 0;
109         int rx_int_idx = 0;
110         int tx_int_idx = 0;
111         unsigned int i;
112         int ret;
113
114         for (i = 0; i < priv->vector_num; i++) {
115                 tqp_vectors = &priv->tqp_vector[i];
116
117                 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
118                         continue;
119
120                 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
121                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
122                                  "%s-%s-%d", priv->netdev->name, "TxRx",
123                                  txrx_int_idx++);
124                         txrx_int_idx++;
125                 } else if (tqp_vectors->rx_group.ring) {
126                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
127                                  "%s-%s-%d", priv->netdev->name, "Rx",
128                                  rx_int_idx++);
129                 } else if (tqp_vectors->tx_group.ring) {
130                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
131                                  "%s-%s-%d", priv->netdev->name, "Tx",
132                                  tx_int_idx++);
133                 } else {
134                         /* Skip this unused q_vector */
135                         continue;
136                 }
137
138                 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
139
140                 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
141                                   tqp_vectors->name,
 142                                   tqp_vectors);
143                 if (ret) {
144                         netdev_err(priv->netdev, "request irq(%d) fail\n",
145                                    tqp_vectors->vector_irq);
146                         return ret;
147                 }
148
149                 irq_set_affinity_hint(tqp_vectors->vector_irq,
150                                       &tqp_vectors->affinity_mask);
151
152                 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
153         }
154
155         return 0;
156 }
157
158 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
159                                  u32 mask_en)
160 {
161         writel(mask_en, tqp_vector->mask_addr);
162 }
163
164 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
165 {
166         napi_enable(&tqp_vector->napi);
167
168         /* enable vector */
169         hns3_mask_vector_irq(tqp_vector, 1);
170 }
171
172 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
173 {
174         /* disable vector */
175         hns3_mask_vector_irq(tqp_vector, 0);
176
177         disable_irq(tqp_vector->vector_irq);
178         napi_disable(&tqp_vector->napi);
179 }
180
181 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
182                                  u32 rl_value)
183 {
184         u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
185
 186         /* This defines the configuration for RL (Interrupt Rate Limiter).
 187          * RL defines the rate of interrupts, i.e. the number of interrupts per second.
 188          * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
 189          */
190
191         if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
192             !tqp_vector->rx_group.coal.gl_adapt_enable)
193                 /* According to the hardware, the range of rl_reg is
194                  * 0-59 and the unit is 4.
195                  */
 196                 rl_reg |= HNS3_INT_RL_ENABLE_MASK;
197
198         writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
199 }
200
201 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
202                                     u32 gl_value)
203 {
204         u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
205
206         writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
207 }
208
209 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
210                                     u32 gl_value)
211 {
212         u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
213
214         writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
215 }
216
217 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
218                                    struct hns3_nic_priv *priv)
219 {
220         /* initialize the configuration for interrupt coalescing.
221          * 1. GL (Interrupt Gap Limiter)
222          * 2. RL (Interrupt Rate Limiter)
223          */
224
225         /* Default: enable interrupt coalescing self-adaptive and GL */
226         tqp_vector->tx_group.coal.gl_adapt_enable = 1;
227         tqp_vector->rx_group.coal.gl_adapt_enable = 1;
228
229         tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
230         tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
231
232         tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
233         tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
234 }
235
236 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
237                                       struct hns3_nic_priv *priv)
238 {
239         struct hnae3_handle *h = priv->ae_handle;
240
241         hns3_set_vector_coalesce_tx_gl(tqp_vector,
242                                        tqp_vector->tx_group.coal.int_gl);
243         hns3_set_vector_coalesce_rx_gl(tqp_vector,
244                                        tqp_vector->rx_group.coal.int_gl);
245         hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
246 }
247
248 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
249 {
250         struct hnae3_handle *h = hns3_get_handle(netdev);
251         struct hnae3_knic_private_info *kinfo = &h->kinfo;
252         unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
253         int i, ret;
254
255         if (kinfo->num_tc <= 1) {
256                 netdev_reset_tc(netdev);
257         } else {
258                 ret = netdev_set_num_tc(netdev, kinfo->num_tc);
259                 if (ret) {
260                         netdev_err(netdev,
261                                    "netdev_set_num_tc fail, ret=%d!\n", ret);
262                         return ret;
263                 }
264
265                 for (i = 0; i < HNAE3_MAX_TC; i++) {
266                         if (!kinfo->tc_info[i].enable)
267                                 continue;
268
269                         netdev_set_tc_queue(netdev,
270                                             kinfo->tc_info[i].tc,
271                                             kinfo->tc_info[i].tqp_count,
272                                             kinfo->tc_info[i].tqp_offset);
273                 }
274         }
275
276         ret = netif_set_real_num_tx_queues(netdev, queue_size);
277         if (ret) {
278                 netdev_err(netdev,
279                            "netif_set_real_num_tx_queues fail, ret=%d!\n",
280                            ret);
281                 return ret;
282         }
283
284         ret = netif_set_real_num_rx_queues(netdev, queue_size);
285         if (ret) {
286                 netdev_err(netdev,
287                            "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
288                 return ret;
289         }
290
291         return 0;
292 }
293
294 static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
295 {
296         u16 alloc_tqps, max_rss_size, rss_size;
297
298         h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
299         rss_size = alloc_tqps / h->kinfo.num_tc;
300
301         return min_t(u16, rss_size, max_rss_size);
302 }
303
304 static void hns3_tqp_enable(struct hnae3_queue *tqp)
305 {
306         u32 rcb_reg;
307
308         rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
309         rcb_reg |= BIT(HNS3_RING_EN_B);
310         hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
311 }
312
313 static void hns3_tqp_disable(struct hnae3_queue *tqp)
314 {
315         u32 rcb_reg;
316
317         rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
318         rcb_reg &= ~BIT(HNS3_RING_EN_B);
319         hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
320 }
321
322 static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
323 {
324 #ifdef CONFIG_RFS_ACCEL
325         free_irq_cpu_rmap(netdev->rx_cpu_rmap);
326         netdev->rx_cpu_rmap = NULL;
327 #endif
328 }
329
330 static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
331 {
332 #ifdef CONFIG_RFS_ACCEL
333         struct hns3_nic_priv *priv = netdev_priv(netdev);
334         struct hns3_enet_tqp_vector *tqp_vector;
335         int i, ret;
336
337         if (!netdev->rx_cpu_rmap) {
338                 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
339                 if (!netdev->rx_cpu_rmap)
340                         return -ENOMEM;
341         }
342
343         for (i = 0; i < priv->vector_num; i++) {
344                 tqp_vector = &priv->tqp_vector[i];
345                 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
346                                        tqp_vector->vector_irq);
347                 if (ret) {
348                         hns3_free_rx_cpu_rmap(netdev);
349                         return ret;
350                 }
351         }
352 #endif
353         return 0;
354 }
355
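/* Bring the data path up: reset all rings, set up the aRFS cpu rmap, request
 * the vector IRQs, enable the vectors and the RCB queues, then start the
 * ae_dev. On failure everything done so far is rolled back in reverse order.
 */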
356 static int hns3_nic_net_up(struct net_device *netdev)
357 {
358         struct hns3_nic_priv *priv = netdev_priv(netdev);
359         struct hnae3_handle *h = priv->ae_handle;
360         int i, j;
361         int ret;
362
363         ret = hns3_nic_reset_all_ring(h);
364         if (ret)
365                 return ret;
366
367         /* the device can work without cpu rmap, only aRFS needs it */
368         ret = hns3_set_rx_cpu_rmap(netdev);
369         if (ret)
370                 netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
371
372         /* get irq resource for all vectors */
373         ret = hns3_nic_init_irq(priv);
374         if (ret) {
375                 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
376                 goto free_rmap;
377         }
378
379         clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
380
381         /* enable the vectors */
382         for (i = 0; i < priv->vector_num; i++)
383                 hns3_vector_enable(&priv->tqp_vector[i]);
384
385         /* enable rcb */
386         for (j = 0; j < h->kinfo.num_tqps; j++)
387                 hns3_tqp_enable(h->kinfo.tqp[j]);
388
389         /* start the ae_dev */
390         ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
391         if (ret)
392                 goto out_start_err;
393
394         return 0;
395
396 out_start_err:
397         set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
398         while (j--)
399                 hns3_tqp_disable(h->kinfo.tqp[j]);
400
401         for (j = i - 1; j >= 0; j--)
402                 hns3_vector_disable(&priv->tqp_vector[j]);
403
404         hns3_nic_uninit_irq(priv);
405 free_rmap:
406         hns3_free_rx_cpu_rmap(netdev);
407         return ret;
408 }
409
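/* Map every TX ring of each TQP vector to that vector's affinity mask via
 * XPS, so the stack prefers the queue whose interrupt is local to the CPU.
 */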
410 static void hns3_config_xps(struct hns3_nic_priv *priv)
411 {
412         int i;
413
414         for (i = 0; i < priv->vector_num; i++) {
415                 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
416                 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
417
418                 while (ring) {
419                         int ret;
420
421                         ret = netif_set_xps_queue(priv->netdev,
422                                                   &tqp_vector->affinity_mask,
423                                                   ring->tqp->tqp_index);
424                         if (ret)
425                                 netdev_warn(priv->netdev,
426                                             "set xps queue failed: %d", ret);
427
428                         ring = ring->next;
429                 }
430         }
431 }
432
433 static int hns3_nic_net_open(struct net_device *netdev)
434 {
435         struct hns3_nic_priv *priv = netdev_priv(netdev);
436         struct hnae3_handle *h = hns3_get_handle(netdev);
437         struct hnae3_knic_private_info *kinfo;
438         int i, ret;
439
440         if (hns3_nic_resetting(netdev))
441                 return -EBUSY;
442
443         netif_carrier_off(netdev);
444
445         ret = hns3_nic_set_real_num_queue(netdev);
446         if (ret)
447                 return ret;
448
449         ret = hns3_nic_net_up(netdev);
450         if (ret) {
451                 netdev_err(netdev,
452                            "hns net up fail, ret=%d!\n", ret);
453                 return ret;
454         }
455
456         kinfo = &h->kinfo;
457         for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
458                 netdev_set_prio_tc_map(netdev, i,
459                                        kinfo->prio_tc[i]);
460         }
461
462         if (h->ae_algo->ops->set_timer_task)
463                 h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
464
465         hns3_config_xps(priv);
466         return 0;
467 }
468
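/* Tear the data path down in the reverse order of hns3_nic_net_up(): disable
 * the vectors and RCB queues, stop the ae_dev, free the cpu rmap and the
 * vector IRQs, then clear the rings.
 */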
469 static void hns3_nic_net_down(struct net_device *netdev)
470 {
471         struct hns3_nic_priv *priv = netdev_priv(netdev);
472         struct hnae3_handle *h = hns3_get_handle(netdev);
473         const struct hnae3_ae_ops *ops;
474         int i;
475
476         /* disable vectors */
477         for (i = 0; i < priv->vector_num; i++)
478                 hns3_vector_disable(&priv->tqp_vector[i]);
479
480         /* disable rcb */
481         for (i = 0; i < h->kinfo.num_tqps; i++)
482                 hns3_tqp_disable(h->kinfo.tqp[i]);
483
484         /* stop ae_dev */
485         ops = priv->ae_handle->ae_algo->ops;
486         if (ops->stop)
487                 ops->stop(priv->ae_handle);
488
489         hns3_free_rx_cpu_rmap(netdev);
490
491         /* free irq resources */
492         hns3_nic_uninit_irq(priv);
493
494         hns3_clear_all_ring(priv->ae_handle);
495 }
496
497 static int hns3_nic_net_stop(struct net_device *netdev)
498 {
499         struct hns3_nic_priv *priv = netdev_priv(netdev);
500         struct hnae3_handle *h = hns3_get_handle(netdev);
501
502         if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
503                 return 0;
504
505         if (h->ae_algo->ops->set_timer_task)
506                 h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
507
508         netif_tx_stop_all_queues(netdev);
509         netif_carrier_off(netdev);
510
511         hns3_nic_net_down(netdev);
512
513         return 0;
514 }
515
516 static int hns3_nic_uc_sync(struct net_device *netdev,
517                             const unsigned char *addr)
518 {
519         struct hnae3_handle *h = hns3_get_handle(netdev);
520
521         if (h->ae_algo->ops->add_uc_addr)
522                 return h->ae_algo->ops->add_uc_addr(h, addr);
523
524         return 0;
525 }
526
527 static int hns3_nic_uc_unsync(struct net_device *netdev,
528                               const unsigned char *addr)
529 {
530         struct hnae3_handle *h = hns3_get_handle(netdev);
531
532         if (h->ae_algo->ops->rm_uc_addr)
533                 return h->ae_algo->ops->rm_uc_addr(h, addr);
534
535         return 0;
536 }
537
538 static int hns3_nic_mc_sync(struct net_device *netdev,
539                             const unsigned char *addr)
540 {
541         struct hnae3_handle *h = hns3_get_handle(netdev);
542
543         if (h->ae_algo->ops->add_mc_addr)
544                 return h->ae_algo->ops->add_mc_addr(h, addr);
545
546         return 0;
547 }
548
549 static int hns3_nic_mc_unsync(struct net_device *netdev,
550                               const unsigned char *addr)
551 {
552         struct hnae3_handle *h = hns3_get_handle(netdev);
553
554         if (h->ae_algo->ops->rm_mc_addr)
555                 return h->ae_algo->ops->rm_mc_addr(h, addr);
556
557         return 0;
558 }
559
560 static u8 hns3_get_netdev_flags(struct net_device *netdev)
561 {
562         u8 flags = 0;
563
564         if (netdev->flags & IFF_PROMISC) {
565                 flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
566         } else {
567                 flags |= HNAE3_VLAN_FLTR;
568                 if (netdev->flags & IFF_ALLMULTI)
569                         flags |= HNAE3_USER_MPE;
570         }
571
572         return flags;
573 }
574
575 static void hns3_nic_set_rx_mode(struct net_device *netdev)
576 {
577         struct hnae3_handle *h = hns3_get_handle(netdev);
578         u8 new_flags;
579         int ret;
580
581         new_flags = hns3_get_netdev_flags(netdev);
582
583         ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
584         if (ret) {
585                 netdev_err(netdev, "sync uc address fail\n");
586                 if (ret == -ENOSPC)
587                         new_flags |= HNAE3_OVERFLOW_UPE;
588         }
589
590         if (netdev->flags & IFF_MULTICAST) {
591                 ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
592                                     hns3_nic_mc_unsync);
593                 if (ret) {
594                         netdev_err(netdev, "sync mc address fail\n");
595                         if (ret == -ENOSPC)
596                                 new_flags |= HNAE3_OVERFLOW_MPE;
597                 }
598         }
599
 600         /* When user-mode promiscuous mode is enabled, vlan filtering is
 601          * disabled to let all packets in. When promiscuous mode is enabled
 602          * due to MAC-VLAN table overflow, vlan filtering stays enabled.
 603          */
604         hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
605         h->netdev_flags = new_flags;
606         hns3_update_promisc_mode(netdev, new_flags);
607 }
608
609 int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
610 {
611         struct hns3_nic_priv *priv = netdev_priv(netdev);
612         struct hnae3_handle *h = priv->ae_handle;
613
614         if (h->ae_algo->ops->set_promisc_mode) {
615                 return h->ae_algo->ops->set_promisc_mode(h,
616                                                 promisc_flags & HNAE3_UPE,
617                                                 promisc_flags & HNAE3_MPE);
618         }
619
620         return 0;
621 }
622
623 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
624 {
625         struct hns3_nic_priv *priv = netdev_priv(netdev);
626         struct hnae3_handle *h = priv->ae_handle;
627         bool last_state;
628
629         if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
630                 last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
631                 if (enable != last_state) {
632                         netdev_info(netdev,
633                                     "%s vlan filter\n",
634                                     enable ? "enable" : "disable");
635                         h->ae_algo->ops->enable_vlan_filter(h, enable);
636                 }
637         }
638 }
639
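/* For GSO skbs: clear the IPv4 (and tunnel UDP) checksum fields that the
 * hardware refills, remove the payload length from the TCP pseudo checksum,
 * and report the paylen, MSS and TSO flag for the TX BD.
 */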
640 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
641                         u16 *mss, u32 *type_cs_vlan_tso)
642 {
643         u32 l4_offset, hdr_len;
644         union l3_hdr_info l3;
645         union l4_hdr_info l4;
646         u32 l4_paylen;
647         int ret;
648
649         if (!skb_is_gso(skb))
650                 return 0;
651
652         ret = skb_cow_head(skb, 0);
653         if (unlikely(ret))
654                 return ret;
655
656         l3.hdr = skb_network_header(skb);
657         l4.hdr = skb_transport_header(skb);
658
659         /* Software should clear the IPv4's checksum field when tso is
660          * needed.
661          */
662         if (l3.v4->version == 4)
663                 l3.v4->check = 0;
664
 665         /* tunnel packet */
666         if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
667                                          SKB_GSO_GRE_CSUM |
668                                          SKB_GSO_UDP_TUNNEL |
669                                          SKB_GSO_UDP_TUNNEL_CSUM)) {
670                 if ((!(skb_shinfo(skb)->gso_type &
671                     SKB_GSO_PARTIAL)) &&
672                     (skb_shinfo(skb)->gso_type &
673                     SKB_GSO_UDP_TUNNEL_CSUM)) {
674                         /* Software should clear the udp's checksum
675                          * field when tso is needed.
676                          */
677                         l4.udp->check = 0;
678                 }
679                 /* reset l3&l4 pointers from outer to inner headers */
680                 l3.hdr = skb_inner_network_header(skb);
681                 l4.hdr = skb_inner_transport_header(skb);
682
683                 /* Software should clear the IPv4's checksum field when
684                  * tso is needed.
685                  */
686                 if (l3.v4->version == 4)
687                         l3.v4->check = 0;
688         }
689
 690         /* normal or tunnel packet */
691         l4_offset = l4.hdr - skb->data;
692         hdr_len = (l4.tcp->doff << 2) + l4_offset;
693
 694         /* remove payload length from inner pseudo checksum when tso */
695         l4_paylen = skb->len - l4_offset;
696         csum_replace_by_diff(&l4.tcp->check,
697                              (__force __wsum)htonl(l4_paylen));
698
699         /* find the txbd field values */
700         *paylen = skb->len - hdr_len;
701         hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
702
703         /* get MSS for TSO */
704         *mss = skb_shinfo(skb)->gso_size;
705
706         return 0;
707 }
708
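/* Extract the outer L4 protocol number and, for encapsulated skbs, the inner
 * one as well, skipping any IPv6 extension headers on the way.
 */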
709 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
710                                 u8 *il4_proto)
711 {
712         union l3_hdr_info l3;
713         unsigned char *l4_hdr;
714         unsigned char *exthdr;
715         u8 l4_proto_tmp;
716         __be16 frag_off;
717
 718         /* find outer header pointers */
719         l3.hdr = skb_network_header(skb);
720         l4_hdr = skb_transport_header(skb);
721
722         if (skb->protocol == htons(ETH_P_IPV6)) {
723                 exthdr = l3.hdr + sizeof(*l3.v6);
724                 l4_proto_tmp = l3.v6->nexthdr;
725                 if (l4_hdr != exthdr)
726                         ipv6_skip_exthdr(skb, exthdr - skb->data,
727                                          &l4_proto_tmp, &frag_off);
728         } else if (skb->protocol == htons(ETH_P_IP)) {
729                 l4_proto_tmp = l3.v4->protocol;
730         } else {
731                 return -EINVAL;
732         }
733
734         *ol4_proto = l4_proto_tmp;
735
736         /* tunnel packet */
737         if (!skb->encapsulation) {
738                 *il4_proto = 0;
739                 return 0;
740         }
741
 742         /* find inner header pointers */
743         l3.hdr = skb_inner_network_header(skb);
744         l4_hdr = skb_inner_transport_header(skb);
745
746         if (l3.v6->version == 6) {
747                 exthdr = l3.hdr + sizeof(*l3.v6);
748                 l4_proto_tmp = l3.v6->nexthdr;
749                 if (l4_hdr != exthdr)
750                         ipv6_skip_exthdr(skb, exthdr - skb->data,
751                                          &l4_proto_tmp, &frag_off);
752         } else if (l3.v4->version == 4) {
753                 l4_proto_tmp = l3.v4->protocol;
754         }
755
756         *il4_proto = l4_proto_tmp;
757
758         return 0;
759 }
760
 761 /* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
 762  * and the packet is a UDP packet whose dest port is the IANA-assigned
 763  * VXLAN port, the hardware is expected to do the checksum offload.
 764  * However, the hardware will not do the checksum offload when the udp
 765  * dest port is 4789.
 766  */
767 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
768 {
769         union l4_hdr_info l4;
770
771         l4.hdr = skb_transport_header(skb);
772
773         if (!(!skb->encapsulation &&
774               l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
775                 return false;
776
777         skb_checksum_help(skb);
778
779         return true;
780 }
781
782 static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
783                                   u32 *ol_type_vlan_len_msec)
784 {
785         u32 l2_len, l3_len, l4_len;
786         unsigned char *il2_hdr;
787         union l3_hdr_info l3;
788         union l4_hdr_info l4;
789
790         l3.hdr = skb_network_header(skb);
791         l4.hdr = skb_transport_header(skb);
792
793         /* compute OL2 header size, defined in 2 Bytes */
794         l2_len = l3.hdr - skb->data;
795         hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
796
797         /* compute OL3 header size, defined in 4 Bytes */
798         l3_len = l4.hdr - l3.hdr;
799         hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
800
801         il2_hdr = skb_inner_mac_header(skb);
802         /* compute OL4 header size, defined in 4 Bytes. */
803         l4_len = il2_hdr - l4.hdr;
804         hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
805
806         /* define outer network header type */
807         if (skb->protocol == htons(ETH_P_IP)) {
808                 if (skb_is_gso(skb))
809                         hns3_set_field(*ol_type_vlan_len_msec,
810                                        HNS3_TXD_OL3T_S,
811                                        HNS3_OL3T_IPV4_CSUM);
812                 else
813                         hns3_set_field(*ol_type_vlan_len_msec,
814                                        HNS3_TXD_OL3T_S,
815                                        HNS3_OL3T_IPV4_NO_CSUM);
816
817         } else if (skb->protocol == htons(ETH_P_IPV6)) {
818                 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
819                                HNS3_OL3T_IPV6);
820         }
821
822         if (ol4_proto == IPPROTO_UDP)
823                 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
824                                HNS3_TUN_MAC_IN_UDP);
825         else if (ol4_proto == IPPROTO_GRE)
826                 hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
827                                HNS3_TUN_NVGRE);
828 }
829
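/* Fill the L2/L3/L4 length, type and checksum-enable fields of the TX BD for
 * both plain and encapsulated skbs. Unsupported tunnel or L4 protocols fall
 * back to software checksumming, except for GSO skbs which are rejected.
 */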
830 static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
831                            u8 il4_proto, u32 *type_cs_vlan_tso,
832                            u32 *ol_type_vlan_len_msec)
833 {
834         unsigned char *l2_hdr = skb->data;
835         u32 l4_proto = ol4_proto;
836         union l4_hdr_info l4;
837         union l3_hdr_info l3;
838         u32 l2_len, l3_len;
839
840         l4.hdr = skb_transport_header(skb);
841         l3.hdr = skb_network_header(skb);
842
843         /* handle encapsulation skb */
844         if (skb->encapsulation) {
 845                 /* If this is not a UDP/GRE encapsulated skb */
 846                 if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
 847                         /* drop the tunnel skb if the hardware doesn't support it,
 848                          * because the hardware can't calculate the csum when doing TSO.
 849                          */
 850                         if (skb_is_gso(skb))
 851                                 return -EDOM;
 852
 853                         /* the stack computes the IP header already, the driver
 854                          * calculates the l4 checksum when not doing TSO.
 855                          */
856                         skb_checksum_help(skb);
857                         return 0;
858                 }
859
860                 hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
861
862                 /* switch to inner header */
863                 l2_hdr = skb_inner_mac_header(skb);
864                 l3.hdr = skb_inner_network_header(skb);
865                 l4.hdr = skb_inner_transport_header(skb);
866                 l4_proto = il4_proto;
867         }
868
869         if (l3.v4->version == 4) {
870                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
871                                HNS3_L3T_IPV4);
872
873                 /* the stack computes the IP header already, the only time we
874                  * need the hardware to recompute it is in the case of TSO.
875                  */
876                 if (skb_is_gso(skb))
877                         hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
878         } else if (l3.v6->version == 6) {
879                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
880                                HNS3_L3T_IPV6);
881         }
882
883         /* compute inner(/normal) L2 header size, defined in 2 Bytes */
884         l2_len = l3.hdr - l2_hdr;
885         hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
886
887         /* compute inner(/normal) L3 header size, defined in 4 Bytes */
888         l3_len = l4.hdr - l3.hdr;
889         hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
890
891         /* compute inner(/normal) L4 header size, defined in 4 Bytes */
892         switch (l4_proto) {
893         case IPPROTO_TCP:
894                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
895                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
896                                HNS3_L4T_TCP);
897                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
898                                l4.tcp->doff);
899                 break;
900         case IPPROTO_UDP:
901                 if (hns3_tunnel_csum_bug(skb))
902                         break;
903
904                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
905                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
906                                HNS3_L4T_UDP);
907                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
908                                (sizeof(struct udphdr) >> 2));
909                 break;
910         case IPPROTO_SCTP:
911                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
912                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
913                                HNS3_L4T_SCTP);
914                 hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
915                                (sizeof(struct sctphdr) >> 2));
916                 break;
917         default:
 918                 /* drop the tunnel skb if the hardware doesn't support it,
 919                  * because the hardware can't calculate the csum when doing TSO.
 920                  */
 921                 if (skb_is_gso(skb))
 922                         return -EDOM;
 923
 924                 /* the stack computes the IP header already, the driver
 925                  * calculates the l4 checksum when not doing TSO.
 926                  */
927                 skb_checksum_help(skb);
928                 return 0;
929         }
930
931         return 0;
932 }
933
934 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
935 {
936         /* Config bd buffer end */
937         hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
938         hns3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
939 }
940
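/* Decide how the skb's VLAN tag is passed to hardware: via the outer or inner
 * vtag BD field depending on the port based VLAN state, or left in the frame
 * when HW VLAN tag insertion is disabled. Multi-tagged skbs are rejected when
 * port based insert VLAN is enabled.
 */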
941 static int hns3_fill_desc_vtags(struct sk_buff *skb,
942                                 struct hns3_enet_ring *tx_ring,
943                                 u32 *inner_vlan_flag,
944                                 u32 *out_vlan_flag,
945                                 u16 *inner_vtag,
946                                 u16 *out_vtag)
947 {
948 #define HNS3_TX_VLAN_PRIO_SHIFT 13
949
950         struct hnae3_handle *handle = tx_ring->tqp->handle;
951
 952         /* Due to a HW limitation, if port based insert VLAN is enabled, only
 953          * one VLAN header is allowed in the skb, otherwise it causes a RAS error.
 954          */
955         if (unlikely(skb_vlan_tagged_multi(skb) &&
956                      handle->port_base_vlan_state ==
957                      HNAE3_PORT_BASE_VLAN_ENABLE))
958                 return -EINVAL;
959
960         if (skb->protocol == htons(ETH_P_8021Q) &&
961             !(tx_ring->tqp->handle->kinfo.netdev->features &
962             NETIF_F_HW_VLAN_CTAG_TX)) {
963                 /* When HW VLAN acceleration is turned off, and the stack
 964                  * sets the protocol to 802.1q, the driver just needs to
965                  * set the protocol to the encapsulated ethertype.
966                  */
967                 skb->protocol = vlan_get_protocol(skb);
968                 return 0;
969         }
970
971         if (skb_vlan_tag_present(skb)) {
972                 u16 vlan_tag;
973
974                 vlan_tag = skb_vlan_tag_get(skb);
975                 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
976
977                 /* Based on hw strategy, use out_vtag in two layer tag case,
978                  * and use inner_vtag in one tag case.
979                  */
980                 if (skb->protocol == htons(ETH_P_8021Q)) {
981                         if (handle->port_base_vlan_state ==
 982                             HNAE3_PORT_BASE_VLAN_DISABLE) {
983                                 hns3_set_field(*out_vlan_flag,
984                                                HNS3_TXD_OVLAN_B, 1);
985                                 *out_vtag = vlan_tag;
986                         } else {
987                                 hns3_set_field(*inner_vlan_flag,
988                                                HNS3_TXD_VLAN_B, 1);
989                                 *inner_vtag = vlan_tag;
990                         }
991                 } else {
992                         hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
993                         *inner_vtag = vlan_tag;
994                 }
995         } else if (skb->protocol == htons(ETH_P_8021Q)) {
996                 struct vlan_ethhdr *vhdr;
997                 int rc;
998
999                 rc = skb_cow_head(skb, 0);
1000                 if (unlikely(rc < 0))
1001                         return rc;
1002                 vhdr = (struct vlan_ethhdr *)skb->data;
1003                 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
1004                                         << HNS3_TX_VLAN_PRIO_SHIFT);
1005         }
1006
1007         skb->protocol = vlan_get_protocol(skb);
1008         return 0;
1009 }
1010
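/* DMA map one buffer (the skb head or a page fragment) and fill the TX BDs
 * for it. For DESC_TYPE_SKB buffers the checksum, TSO and VLAN fields are
 * computed and written into the BD as well; buffers larger than
 * HNS3_MAX_BD_SIZE are split across multiple BDs.
 */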
1011 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
1012                           int size, int frag_end, enum hns_desc_type type)
1013 {
1014         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
1015         struct hns3_desc *desc = &ring->desc[ring->next_to_use];
1016         struct device *dev = ring_to_dev(ring);
1017         struct skb_frag_struct *frag;
1018         unsigned int frag_buf_num;
1019         int k, sizeoflast;
1020         dma_addr_t dma;
1021
1022         if (type == DESC_TYPE_SKB) {
1023                 struct sk_buff *skb = (struct sk_buff *)priv;
1024                 u32 ol_type_vlan_len_msec = 0;
1025                 u32 type_cs_vlan_tso = 0;
1026                 u32 paylen = skb->len;
1027                 u16 inner_vtag = 0;
1028                 u16 out_vtag = 0;
1029                 u16 mss = 0;
1030                 int ret;
1031
1032                 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
1033                                            &ol_type_vlan_len_msec,
1034                                            &inner_vtag, &out_vtag);
1035                 if (unlikely(ret))
1036                         return ret;
1037
1038                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1039                         u8 ol4_proto, il4_proto;
1040
1041                         skb_reset_mac_len(skb);
1042
1043                         ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
1044                         if (unlikely(ret))
1045                                 return ret;
1046
1047                         ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
1048                                               &type_cs_vlan_tso,
1049                                               &ol_type_vlan_len_msec);
1050                         if (unlikely(ret))
1051                                 return ret;
1052
1053                         ret = hns3_set_tso(skb, &paylen, &mss,
1054                                            &type_cs_vlan_tso);
1055                         if (unlikely(ret))
1056                                 return ret;
1057                 }
1058
1059                 /* Set txbd */
1060                 desc->tx.ol_type_vlan_len_msec =
1061                         cpu_to_le32(ol_type_vlan_len_msec);
1062                 desc->tx.type_cs_vlan_tso_len =
1063                         cpu_to_le32(type_cs_vlan_tso);
1064                 desc->tx.paylen = cpu_to_le32(paylen);
1065                 desc->tx.mss = cpu_to_le16(mss);
1066                 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
1067                 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
1068
1069                 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1070         } else {
1071                 frag = (struct skb_frag_struct *)priv;
1072                 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1073         }
1074
1075         if (unlikely(dma_mapping_error(dev, dma))) {
1076                 ring->stats.sw_err_cnt++;
1077                 return -ENOMEM;
1078         }
1079
1080         desc_cb->length = size;
1081
1082         if (likely(size <= HNS3_MAX_BD_SIZE)) {
1083                 u16 bdtp_fe_sc_vld_ra_ri = 0;
1084
1085                 desc_cb->priv = priv;
1086                 desc_cb->dma = dma;
1087                 desc_cb->type = type;
1088                 desc->addr = cpu_to_le64(dma);
1089                 desc->tx.send_size = cpu_to_le16(size);
1090                 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
1091                 desc->tx.bdtp_fe_sc_vld_ra_ri =
1092                         cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1093
1094                 ring_ptr_move_fw(ring, next_to_use);
1095                 return 0;
1096         }
1097
1098         frag_buf_num = hns3_tx_bd_count(size);
1099         sizeoflast = size & HNS3_TX_LAST_SIZE_M;
1100         sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1101
1102         /* When frag size is bigger than hardware limit, split this frag */
1103         for (k = 0; k < frag_buf_num; k++) {
1104                 u16 bdtp_fe_sc_vld_ra_ri = 0;
1105
1106                 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
1107                 desc_cb->priv = priv;
1108                 desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
1109                 desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
1110                                         DESC_TYPE_SKB : DESC_TYPE_PAGE;
1111
1112                 /* now, fill the descriptor */
1113                 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
1114                 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1115                                 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1116                 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
1117                                        frag_end && (k == frag_buf_num - 1) ?
1118                                                 1 : 0);
1119                 desc->tx.bdtp_fe_sc_vld_ra_ri =
1120                                 cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1121
 1122                 /* move ring pointer to next */
1123                 ring_ptr_move_fw(ring, next_to_use);
1124
1125                 desc_cb = &ring->desc_cb[ring->next_to_use];
1126                 desc = &ring->desc[ring->next_to_use];
1127         }
1128
1129         return 0;
1130 }
1131
1132 static int hns3_nic_bd_num(struct sk_buff *skb)
1133 {
1134         int size = skb_headlen(skb);
1135         int i, bd_num;
1136
1137         /* if the total len is within the max bd limit */
1138         if (likely(skb->len <= HNS3_MAX_BD_SIZE))
1139                 return skb_shinfo(skb)->nr_frags + 1;
1140
1141         bd_num = hns3_tx_bd_count(size);
1142
1143         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1144                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1145                 int frag_bd_num;
1146
1147                 size = skb_frag_size(frag);
1148                 frag_bd_num = hns3_tx_bd_count(size);
1149
1150                 if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
1151                         return -ENOMEM;
1152
1153                 bd_num += frag_bd_num;
1154         }
1155
1156         return bd_num;
1157 }
1158
1159 static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
1160 {
1161         if (!skb->encapsulation)
1162                 return skb_transport_offset(skb) + tcp_hdrlen(skb);
1163
1164         return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
1165 }
1166
 1167 /* HW needs every 8 continuous buffers of data to be larger than MSS.
 1168  * We simplify this by ensuring skb_headlen + the first 7 continuous
 1169  * frags are larger than the gso header len + mss, and that every other
 1170  * window of 7 continuous frags is larger than MSS, except the last 7 frags.
 1171  */
1172 static bool hns3_skb_need_linearized(struct sk_buff *skb)
1173 {
1174         int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
1175         unsigned int tot_len = 0;
1176         int i;
1177
1178         for (i = 0; i < bd_limit; i++)
1179                 tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
1180
 1181         /* ensure headlen + the first 7 frags are greater than mss + header
 1182          * len, and that the first 7 frags alone are greater than mss.
 1183          */
1184         if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
1185             hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
1186                 return true;
1187
 1188         /* ensure every remaining window of 7 continuous frags is greater than mss */
1189         for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
1190                 tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
1191                 tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
1192
1193                 if (tot_len < skb_shinfo(skb)->gso_size)
1194                         return true;
1195         }
1196
1197         return false;
1198 }
1199
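/* Check how many BDs the skb needs and whether the ring has room for them;
 * skbs with too many fragments are linearized by copy first. Returns the BD
 * count, -EBUSY when the ring is full, or -ENOMEM on failure.
 */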
1200 static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
1201                                   struct sk_buff **out_skb)
1202 {
1203         struct sk_buff *skb = *out_skb;
1204         int bd_num;
1205
1206         bd_num = hns3_nic_bd_num(skb);
1207         if (bd_num < 0)
1208                 return bd_num;
1209
1210         if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
1211                 struct sk_buff *new_skb;
1212
1213                 if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
1214                         goto out;
1215
1216                 bd_num = hns3_tx_bd_count(skb->len);
1217                 if (unlikely(ring_space(ring) < bd_num))
1218                         return -EBUSY;
 1219                 /* manually linearize the send packet by copying it */
1220                 new_skb = skb_copy(skb, GFP_ATOMIC);
1221                 if (!new_skb)
1222                         return -ENOMEM;
1223                 dev_kfree_skb_any(skb);
1224                 *out_skb = new_skb;
1225
1226                 u64_stats_update_begin(&ring->syncp);
1227                 ring->stats.tx_copy++;
1228                 u64_stats_update_end(&ring->syncp);
1229         }
1230
1231 out:
1232         if (unlikely(ring_space(ring) < bd_num))
1233                 return -EBUSY;
1234
1235         return bd_num;
1236 }
1237
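/* Roll the ring back to next_to_use_orig after a failed xmit, unmapping the
 * DMA address of every BD that had already been filled.
 */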
1238 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
1239 {
1240         struct device *dev = ring_to_dev(ring);
1241         unsigned int i;
1242
1243         for (i = 0; i < ring->desc_num; i++) {
1244                 /* check if this is where we started */
1245                 if (ring->next_to_use == next_to_use_orig)
1246                         break;
1247
1248                 /* rollback one */
1249                 ring_ptr_move_bw(ring, next_to_use);
1250
1251                 /* unmap the descriptor dma address */
1252                 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1253                         dma_unmap_single(dev,
1254                                          ring->desc_cb[ring->next_to_use].dma,
1255                                         ring->desc_cb[ring->next_to_use].length,
1256                                         DMA_TO_DEVICE);
1257                 else if (ring->desc_cb[ring->next_to_use].length)
1258                         dma_unmap_page(dev,
1259                                        ring->desc_cb[ring->next_to_use].dma,
1260                                        ring->desc_cb[ring->next_to_use].length,
1261                                        DMA_TO_DEVICE);
1262
1263                 ring->desc_cb[ring->next_to_use].length = 0;
1264                 ring->desc_cb[ring->next_to_use].dma = 0;
1265         }
1266 }
1267
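/* Main transmit path: reserve ring space, fill BDs for the linear part of the
 * skb and for each page fragment, then account the bytes and ring the
 * doorbell.
 */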
1268 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1269 {
1270         struct hns3_nic_priv *priv = netdev_priv(netdev);
1271         struct hns3_nic_ring_data *ring_data =
1272                 &tx_ring_data(priv, skb->queue_mapping);
1273         struct hns3_enet_ring *ring = ring_data->ring;
1274         struct netdev_queue *dev_queue;
1275         struct skb_frag_struct *frag;
1276         int next_to_use_head;
1277         int buf_num;
1278         int seg_num;
1279         int size;
1280         int ret;
1281         int i;
1282
1283         /* Prefetch the data used later */
1284         prefetch(skb->data);
1285
1286         buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
1287         if (unlikely(buf_num <= 0)) {
1288                 if (buf_num == -EBUSY) {
1289                         u64_stats_update_begin(&ring->syncp);
1290                         ring->stats.tx_busy++;
1291                         u64_stats_update_end(&ring->syncp);
1292                         goto out_net_tx_busy;
1293                 } else if (buf_num == -ENOMEM) {
1294                         u64_stats_update_begin(&ring->syncp);
1295                         ring->stats.sw_err_cnt++;
1296                         u64_stats_update_end(&ring->syncp);
1297                 }
1298
1299                 if (net_ratelimit())
1300                         netdev_err(netdev, "xmit error: %d!\n", buf_num);
1301
1302                 goto out_err_tx_ok;
1303         }
1304
 1305         /* No. of frag segments, plus one for the linear part at the head */
1306         seg_num = skb_shinfo(skb)->nr_frags + 1;
1307         /* Fill the first part */
1308         size = skb_headlen(skb);
1309
1310         next_to_use_head = ring->next_to_use;
1311
1312         ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
1313                              DESC_TYPE_SKB);
1314         if (unlikely(ret))
1315                 goto fill_err;
1316
1317         /* Fill the fragments */
1318         for (i = 1; i < seg_num; i++) {
1319                 frag = &skb_shinfo(skb)->frags[i - 1];
1320                 size = skb_frag_size(frag);
1321
1322                 ret = hns3_fill_desc(ring, frag, size,
1323                                      seg_num - 1 == i ? 1 : 0,
1324                                      DESC_TYPE_PAGE);
1325
1326                 if (unlikely(ret))
1327                         goto fill_err;
1328         }
1329
 1330         /* Finished filling the BDs for this packet */
1331         dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1332         netdev_tx_sent_queue(dev_queue, skb->len);
1333
1334         wmb(); /* Commit all data before submit */
1335
1336         hnae3_queue_xmit(ring->tqp, buf_num);
1337
1338         return NETDEV_TX_OK;
1339
1340 fill_err:
1341         hns3_clear_desc(ring, next_to_use_head);
1342
1343 out_err_tx_ok:
1344         dev_kfree_skb_any(skb);
1345         return NETDEV_TX_OK;
1346
1347 out_net_tx_busy:
1348         netif_stop_subqueue(netdev, ring_data->queue_index);
1349         smp_mb(); /* Commit all data before submit */
1350
1351         return NETDEV_TX_BUSY;
1352 }
1353
1354 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1355 {
1356         struct hnae3_handle *h = hns3_get_handle(netdev);
1357         struct sockaddr *mac_addr = p;
1358         int ret;
1359
1360         if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1361                 return -EADDRNOTAVAIL;
1362
1363         if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1364                 netdev_info(netdev, "already using mac address %pM\n",
1365                             mac_addr->sa_data);
1366                 return 0;
1367         }
1368
1369         ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1370         if (ret) {
1371                 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1372                 return ret;
1373         }
1374
1375         ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1376
1377         return 0;
1378 }
1379
1380 static int hns3_nic_do_ioctl(struct net_device *netdev,
1381                              struct ifreq *ifr, int cmd)
1382 {
1383         struct hnae3_handle *h = hns3_get_handle(netdev);
1384
1385         if (!netif_running(netdev))
1386                 return -EINVAL;
1387
1388         if (!h->ae_algo->ops->do_ioctl)
1389                 return -EOPNOTSUPP;
1390
1391         return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
1392 }
1393
1394 static int hns3_nic_set_features(struct net_device *netdev,
1395                                  netdev_features_t features)
1396 {
1397         netdev_features_t changed = netdev->features ^ features;
1398         struct hns3_nic_priv *priv = netdev_priv(netdev);
1399         struct hnae3_handle *h = priv->ae_handle;
1400         bool enable;
1401         int ret;
1402
1403         if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
1404                 enable = !!(features & NETIF_F_GRO_HW);
1405                 ret = h->ae_algo->ops->set_gro_en(h, enable);
1406                 if (ret)
1407                         return ret;
1408         }
1409
1410         if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1411             h->ae_algo->ops->enable_vlan_filter) {
1412                 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
1413                 h->ae_algo->ops->enable_vlan_filter(h, enable);
1414         }
1415
1416         if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1417             h->ae_algo->ops->enable_hw_strip_rxvtag) {
1418                 enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1419                 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
1420                 if (ret)
1421                         return ret;
1422         }
1423
1424         if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
1425                 enable = !!(features & NETIF_F_NTUPLE);
1426                 h->ae_algo->ops->enable_fd(h, enable);
1427         }
1428
1429         netdev->features = features;
1430         return 0;
1431 }
1432
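/* Aggregate the software per-ring TX/RX counters under their u64_stats
 * seqcounts and combine them with the hardware counters kept in
 * netdev->stats by the ae_dev.
 */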
1433 static void hns3_nic_get_stats64(struct net_device *netdev,
1434                                  struct rtnl_link_stats64 *stats)
1435 {
1436         struct hns3_nic_priv *priv = netdev_priv(netdev);
1437         int queue_num = priv->ae_handle->kinfo.num_tqps;
1438         struct hnae3_handle *handle = priv->ae_handle;
1439         struct hns3_enet_ring *ring;
1440         u64 rx_length_errors = 0;
1441         u64 rx_crc_errors = 0;
1442         u64 rx_multicast = 0;
1443         unsigned int start;
1444         u64 tx_errors = 0;
1445         u64 rx_errors = 0;
1446         unsigned int idx;
1447         u64 tx_bytes = 0;
1448         u64 rx_bytes = 0;
1449         u64 tx_pkts = 0;
1450         u64 rx_pkts = 0;
1451         u64 tx_drop = 0;
1452         u64 rx_drop = 0;
1453
1454         if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1455                 return;
1456
1457         handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1458
1459         for (idx = 0; idx < queue_num; idx++) {
1460                 /* fetch the tx stats */
1461                 ring = priv->ring_data[idx].ring;
1462                 do {
1463                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1464                         tx_bytes += ring->stats.tx_bytes;
1465                         tx_pkts += ring->stats.tx_pkts;
1466                         tx_drop += ring->stats.sw_err_cnt;
1467                         tx_errors += ring->stats.sw_err_cnt;
1468                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1469
1470                 /* fetch the rx stats */
1471                 ring = priv->ring_data[idx + queue_num].ring;
1472                 do {
1473                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1474                         rx_bytes += ring->stats.rx_bytes;
1475                         rx_pkts += ring->stats.rx_pkts;
1476                         rx_drop += ring->stats.non_vld_descs;
1477                         rx_drop += ring->stats.l2_err;
1478                         rx_errors += ring->stats.non_vld_descs;
1479                         rx_errors += ring->stats.l2_err;
1480                         rx_crc_errors += ring->stats.l2_err;
1481                         rx_crc_errors += ring->stats.l3l4_csum_err;
1482                         rx_multicast += ring->stats.rx_multicast;
1483                         rx_length_errors += ring->stats.err_pkt_len;
1484                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1485         }
1486
1487         stats->tx_bytes = tx_bytes;
1488         stats->tx_packets = tx_pkts;
1489         stats->rx_bytes = rx_bytes;
1490         stats->rx_packets = rx_pkts;
1491
1492         stats->rx_errors = rx_errors;
1493         stats->multicast = rx_multicast;
1494         stats->rx_length_errors = rx_length_errors;
1495         stats->rx_crc_errors = rx_crc_errors;
1496         stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1497
1498         stats->tx_errors = tx_errors;
1499         stats->rx_dropped = rx_drop;
1500         stats->tx_dropped = tx_drop;
1501         stats->collisions = netdev->stats.collisions;
1502         stats->rx_over_errors = netdev->stats.rx_over_errors;
1503         stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1504         stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1505         stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1506         stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1507         stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1508         stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1509         stats->tx_window_errors = netdev->stats.tx_window_errors;
1510         stats->rx_compressed = netdev->stats.rx_compressed;
1511         stats->tx_compressed = netdev->stats.tx_compressed;
1512 }
1513
1514 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1515 {
1516         struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1517         struct hnae3_handle *h = hns3_get_handle(netdev);
1518         struct hnae3_knic_private_info *kinfo = &h->kinfo;
1519         u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1520         u8 tc = mqprio_qopt->qopt.num_tc;
1521         u16 mode = mqprio_qopt->mode;
1522         u8 hw = mqprio_qopt->qopt.hw;
1523
1524         if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1525                mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1526                 return -EOPNOTSUPP;
1527
1528         if (tc > HNAE3_MAX_TC)
1529                 return -EINVAL;
1530
1531         if (!netdev)
1532                 return -EINVAL;
1533
1534         return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1535                 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1536 }
1537
1538 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1539                              void *type_data)
1540 {
1541         if (type != TC_SETUP_QDISC_MQPRIO)
1542                 return -EOPNOTSUPP;
1543
1544         return hns3_setup_tc(dev, type_data);
1545 }
1546
1547 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1548                                 __be16 proto, u16 vid)
1549 {
1550         struct hnae3_handle *h = hns3_get_handle(netdev);
1551         int ret = -EIO;
1552
1553         if (h->ae_algo->ops->set_vlan_filter)
1554                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1555
1556         return ret;
1557 }
1558
1559 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1560                                  __be16 proto, u16 vid)
1561 {
1562         struct hnae3_handle *h = hns3_get_handle(netdev);
1563         int ret = -EIO;
1564
1565         if (h->ae_algo->ops->set_vlan_filter)
1566                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1567
1568         return ret;
1569 }
1570
1571 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1572                                 u8 qos, __be16 vlan_proto)
1573 {
1574         struct hnae3_handle *h = hns3_get_handle(netdev);
1575         int ret = -EIO;
1576
1577         if (h->ae_algo->ops->set_vf_vlan_filter)
1578                 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1579                                                    qos, vlan_proto);
1580
1581         return ret;
1582 }
1583
1584 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1585 {
1586         struct hnae3_handle *h = hns3_get_handle(netdev);
1587         int ret;
1588
1589         if (hns3_nic_resetting(netdev))
1590                 return -EBUSY;
1591
1592         if (!h->ae_algo->ops->set_mtu)
1593                 return -EOPNOTSUPP;
1594
1595         ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1596         if (ret)
1597                 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1598                            ret);
1599         else
1600                 netdev->mtu = new_mtu;
1601
1602         return ret;
1603 }
1604
1605 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1606 {
1607         struct hns3_nic_priv *priv = netdev_priv(ndev);
1608         struct hnae3_handle *h = hns3_get_handle(ndev);
1609         struct hns3_enet_ring *tx_ring = NULL;
1610         struct napi_struct *napi;
1611         int timeout_queue = 0;
1612         int hw_head, hw_tail;
1613         int fbd_num, fbd_oft;
1614         int ebd_num, ebd_oft;
1615         int bd_num, bd_err;
1616         int ring_en, tc;
1617         int i;
1618
1619         /* Find the stopped queue the same way the stack does */
1620         for (i = 0; i < ndev->num_tx_queues; i++) {
1621                 struct netdev_queue *q;
1622                 unsigned long trans_start;
1623
1624                 q = netdev_get_tx_queue(ndev, i);
1625                 trans_start = q->trans_start;
1626                 if (netif_xmit_stopped(q) &&
1627                     time_after(jiffies,
1628                                (trans_start + ndev->watchdog_timeo))) {
1629                         timeout_queue = i;
1630                         break;
1631                 }
1632         }
1633
1634         if (i == ndev->num_tx_queues) {
1635                 netdev_info(ndev,
1636                             "no netdev TX timeout queue found, timeout count: %llu\n",
1637                             priv->tx_timeout_count);
1638                 return false;
1639         }
1640
1641         priv->tx_timeout_count++;
1642
1643         tx_ring = priv->ring_data[timeout_queue].ring;
1644         napi = &tx_ring->tqp_vector->napi;
1645
1646         netdev_info(ndev,
1647                     "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
1648                     priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
1649                     tx_ring->next_to_clean, napi->state);
1650
1651         netdev_info(ndev,
1652                     "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
1653                     tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
1654                     tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
1655
1656         netdev_info(ndev,
1657                     "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
1658                     tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
1659                     tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
1660
1661         /* When the MAC receives many pause frames continuously, it is unable
1662          * to send packets, which may cause a TX timeout
1663          */
1664         if (h->ae_algo->ops->update_stats &&
1665             h->ae_algo->ops->get_mac_pause_stats) {
1666                 u64 tx_pause_cnt, rx_pause_cnt;
1667
1668                 h->ae_algo->ops->update_stats(h, &ndev->stats);
1669                 h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
1670                                                      &rx_pause_cnt);
1671                 netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
1672                             tx_pause_cnt, rx_pause_cnt);
1673         }
1674
1675         hw_head = readl_relaxed(tx_ring->tqp->io_base +
1676                                 HNS3_RING_TX_RING_HEAD_REG);
1677         hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1678                                 HNS3_RING_TX_RING_TAIL_REG);
1679         fbd_num = readl_relaxed(tx_ring->tqp->io_base +
1680                                 HNS3_RING_TX_RING_FBDNUM_REG);
1681         fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
1682                                 HNS3_RING_TX_RING_OFFSET_REG);
1683         ebd_num = readl_relaxed(tx_ring->tqp->io_base +
1684                                 HNS3_RING_TX_RING_EBDNUM_REG);
1685         ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
1686                                 HNS3_RING_TX_RING_EBD_OFFSET_REG);
1687         bd_num = readl_relaxed(tx_ring->tqp->io_base +
1688                                HNS3_RING_TX_RING_BD_NUM_REG);
1689         bd_err = readl_relaxed(tx_ring->tqp->io_base +
1690                                HNS3_RING_TX_RING_BD_ERR_REG);
1691         ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
1692         tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
1693
1694         netdev_info(ndev,
1695                     "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
1696                     bd_num, hw_head, hw_tail, bd_err,
1697                     readl(tx_ring->tqp_vector->mask_addr));
1698         netdev_info(ndev,
1699                     "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
1700                     ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
1701
1702         return true;
1703 }
1704
1705 static void hns3_nic_net_timeout(struct net_device *ndev)
1706 {
1707         struct hns3_nic_priv *priv = netdev_priv(ndev);
1708         struct hnae3_handle *h = priv->ae_handle;
1709
1710         if (!hns3_get_tx_timeo_queue_info(ndev))
1711                 return;
1712
1713         /* request the reset, and let the hclge driver determine
1714          * which reset level should be done
1715          */
1716         if (h->ae_algo->ops->reset_event)
1717                 h->ae_algo->ops->reset_event(h->pdev, h);
1718 }
1719
1720 #ifdef CONFIG_RFS_ACCEL
1721 static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1722                               u16 rxq_index, u32 flow_id)
1723 {
1724         struct hnae3_handle *h = hns3_get_handle(dev);
1725         struct flow_keys fkeys;
1726
1727         if (!h->ae_algo->ops->add_arfs_entry)
1728                 return -EOPNOTSUPP;
1729
1730         if (skb->encapsulation)
1731                 return -EPROTONOSUPPORT;
1732
1733         if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
1734                 return -EPROTONOSUPPORT;
1735
1736         if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
1737              fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
1738             (fkeys.basic.ip_proto != IPPROTO_TCP &&
1739              fkeys.basic.ip_proto != IPPROTO_UDP))
1740                 return -EPROTONOSUPPORT;
1741
1742         return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
1743 }
1744 #endif
1745
1746 static const struct net_device_ops hns3_nic_netdev_ops = {
1747         .ndo_open               = hns3_nic_net_open,
1748         .ndo_stop               = hns3_nic_net_stop,
1749         .ndo_start_xmit         = hns3_nic_net_xmit,
1750         .ndo_tx_timeout         = hns3_nic_net_timeout,
1751         .ndo_set_mac_address    = hns3_nic_net_set_mac_address,
1752         .ndo_do_ioctl           = hns3_nic_do_ioctl,
1753         .ndo_change_mtu         = hns3_nic_change_mtu,
1754         .ndo_set_features       = hns3_nic_set_features,
1755         .ndo_get_stats64        = hns3_nic_get_stats64,
1756         .ndo_setup_tc           = hns3_nic_setup_tc,
1757         .ndo_set_rx_mode        = hns3_nic_set_rx_mode,
1758         .ndo_vlan_rx_add_vid    = hns3_vlan_rx_add_vid,
1759         .ndo_vlan_rx_kill_vid   = hns3_vlan_rx_kill_vid,
1760         .ndo_set_vf_vlan        = hns3_ndo_set_vf_vlan,
1761 #ifdef CONFIG_RFS_ACCEL
1762         .ndo_rx_flow_steer      = hns3_rx_flow_steer,
1763 #endif
1764
1765 };
1766
1767 bool hns3_is_phys_func(struct pci_dev *pdev)
1768 {
1769         u32 dev_id = pdev->device;
1770
1771         switch (dev_id) {
1772         case HNAE3_DEV_ID_GE:
1773         case HNAE3_DEV_ID_25GE:
1774         case HNAE3_DEV_ID_25GE_RDMA:
1775         case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1776         case HNAE3_DEV_ID_50GE_RDMA:
1777         case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1778         case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1779                 return true;
1780         case HNAE3_DEV_ID_100G_VF:
1781         case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1782                 return false;
1783         default:
1784                 dev_warn(&pdev->dev, "unrecognized pci device-id %d\n",
1785                          dev_id);
1786         }
1787
1788         return false;
1789 }
1790
1791 static void hns3_disable_sriov(struct pci_dev *pdev)
1792 {
1793         /* If our VFs are assigned we cannot shut down SR-IOV
1794          * without causing issues, so just leave the hardware
1795          * available but disabled
1796          */
1797         if (pci_vfs_assigned(pdev)) {
1798                 dev_warn(&pdev->dev,
1799                          "disabling driver while VFs are assigned\n");
1800                 return;
1801         }
1802
1803         pci_disable_sriov(pdev);
1804 }
1805
1806 static void hns3_get_dev_capability(struct pci_dev *pdev,
1807                                     struct hnae3_ae_dev *ae_dev)
1808 {
1809         if (pdev->revision >= 0x21) {
1810                 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
1811                 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
1812         }
1813 }
1814
1815 /* hns3_probe - Device initialization routine
1816  * @pdev: PCI device information struct
1817  * @ent: entry in hns3_pci_tbl
1818  *
1819  * hns3_probe initializes a PF identified by a pci_dev structure.
1820  * The OS initialization, configuring of the PF private structure,
1821  * and a hardware reset occur.
1822  *
1823  * Returns 0 on success, negative on failure
1824  */
1825 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1826 {
1827         struct hnae3_ae_dev *ae_dev;
1828         int ret;
1829
1830         ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1831                               GFP_KERNEL);
1832         if (!ae_dev) {
1833                 ret = -ENOMEM;
1834                 return ret;
1835         }
1836
1837         ae_dev->pdev = pdev;
1838         ae_dev->flag = ent->driver_data;
1839         ae_dev->dev_type = HNAE3_DEV_KNIC;
1840         ae_dev->reset_type = HNAE3_NONE_RESET;
1841         hns3_get_dev_capability(pdev, ae_dev);
1842         pci_set_drvdata(pdev, ae_dev);
1843
1844         ret = hnae3_register_ae_dev(ae_dev);
1845         if (ret) {
1846                 devm_kfree(&pdev->dev, ae_dev);
1847                 pci_set_drvdata(pdev, NULL);
1848         }
1849
1850         return ret;
1851 }
1852
1853 /* hns3_remove - Device removal routine
1854  * @pdev: PCI device information struct
1855  */
1856 static void hns3_remove(struct pci_dev *pdev)
1857 {
1858         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1859
1860         if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1861                 hns3_disable_sriov(pdev);
1862
1863         hnae3_unregister_ae_dev(ae_dev);
1864         pci_set_drvdata(pdev, NULL);
1865 }
1866
1867 /**
1868  * hns3_pci_sriov_configure
1869  * @pdev: pointer to a pci_dev structure
1870  * @num_vfs: number of VFs to allocate
1871  *
1872  * Enable or change the number of VFs. Called when the user updates the number
1873  * of VFs in sysfs.
1874  **/
1875 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1876 {
1877         int ret;
1878
1879         if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1880                 dev_warn(&pdev->dev, "Cannot configure SR-IOV\n");
1881                 return -EINVAL;
1882         }
1883
1884         if (num_vfs) {
1885                 ret = pci_enable_sriov(pdev, num_vfs);
1886                 if (ret)
1887                         dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1888                 else
1889                         return num_vfs;
1890         } else if (!pci_vfs_assigned(pdev)) {
1891                 pci_disable_sriov(pdev);
1892         } else {
1893                 dev_warn(&pdev->dev,
1894                          "Unable to free VFs because some are assigned to VMs.\n");
1895         }
1896
1897         return 0;
1898 }
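
/* For example, an administrator enables four VFs on a PF with:
 *
 *      echo 4 > /sys/bus/pci/devices/<domain:bus:dev.fn>/sriov_numvfs
 *
 * and disables them again by writing 0, which only succeeds while no VF is
 * assigned to a VM.
 */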
1899
1900 static void hns3_shutdown(struct pci_dev *pdev)
1901 {
1902         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1903
1904         hnae3_unregister_ae_dev(ae_dev);
1905         devm_kfree(&pdev->dev, ae_dev);
1906         pci_set_drvdata(pdev, NULL);
1907
1908         if (system_state == SYSTEM_POWER_OFF)
1909                 pci_set_power_state(pdev, PCI_D3hot);
1910 }
1911
1912 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
1913                                             pci_channel_state_t state)
1914 {
1915         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1916         pci_ers_result_t ret;
1917
1918         dev_info(&pdev->dev, "PCI error detected, state: %d\n", state);
1919
1920         if (state == pci_channel_io_perm_failure)
1921                 return PCI_ERS_RESULT_DISCONNECT;
1922
1923         if (!ae_dev) {
1924                 dev_err(&pdev->dev,
1925                         "Can't recover - error happened during device init\n");
1926                 return PCI_ERS_RESULT_NONE;
1927         }
1928
1929         if (ae_dev->ops->handle_hw_ras_error)
1930                 ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
1931         else
1932                 return PCI_ERS_RESULT_NONE;
1933
1934         return ret;
1935 }
1936
1937 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
1938 {
1939         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1940         struct device *dev = &pdev->dev;
1941
1942         dev_info(dev, "requesting reset due to PCI error\n");
1943
1944         /* request the reset */
1945         if (ae_dev->ops->reset_event) {
1946                 if (!ae_dev->override_pci_need_reset)
1947                         ae_dev->ops->reset_event(pdev, NULL);
1948
1949                 return PCI_ERS_RESULT_RECOVERED;
1950         }
1951
1952         return PCI_ERS_RESULT_DISCONNECT;
1953 }
1954
1955 static void hns3_reset_prepare(struct pci_dev *pdev)
1956 {
1957         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1958
1959         dev_info(&pdev->dev, "hns3 flr prepare\n");
1960         if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
1961                 ae_dev->ops->flr_prepare(ae_dev);
1962 }
1963
1964 static void hns3_reset_done(struct pci_dev *pdev)
1965 {
1966         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1967
1968         dev_info(&pdev->dev, "hns3 flr done\n");
1969         if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
1970                 ae_dev->ops->flr_done(ae_dev);
1971 }
1972
1973 static const struct pci_error_handlers hns3_err_handler = {
1974         .error_detected = hns3_error_detected,
1975         .slot_reset     = hns3_slot_reset,
1976         .reset_prepare  = hns3_reset_prepare,
1977         .reset_done     = hns3_reset_done,
1978 };
1979
1980 static struct pci_driver hns3_driver = {
1981         .name     = hns3_driver_name,
1982         .id_table = hns3_pci_tbl,
1983         .probe    = hns3_probe,
1984         .remove   = hns3_remove,
1985         .shutdown = hns3_shutdown,
1986         .sriov_configure = hns3_pci_sriov_configure,
1987         .err_handler    = &hns3_err_handler,
1988 };
1989
1990 /* set the default features for the hns3 netdev */
1991 static void hns3_set_default_feature(struct net_device *netdev)
1992 {
1993         struct hnae3_handle *h = hns3_get_handle(netdev);
1994         struct pci_dev *pdev = h->pdev;
1995
1996         netdev->priv_flags |= IFF_UNICAST_FLT;
1997
1998         netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1999                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2000                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2001                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2002                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2003
2004         netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
2005
2006         netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
2007
2008         netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2009                 NETIF_F_HW_VLAN_CTAG_FILTER |
2010                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2011                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2012                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2013                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2014                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2015
2016         netdev->vlan_features |=
2017                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
2018                 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
2019                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2020                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2021                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2022
2023         netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2024                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2025                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
2026                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2027                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
2028                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
2029
2030         if (pdev->revision >= 0x21) {
2031                 netdev->hw_features |= NETIF_F_GRO_HW;
2032                 netdev->features |= NETIF_F_GRO_HW;
2033
2034                 if (!(h->flags & HNAE3_SUPPORT_VF)) {
2035                         netdev->hw_features |= NETIF_F_NTUPLE;
2036                         netdev->features |= NETIF_F_NTUPLE;
2037                 }
2038         }
2039 }
2040
2041 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
2042                              struct hns3_desc_cb *cb)
2043 {
2044         unsigned int order = hnae3_page_order(ring);
2045         struct page *p;
2046
2047         p = dev_alloc_pages(order);
2048         if (!p)
2049                 return -ENOMEM;
2050
2051         cb->priv = p;
2052         cb->page_offset = 0;
2053         cb->reuse_flag = 0;
2054         cb->buf  = page_address(p);
2055         cb->length = hnae3_page_size(ring);
2056         cb->type = DESC_TYPE_PAGE;
2057
2058         return 0;
2059 }
2060
2061 static void hns3_free_buffer(struct hns3_enet_ring *ring,
2062                              struct hns3_desc_cb *cb)
2063 {
2064         if (cb->type == DESC_TYPE_SKB)
2065                 dev_kfree_skb_any((struct sk_buff *)cb->priv);
2066         else if (!HNAE3_IS_TX_RING(ring))
2067                 put_page((struct page *)cb->priv);
2068         memset(cb, 0, sizeof(*cb));
2069 }
2070
2071 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
2072 {
2073         cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
2074                                cb->length, ring_to_dma_dir(ring));
2075
2076         if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
2077                 return -EIO;
2078
2079         return 0;
2080 }
2081
2082 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
2083                               struct hns3_desc_cb *cb)
2084 {
2085         if (cb->type == DESC_TYPE_SKB)
2086                 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
2087                                  ring_to_dma_dir(ring));
2088         else if (cb->length)
2089                 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
2090                                ring_to_dma_dir(ring));
2091 }
2092
2093 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
2094 {
2095         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2096         ring->desc[i].addr = 0;
2097 }
2098
2099 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
2100 {
2101         struct hns3_desc_cb *cb = &ring->desc_cb[i];
2102
2103         if (!ring->desc_cb[i].dma)
2104                 return;
2105
2106         hns3_buffer_detach(ring, i);
2107         hns3_free_buffer(ring, cb);
2108 }
2109
2110 static void hns3_free_buffers(struct hns3_enet_ring *ring)
2111 {
2112         int i;
2113
2114         for (i = 0; i < ring->desc_num; i++)
2115                 hns3_free_buffer_detach(ring, i);
2116 }
2117
2118 /* free desc along with its attached buffer */
2119 static void hns3_free_desc(struct hns3_enet_ring *ring)
2120 {
2121         int size = ring->desc_num * sizeof(ring->desc[0]);
2122
2123         hns3_free_buffers(ring);
2124
2125         if (ring->desc) {
2126                 dma_free_coherent(ring_to_dev(ring), size,
2127                                   ring->desc, ring->desc_dma_addr);
2128                 ring->desc = NULL;
2129         }
2130 }
2131
2132 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
2133 {
2134         int size = ring->desc_num * sizeof(ring->desc[0]);
2135
2136         ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
2137                                         &ring->desc_dma_addr, GFP_KERNEL);
2138         if (!ring->desc)
2139                 return -ENOMEM;
2140
2141         return 0;
2142 }
2143
2144 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
2145                                    struct hns3_desc_cb *cb)
2146 {
2147         int ret;
2148
2149         ret = hns3_alloc_buffer(ring, cb);
2150         if (ret)
2151                 goto out;
2152
2153         ret = hns3_map_buffer(ring, cb);
2154         if (ret)
2155                 goto out_with_buf;
2156
2157         return 0;
2158
2159 out_with_buf:
2160         hns3_free_buffer(ring, cb);
2161 out:
2162         return ret;
2163 }
2164
2165 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
2166 {
2167         int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
2168
2169         if (ret)
2170                 return ret;
2171
2172         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2173
2174         return 0;
2175 }
2176
2177 /* Allocate memory for the raw packet buffers and map them for DMA */
2178 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
2179 {
2180         int i, j, ret;
2181
2182         for (i = 0; i < ring->desc_num; i++) {
2183                 ret = hns3_alloc_buffer_attach(ring, i);
2184                 if (ret)
2185                         goto out_buffer_fail;
2186         }
2187
2188         return 0;
2189
2190 out_buffer_fail:
2191         for (j = i - 1; j >= 0; j--)
2192                 hns3_free_buffer_detach(ring, j);
2193         return ret;
2194 }
2195
2196 /* detach an in-use buffer and replace it with a reserved one */
2197 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
2198                                 struct hns3_desc_cb *res_cb)
2199 {
2200         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2201         ring->desc_cb[i] = *res_cb;
2202         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2203         ring->desc[i].rx.bd_base_info = 0;
2204 }
2205
2206 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2207 {
2208         ring->desc_cb[i].reuse_flag = 0;
2209         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
2210                 + ring->desc_cb[i].page_offset);
2211         ring->desc[i].rx.bd_base_info = 0;
2212 }
2213
2214 static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
2215                                   int *bytes, int *pkts)
2216 {
2217         int ntc = ring->next_to_clean;
2218         struct hns3_desc_cb *desc_cb;
2219
2220         while (head != ntc) {
2221                 desc_cb = &ring->desc_cb[ntc];
2222                 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
2223                 (*bytes) += desc_cb->length;
2224                 /* desc_cb will be cleaned after hns3_free_buffer_detach() */
2225                 hns3_free_buffer_detach(ring, ntc);
2226
2227                 if (++ntc == ring->desc_num)
2228                         ntc = 0;
2229
2230                 /* Issue prefetch for next Tx descriptor */
2231                 prefetch(&ring->desc_cb[ntc]);
2232         }
2233
2234         /* This smp_store_release() pairs with smp_load_acquire() in
2235          * ring_space called by hns3_nic_net_xmit.
2236          */
2237         smp_store_release(&ring->next_to_clean, ntc);
2238 }
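
/* A minimal sketch of the consumer/producer pairing assumed above (the real
 * ring_space() helper lives in hns3_enet.h and may differ in detail):
 *
 *      begin = smp_load_acquire(&ring->next_to_clean);
 *      end   = READ_ONCE(ring->next_to_use);
 *      space = ((end >= begin) ? (ring->desc_num - end + begin)
 *                              : (begin - end)) - 1;
 *
 * If the xmit path observes the released next_to_clean, it is also guaranteed
 * to observe the buffer cleanup that preceded the release store.
 */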
2239
2240 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
2241 {
2242         int u = ring->next_to_use;
2243         int c = ring->next_to_clean;
2244
2245         if (unlikely(h > ring->desc_num))
2246                 return 0;
2247
2248         return u > c ? (h > c && h <= u) : (h > c || h <= u);
2249 }
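
/* Illustrative example (not part of the original driver): with desc_num = 8,
 * next_to_clean c = 2 and next_to_use u = 6, a hardware head of 3..6 is
 * accepted and anything else is rejected.  If the producer has wrapped
 * (u = 1, c = 6), the valid range wraps too: head values such as 7, 0 or 1
 * are accepted, while 2..6 are rejected.
 */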
2250
2251 void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
2252 {
2253         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2254         struct hns3_nic_priv *priv = netdev_priv(netdev);
2255         struct netdev_queue *dev_queue;
2256         int bytes, pkts;
2257         int head;
2258
2259         head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
2260         rmb(); /* Make sure head is ready before touching any data */
2261
2262         if (is_ring_empty(ring) || head == ring->next_to_clean)
2263                 return; /* no data to poll */
2264
2265         if (unlikely(!is_valid_clean_head(ring, head))) {
2266                 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
2267                            ring->next_to_use, ring->next_to_clean);
2268
2269                 u64_stats_update_begin(&ring->syncp);
2270                 ring->stats.io_err_cnt++;
2271                 u64_stats_update_end(&ring->syncp);
2272                 return;
2273         }
2274
2275         bytes = 0;
2276         pkts = 0;
2277         hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
2278
2279         ring->tqp_vector->tx_group.total_bytes += bytes;
2280         ring->tqp_vector->tx_group.total_packets += pkts;
2281
2282         u64_stats_update_begin(&ring->syncp);
2283         ring->stats.tx_bytes += bytes;
2284         ring->stats.tx_pkts += pkts;
2285         u64_stats_update_end(&ring->syncp);
2286
2287         dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2288         netdev_tx_completed_queue(dev_queue, pkts, bytes);
2289
2290         if (unlikely(pkts && netif_carrier_ok(netdev) &&
2291                      (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2292                 /* Make sure that anybody stopping the queue after this
2293                  * sees the new next_to_clean.
2294                  */
2295                 smp_mb();
2296                 if (netif_tx_queue_stopped(dev_queue) &&
2297                     !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
2298                         netif_tx_wake_queue(dev_queue);
2299                         ring->stats.restart_queue++;
2300                 }
2301         }
2302 }
2303
2304 static int hns3_desc_unused(struct hns3_enet_ring *ring)
2305 {
2306         int ntc = ring->next_to_clean;
2307         int ntu = ring->next_to_use;
2308
2309         return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
2310 }
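
/* Illustrative example (not part of the original driver): with desc_num = 512,
 * next_to_clean = 10 and next_to_use = 500, there are 512 + 10 - 500 = 22
 * unused descriptors; with next_to_clean = 500 and next_to_use = 10 the
 * result is 500 - 10 = 490.
 */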
2311
2312 static void
2313 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
2314 {
2315         struct hns3_desc_cb *desc_cb;
2316         struct hns3_desc_cb res_cbs;
2317         int i, ret;
2318
2319         for (i = 0; i < cleaned_count; i++) {
2320                 desc_cb = &ring->desc_cb[ring->next_to_use];
2321                 if (desc_cb->reuse_flag) {
2322                         u64_stats_update_begin(&ring->syncp);
2323                         ring->stats.reuse_pg_cnt++;
2324                         u64_stats_update_end(&ring->syncp);
2325
2326                         hns3_reuse_buffer(ring, ring->next_to_use);
2327                 } else {
2328                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
2329                         if (ret) {
2330                                 u64_stats_update_begin(&ring->syncp);
2331                                 ring->stats.sw_err_cnt++;
2332                                 u64_stats_update_end(&ring->syncp);
2333
2334                                 netdev_err(ring->tqp->handle->kinfo.netdev,
2335                                            "hnae reserve buffer map failed.\n");
2336                                 break;
2337                         }
2338                         hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2339
2340                         u64_stats_update_begin(&ring->syncp);
2341                         ring->stats.non_reuse_pg++;
2342                         u64_stats_update_end(&ring->syncp);
2343                 }
2344
2345                 ring_ptr_move_fw(ring, next_to_use);
2346         }
2347
2348         wmb(); /* Make sure all data has been written before the doorbell */
2349         writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2350 }
2351
2352 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2353                                 struct hns3_enet_ring *ring, int pull_len,
2354                                 struct hns3_desc_cb *desc_cb)
2355 {
2356         struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
2357         int size = le16_to_cpu(desc->rx.size);
2358         u32 truesize = hnae3_buf_size(ring);
2359
2360         skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2361                         size - pull_len, truesize);
2362
2363         /* Avoid reusing pages from a remote NUMA node, or pages the stack is
2364          * still using when page_offset rolls back to zero; default to not reusing
2365          */
2366         if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) ||
2367             (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
2368                 return;
2369
2370         /* Move offset up to the next cache line */
2371         desc_cb->page_offset += truesize;
2372
2373         if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
2374                 desc_cb->reuse_flag = 1;
2375                 /* Bump the ref count on the page before it is handed out */
2376                 get_page(desc_cb->priv);
2377         } else if (page_count(desc_cb->priv) == 1) {
2378                 desc_cb->reuse_flag = 1;
2379                 desc_cb->page_offset = 0;
2380                 get_page(desc_cb->priv);
2381         }
2382 }
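
/* Illustrative example (not part of the original driver): with a 4096-byte
 * page and truesize = 2048, the first RX buffer uses offset 0 and the next
 * uses offset 2048; once the offset would run past the page, the page is only
 * reused (offset reset to 0) if the stack has already dropped its reference,
 * i.e. page_count() == 1.
 */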
2383
2384 static int hns3_gro_complete(struct sk_buff *skb)
2385 {
2386         __be16 type = skb->protocol;
2387         struct tcphdr *th;
2388         int depth = 0;
2389
2390         while (type == htons(ETH_P_8021Q)) {
2391                 struct vlan_hdr *vh;
2392
2393                 if ((depth + VLAN_HLEN) > skb_headlen(skb))
2394                         return -EFAULT;
2395
2396                 vh = (struct vlan_hdr *)(skb->data + depth);
2397                 type = vh->h_vlan_encapsulated_proto;
2398                 depth += VLAN_HLEN;
2399         }
2400
2401         if (type == htons(ETH_P_IP)) {
2402                 depth += sizeof(struct iphdr);
2403         } else if (type == htons(ETH_P_IPV6)) {
2404                 depth += sizeof(struct ipv6hdr);
2405         } else {
2406                 netdev_err(skb->dev,
2407                            "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
2408                            be16_to_cpu(type), depth);
2409                 return -EFAULT;
2410         }
2411
2412         th = (struct tcphdr *)(skb->data + depth);
2413         skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
2414         if (th->cwr)
2415                 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
2416
2417         skb->ip_summed = CHECKSUM_UNNECESSARY;
2418
2419         return 0;
2420 }
2421
2422 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2423                              u32 l234info, u32 bd_base_info, u32 ol_info)
2424 {
2425         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2426         int l3_type, l4_type;
2427         int ol4_type;
2428
2429         skb->ip_summed = CHECKSUM_NONE;
2430
2431         skb_checksum_none_assert(skb);
2432
2433         if (!(netdev->features & NETIF_F_RXCSUM))
2434                 return;
2435
2436         /* check if hardware has done checksum */
2437         if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
2438                 return;
2439
2440         if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
2441                                  BIT(HNS3_RXD_OL3E_B) |
2442                                  BIT(HNS3_RXD_OL4E_B)))) {
2443                 u64_stats_update_begin(&ring->syncp);
2444                 ring->stats.l3l4_csum_err++;
2445                 u64_stats_update_end(&ring->syncp);
2446
2447                 return;
2448         }
2449
2450         ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
2451                                    HNS3_RXD_OL4ID_S);
2452         switch (ol4_type) {
2453         case HNS3_OL4_TYPE_MAC_IN_UDP:
2454         case HNS3_OL4_TYPE_NVGRE:
2455                 skb->csum_level = 1;
2456                 /* fall through */
2457         case HNS3_OL4_TYPE_NO_TUN:
2458                 l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2459                                           HNS3_RXD_L3ID_S);
2460                 l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2461                                           HNS3_RXD_L4ID_S);
2462
2463                 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2464                 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2465                      l3_type == HNS3_L3_TYPE_IPV6) &&
2466                     (l4_type == HNS3_L4_TYPE_UDP ||
2467                      l4_type == HNS3_L4_TYPE_TCP ||
2468                      l4_type == HNS3_L4_TYPE_SCTP))
2469                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2470                 break;
2471         default:
2472                 break;
2473         }
2474 }
2475
2476 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2477 {
2478         if (skb_has_frag_list(skb))
2479                 napi_gro_flush(&ring->tqp_vector->napi, false);
2480
2481         napi_gro_receive(&ring->tqp_vector->napi, skb);
2482 }
2483
2484 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2485                                 struct hns3_desc *desc, u32 l234info,
2486                                 u16 *vlan_tag)
2487 {
2488         struct hnae3_handle *handle = ring->tqp->handle;
2489         struct pci_dev *pdev = ring->tqp->handle->pdev;
2490
2491         if (pdev->revision == 0x20) {
2492                 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2493                 if (!(*vlan_tag & VLAN_VID_MASK))
2494                         *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2495
2496                 return (*vlan_tag != 0);
2497         }
2498
2499 #define HNS3_STRP_OUTER_VLAN    0x1
2500 #define HNS3_STRP_INNER_VLAN    0x2
2501 #define HNS3_STRP_BOTH          0x3
2502
2503         /* The hardware always inserts the VLAN tag into the RX descriptor
2504          * when it strips the tag from the packet, so the driver needs to
2505          * determine which tag to report to the stack.
2506          */
2507         switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2508                                 HNS3_RXD_STRP_TAGP_S)) {
2509         case HNS3_STRP_OUTER_VLAN:
2510                 if (handle->port_base_vlan_state !=
2511                                 HNAE3_PORT_BASE_VLAN_DISABLE)
2512                         return false;
2513
2514                 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2515                 return true;
2516         case HNS3_STRP_INNER_VLAN:
2517                 if (handle->port_base_vlan_state !=
2518                                 HNAE3_PORT_BASE_VLAN_DISABLE)
2519                         return false;
2520
2521                 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2522                 return true;
2523         case HNS3_STRP_BOTH:
2524                 if (handle->port_base_vlan_state ==
2525                                 HNAE3_PORT_BASE_VLAN_DISABLE)
2526                         *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2527                 else
2528                         *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2529
2530                 return true;
2531         default:
2532                 return false;
2533         }
2534 }
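
/* Illustrative example (not part of the original driver): on a revision 0x21+
 * device with port based VLAN disabled, a packet whose two tags were both
 * stripped (HNS3_STRP_BOTH) reports the outer tag from ot_vlan_tag; with port
 * based VLAN enabled, the inner tag from vlan_tag is reported instead.
 */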
2535
2536 static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
2537                           unsigned char *va)
2538 {
2539 #define HNS3_NEED_ADD_FRAG      1
2540         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
2541         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2542         struct sk_buff *skb;
2543
2544         ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
2545         skb = ring->skb;
2546         if (unlikely(!skb)) {
2547                 netdev_err(netdev, "alloc rx skb fail\n");
2548
2549                 u64_stats_update_begin(&ring->syncp);
2550                 ring->stats.sw_err_cnt++;
2551                 u64_stats_update_end(&ring->syncp);
2552
2553                 return -ENOMEM;
2554         }
2555
2556         prefetchw(skb->data);
2557
2558         ring->pending_buf = 1;
2559         ring->frag_num = 0;
2560         ring->tail_skb = NULL;
2561         if (length <= HNS3_RX_HEAD_SIZE) {
2562                 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2563
2564                 /* We can reuse buffer as-is, just make sure it is local */
2565                 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2566                         desc_cb->reuse_flag = 1;
2567                 else /* This page cannot be reused so discard it */
2568                         put_page(desc_cb->priv);
2569
2570                 ring_ptr_move_fw(ring, next_to_clean);
2571                 return 0;
2572         }
2573         u64_stats_update_begin(&ring->syncp);
2574         ring->stats.seg_pkt_cnt++;
2575         u64_stats_update_end(&ring->syncp);
2576
2577         ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
2578         __skb_put(skb, ring->pull_len);
2579         hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
2580                             desc_cb);
2581         ring_ptr_move_fw(ring, next_to_clean);
2582
2583         return HNS3_NEED_ADD_FRAG;
2584 }
2585
2586 static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
2587                          struct sk_buff **out_skb, bool pending)
2588 {
2589         struct sk_buff *skb = *out_skb;
2590         struct sk_buff *head_skb = *out_skb;
2591         struct sk_buff *new_skb;
2592         struct hns3_desc_cb *desc_cb;
2593         struct hns3_desc *pre_desc;
2594         u32 bd_base_info;
2595         int pre_bd;
2596
2597         /* if there is a pending BD, the SW next_to_clean has already moved on
2598          * to the next (not yet valid) BD, so read the previous BD instead
2599          */
2600         if (pending) {
2601                 pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
2602                         ring->desc_num;
2603                 pre_desc = &ring->desc[pre_bd];
2604                 bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
2605         } else {
2606                 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2607         }
2608
2609         while (!(bd_base_info & BIT(HNS3_RXD_FE_B))) {
2610                 desc = &ring->desc[ring->next_to_clean];
2611                 desc_cb = &ring->desc_cb[ring->next_to_clean];
2612                 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2613                 /* make sure the HW has finished writing the descriptor */
2614                 dma_rmb();
2615                 if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
2616                         return -ENXIO;
2617
2618                 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
2619                         new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2620                                                  HNS3_RX_HEAD_SIZE);
2621                         if (unlikely(!new_skb)) {
2622                                 netdev_err(ring->tqp->handle->kinfo.netdev,
2623                                            "alloc rx skb frag fail\n");
2624                                 return -ENXIO;
2625                         }
2626                         ring->frag_num = 0;
2627
2628                         if (ring->tail_skb) {
2629                                 ring->tail_skb->next = new_skb;
2630                                 ring->tail_skb = new_skb;
2631                         } else {
2632                                 skb_shinfo(skb)->frag_list = new_skb;
2633                                 ring->tail_skb = new_skb;
2634                         }
2635                 }
2636
2637                 if (ring->tail_skb) {
2638                         head_skb->truesize += hnae3_buf_size(ring);
2639                         head_skb->data_len += le16_to_cpu(desc->rx.size);
2640                         head_skb->len += le16_to_cpu(desc->rx.size);
2641                         skb = ring->tail_skb;
2642                 }
2643
2644                 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
2645                 ring_ptr_move_fw(ring, next_to_clean);
2646                 ring->pending_buf++;
2647         }
2648
2649         return 0;
2650 }
2651
2652 static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
2653                                      struct sk_buff *skb, u32 l234info,
2654                                      u32 bd_base_info, u32 ol_info)
2655 {
2656         u16 gro_count;
2657         u32 l3_type;
2658
2659         gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
2660                                     HNS3_RXD_GRO_COUNT_S);
2661         /* if there is no HW GRO, do not set gro params */
2662         if (!gro_count) {
2663                 hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
2664                 return 0;
2665         }
2666
2667         NAPI_GRO_CB(skb)->count = gro_count;
2668
2669         l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2670                                   HNS3_RXD_L3ID_S);
2671         if (l3_type == HNS3_L3_TYPE_IPV4)
2672                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2673         else if (l3_type == HNS3_L3_TYPE_IPV6)
2674                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
2675         else
2676                 return -EFAULT;
2677
2678         skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
2679                                                     HNS3_RXD_GRO_SIZE_M,
2680                                                     HNS3_RXD_GRO_SIZE_S);
2681
2682         return  hns3_gro_complete(skb);
2683 }
2684
2685 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
2686                                      struct sk_buff *skb, u32 rss_hash)
2687 {
2688         struct hnae3_handle *handle = ring->tqp->handle;
2689         enum pkt_hash_types rss_type;
2690
2691         if (rss_hash)
2692                 rss_type = handle->kinfo.rss_type;
2693         else
2694                 rss_type = PKT_HASH_TYPE_NONE;
2695
2696         skb_set_hash(skb, rss_hash, rss_type);
2697 }
2698
2699 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
2700 {
2701         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2702         enum hns3_pkt_l2t_type l2_frame_type;
2703         u32 bd_base_info, l234info, ol_info;
2704         struct hns3_desc *desc;
2705         unsigned int len;
2706         int pre_ntc, ret;
2707
2708         /* The bdinfo handled below is only valid on the last BD of the
2709          * current packet, and ring->next_to_clean indicates the first
2710          * descriptor of the next packet, hence the - 1 below.
2711          */
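        /* For example, with desc_num = 512 and next_to_clean already advanced
         * to 0 for the next packet, the last BD of the current packet is
         * descriptor 511.
         */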
2712         pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
2713                                         (ring->desc_num - 1);
2714         desc = &ring->desc[pre_ntc];
2715         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2716         l234info = le32_to_cpu(desc->rx.l234_info);
2717         ol_info = le32_to_cpu(desc->rx.ol_info);
2718
2719         /* Based on the hardware strategy, the offloaded tag is stored in
2720          * ot_vlan_tag for a double-tagged packet, and in vlan_tag for a
2721          * single-tagged packet.
2722          */
2723         if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2724                 u16 vlan_tag;
2725
2726                 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
2727                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2728                                                vlan_tag);
2729         }
2730
2731         if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
2732                 u64_stats_update_begin(&ring->syncp);
2733                 ring->stats.non_vld_descs++;
2734                 u64_stats_update_end(&ring->syncp);
2735
2736                 return -EINVAL;
2737         }
2738
2739         if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
2740                                   BIT(HNS3_RXD_L2E_B))))) {
2741                 u64_stats_update_begin(&ring->syncp);
2742                 if (l234info & BIT(HNS3_RXD_L2E_B))
2743                         ring->stats.l2_err++;
2744                 else
2745                         ring->stats.err_pkt_len++;
2746                 u64_stats_update_end(&ring->syncp);
2747
2748                 return -EFAULT;
2749         }
2750
2751         len = skb->len;
2752
2753         /* Set the protocol before handing the skb up to the IP stack */
2754         skb->protocol = eth_type_trans(skb, netdev);
2755
2756         /* This is needed in order to enable forwarding support */
2757         ret = hns3_set_gro_and_checksum(ring, skb, l234info,
2758                                         bd_base_info, ol_info);
2759         if (unlikely(ret)) {
2760                 u64_stats_update_begin(&ring->syncp);
2761                 ring->stats.rx_err_cnt++;
2762                 u64_stats_update_end(&ring->syncp);
2763                 return ret;
2764         }
2765
2766         l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
2767                                         HNS3_RXD_DMAC_S);
2768
2769         u64_stats_update_begin(&ring->syncp);
2770         ring->stats.rx_pkts++;
2771         ring->stats.rx_bytes += len;
2772
2773         if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
2774                 ring->stats.rx_multicast++;
2775
2776         u64_stats_update_end(&ring->syncp);
2777
2778         ring->tqp_vector->rx_group.total_bytes += len;
2779
2780         hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
2781         return 0;
2782 }
2783
2784 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2785                              struct sk_buff **out_skb)
2786 {
2787         struct sk_buff *skb = ring->skb;
2788         struct hns3_desc_cb *desc_cb;
2789         struct hns3_desc *desc;
2790         u32 bd_base_info;
2791         int length;
2792         int ret;
2793
2794         desc = &ring->desc[ring->next_to_clean];
2795         desc_cb = &ring->desc_cb[ring->next_to_clean];
2796
2797         prefetch(desc);
2798
2799         length = le16_to_cpu(desc->rx.size);
2800         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2801
2802         /* Check valid BD */
2803         if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
2804                 return -ENXIO;
2805
2806         if (!skb)
2807                 ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2808
2809         /* Prefetch the first cache line of the first page.
2810          * The idea is to cache a few bytes of the packet header. Our L1
2811          * cache line size is 64B, so we need to prefetch twice to cover
2812          * 128B. Some CPUs have larger, 128B level 1 cache lines; in that
2813          * case a single prefetch is enough to bring in the relevant part
2814          * of the header.
2815          */
2816         prefetch(ring->va);
2817 #if L1_CACHE_BYTES < 128
2818         prefetch(ring->va + L1_CACHE_BYTES);
2819 #endif
2820
2821         if (!skb) {
2822                 ret = hns3_alloc_skb(ring, length, ring->va);
2823                 *out_skb = skb = ring->skb;
2824
2825                 if (ret < 0) /* alloc buffer fail */
2826                         return ret;
2827                 if (ret > 0) { /* need add frag */
2828                         ret = hns3_add_frag(ring, desc, &skb, false);
2829                         if (ret)
2830                                 return ret;
2831
2832                         /* As the head data may be changed when GRO is enabled, copy
2833                          * the head data in after the rest of the packet is received
2834                          */
2835                         memcpy(skb->data, ring->va,
2836                                ALIGN(ring->pull_len, sizeof(long)));
2837                 }
2838         } else {
2839                 ret = hns3_add_frag(ring, desc, &skb, true);
2840                 if (ret)
2841                         return ret;
2842
2843                 /* As the head data may be changed when GRO is enabled, copy
2844                  * the head data in after the rest of the packet is received
2845                  */
2846                 memcpy(skb->data, ring->va,
2847                        ALIGN(ring->pull_len, sizeof(long)));
2848         }
2849
2850         ret = hns3_handle_bdinfo(ring, skb);
2851         if (unlikely(ret)) {
2852                 dev_kfree_skb_any(skb);
2853                 return ret;
2854         }
2855
2856         skb_record_rx_queue(skb, ring->tqp->tqp_index);
2857         *out_skb = skb;
2858
2859         return 0;
2860 }
2861
2862 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
2863                        void (*rx_fn)(struct hns3_enet_ring *,
2864                                      struct sk_buff *))
2865 {
2866 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2867         int recv_pkts, recv_bds, clean_count, err;
2868         int unused_count = hns3_desc_unused(ring);
2869         struct sk_buff *skb = ring->skb;
2870         int num;
2871
2872         num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2873         rmb(); /* Make sure num is read before other ring data is touched */
2874
2875         recv_pkts = 0, recv_bds = 0, clean_count = 0;
2876         num -= unused_count;
2877         unused_count -= ring->pending_buf;
2878
2879         while (recv_pkts < budget && recv_bds < num) {
2880                 /* Reuse or realloc buffers */
2881                 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2882                         hns3_nic_alloc_rx_buffers(ring,
2883                                                   clean_count + unused_count);
2884                         clean_count = 0;
2885                         unused_count = hns3_desc_unused(ring) -
2886                                         ring->pending_buf;
2887                 }
2888
2889                 /* Poll one pkt */
2890                 err = hns3_handle_rx_bd(ring, &skb);
2891                 if (unlikely(!skb)) /* Unrecoverable fault, stop polling */
2892                         goto out;
2893
2894                 if (err == -ENXIO) { /* Have not got the FE (frame end) for the packet yet */
2895                         goto out;
2896                 } else if (unlikely(err)) {  /* Skip the erroneous packet */
2897                         recv_bds += ring->pending_buf;
2898                         clean_count += ring->pending_buf;
2899                         ring->skb = NULL;
2900                         ring->pending_buf = 0;
2901                         continue;
2902                 }
2903
2904                 rx_fn(ring, skb);
2905                 recv_bds += ring->pending_buf;
2906                 clean_count += ring->pending_buf;
2907                 ring->skb = NULL;
2908                 ring->pending_buf = 0;
2909
2910                 recv_pkts++;
2911         }
2912
2913 out:
2914         /* Make sure all data has been written before the buffers are submitted */
2915         if (clean_count + unused_count > 0)
2916                 hns3_nic_alloc_rx_buffers(ring,
2917                                           clean_count + unused_count);
2918
2919         return recv_pkts;
2920 }
2921
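/* hns3_get_new_int_gl - recompute the interrupt coalescing GL value for a
 * ring group from the byte and packet rates observed since the last update.
 * Returns true when the GL value changed and needs to be written back to
 * hardware by the caller.
 */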
2922 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2923 {
2924         struct hns3_enet_tqp_vector *tqp_vector =
2925                                         ring_group->ring->tqp_vector;
2926         enum hns3_flow_level_range new_flow_level;
2927         int packets_per_msecs;
2928         int bytes_per_msecs;
2929         u32 time_passed_ms;
2930         u16 new_int_gl;
2931
2932         if (!tqp_vector->last_jiffies)
2933                 return false;
2934
2935         if (ring_group->total_packets == 0) {
2936                 ring_group->coal.int_gl = HNS3_INT_GL_50K;
2937                 ring_group->coal.flow_level = HNS3_FLOW_LOW;
2938                 return true;
2939         }
2940
2941         /* Simple throttle rate management
2942          * 0-10 MB/s    low    (50000 ints/s)
2943          * 10-20 MB/s   middle (20000 ints/s)
2944          * 20-1249 MB/s high   (18000 ints/s)
2945          * > 40000 pps  ultra  (8000 ints/s)
2946          */
2947         new_flow_level = ring_group->coal.flow_level;
2948         new_int_gl = ring_group->coal.int_gl;
2949         time_passed_ms =
2950                 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2951
2952         if (!time_passed_ms)
2953                 return false;
2954
2955         do_div(ring_group->total_packets, time_passed_ms);
2956         packets_per_msecs = ring_group->total_packets;
2957
2958         do_div(ring_group->total_bytes, time_passed_ms);
2959         bytes_per_msecs = ring_group->total_bytes;
2960
2961 #define HNS3_RX_LOW_BYTE_RATE 10000
2962 #define HNS3_RX_MID_BYTE_RATE 20000
2963
2964         switch (new_flow_level) {
2965         case HNS3_FLOW_LOW:
2966                 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2967                         new_flow_level = HNS3_FLOW_MID;
2968                 break;
2969         case HNS3_FLOW_MID:
2970                 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2971                         new_flow_level = HNS3_FLOW_HIGH;
2972                 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2973                         new_flow_level = HNS3_FLOW_LOW;
2974                 break;
2975         case HNS3_FLOW_HIGH:
2976         case HNS3_FLOW_ULTRA:
2977         default:
2978                 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2979                         new_flow_level = HNS3_FLOW_MID;
2980                 break;
2981         }
2982
2983 #define HNS3_RX_ULTRA_PACKET_RATE 40
2984
2985         if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2986             &tqp_vector->rx_group == ring_group)
2987                 new_flow_level = HNS3_FLOW_ULTRA;
2988
2989         switch (new_flow_level) {
2990         case HNS3_FLOW_LOW:
2991                 new_int_gl = HNS3_INT_GL_50K;
2992                 break;
2993         case HNS3_FLOW_MID:
2994                 new_int_gl = HNS3_INT_GL_20K;
2995                 break;
2996         case HNS3_FLOW_HIGH:
2997                 new_int_gl = HNS3_INT_GL_18K;
2998                 break;
2999         case HNS3_FLOW_ULTRA:
3000                 new_int_gl = HNS3_INT_GL_8K;
3001                 break;
3002         default:
3003                 break;
3004         }
3005
3006         ring_group->total_bytes = 0;
3007         ring_group->total_packets = 0;
3008         ring_group->coal.flow_level = new_flow_level;
3009         if (new_int_gl != ring_group->coal.int_gl) {
3010                 ring_group->coal.int_gl = new_int_gl;
3011                 return true;
3012         }
3013         return false;
3014 }
3015
3016 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
3017 {
3018         struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
3019         struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
3020         bool rx_update, tx_update;
3021
3022         /* update param every 1000ms */
3023         if (time_before(jiffies,
3024                         tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
3025                 return;
3026
3027         if (rx_group->coal.gl_adapt_enable) {
3028                 rx_update = hns3_get_new_int_gl(rx_group);
3029                 if (rx_update)
3030                         hns3_set_vector_coalesce_rx_gl(tqp_vector,
3031                                                        rx_group->coal.int_gl);
3032         }
3033
3034         if (tx_group->coal.gl_adapt_enable) {
3035                 tx_update = hns3_get_new_int_gl(tx_group);
3036                 if (tx_update)
3037                         hns3_set_vector_coalesce_tx_gl(tqp_vector,
3038                                                        tx_group->coal.int_gl);
3039         }
3040
3041         tqp_vector->last_jiffies = jiffies;
3042 }
3043
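/* hns3_nic_common_poll - NAPI poll routine shared by all TQP vectors.
 * TX rings are always cleaned in full, while the RX budget is split across
 * the RX rings attached to the vector. Once all RX work is done, the
 * adaptive GL values are refreshed and the vector interrupt is re-enabled.
 */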
3044 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
3045 {
3046         struct hns3_nic_priv *priv = netdev_priv(napi->dev);
3047         struct hns3_enet_ring *ring;
3048         int rx_pkt_total = 0;
3049
3050         struct hns3_enet_tqp_vector *tqp_vector =
3051                 container_of(napi, struct hns3_enet_tqp_vector, napi);
3052         bool clean_complete = true;
3053         int rx_budget = budget;
3054
3055         if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
3056                 napi_complete(napi);
3057                 return 0;
3058         }
3059
3060         /* Since the actual Tx work is minimal, we can give the Tx a larger
3061          * budget and be more aggressive about cleaning up the Tx descriptors.
3062          */
3063         hns3_for_each_ring(ring, tqp_vector->tx_group)
3064                 hns3_clean_tx_ring(ring);
3065
3066         /* make sure the RX ring budget is not smaller than 1 */
3067         if (tqp_vector->num_tqps > 1)
3068                 rx_budget = max(budget / tqp_vector->num_tqps, 1);
3069
3070         hns3_for_each_ring(ring, tqp_vector->rx_group) {
3071                 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
3072                                                     hns3_rx_skb);
3073
3074                 if (rx_cleaned >= rx_budget)
3075                         clean_complete = false;
3076
3077                 rx_pkt_total += rx_cleaned;
3078         }
3079
3080         tqp_vector->rx_group.total_packets += rx_pkt_total;
3081
3082         if (!clean_complete)
3083                 return budget;
3084
3085         if (napi_complete(napi) &&
3086             likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
3087                 hns3_update_new_int_gl(tqp_vector);
3088                 hns3_mask_vector_irq(tqp_vector, 1);
3089         }
3090
3091         return rx_pkt_total;
3092 }
3093
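/* hns3_get_vector_ring_chain - build the chain of TX/RX rings served by a
 * TQP vector so it can be handed to the AE layer when mapping or unmapping
 * rings to the vector. Nodes after the caller-provided head are
 * devm-allocated and released by hns3_free_vector_ring_chain().
 */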
3094 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3095                                       struct hnae3_ring_chain_node *head)
3096 {
3097         struct pci_dev *pdev = tqp_vector->handle->pdev;
3098         struct hnae3_ring_chain_node *cur_chain = head;
3099         struct hnae3_ring_chain_node *chain;
3100         struct hns3_enet_ring *tx_ring;
3101         struct hns3_enet_ring *rx_ring;
3102
3103         tx_ring = tqp_vector->tx_group.ring;
3104         if (tx_ring) {
3105                 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
3106                 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
3107                               HNAE3_RING_TYPE_TX);
3108                 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3109                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
3110
3111                 cur_chain->next = NULL;
3112
3113                 while (tx_ring->next) {
3114                         tx_ring = tx_ring->next;
3115
3116                         chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
3117                                              GFP_KERNEL);
3118                         if (!chain)
3119                                 goto err_free_chain;
3120
3121                         cur_chain->next = chain;
3122                         chain->tqp_index = tx_ring->tqp->tqp_index;
3123                         hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
3124                                       HNAE3_RING_TYPE_TX);
3125                         hnae3_set_field(chain->int_gl_idx,
3126                                         HNAE3_RING_GL_IDX_M,
3127                                         HNAE3_RING_GL_IDX_S,
3128                                         HNAE3_RING_GL_TX);
3129
3130                         cur_chain = chain;
3131                 }
3132         }
3133
3134         rx_ring = tqp_vector->rx_group.ring;
3135         if (!tx_ring && rx_ring) {
3136                 cur_chain->next = NULL;
3137                 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
3138                 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
3139                               HNAE3_RING_TYPE_RX);
3140                 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3141                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3142
3143                 rx_ring = rx_ring->next;
3144         }
3145
3146         while (rx_ring) {
3147                 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
3148                 if (!chain)
3149                         goto err_free_chain;
3150
3151                 cur_chain->next = chain;
3152                 chain->tqp_index = rx_ring->tqp->tqp_index;
3153                 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
3154                               HNAE3_RING_TYPE_RX);
3155                 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
3156                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
3157
3158                 cur_chain = chain;
3159
3160                 rx_ring = rx_ring->next;
3161         }
3162
3163         return 0;
3164
3165 err_free_chain:
3166         cur_chain = head->next;
3167         while (cur_chain) {
3168                 chain = cur_chain->next;
3169                 devm_kfree(&pdev->dev, cur_chain);
3170                 cur_chain = chain;
3171         }
3172         head->next = NULL;
3173
3174         return -ENOMEM;
3175 }
3176
3177 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
3178                                         struct hnae3_ring_chain_node *head)
3179 {
3180         struct pci_dev *pdev = tqp_vector->handle->pdev;
3181         struct hnae3_ring_chain_node *chain_tmp, *chain;
3182
3183         chain = head->next;
3184
3185         while (chain) {
3186                 chain_tmp = chain->next;
3187                 devm_kfree(&pdev->dev, chain);
3188                 chain = chain_tmp;
3189         }
3190 }
3191
3192 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
3193                                    struct hns3_enet_ring *ring)
3194 {
3195         ring->next = group->ring;
3196         group->ring = ring;
3197
3198         group->count++;
3199 }
3200
3201 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
3202 {
3203         struct pci_dev *pdev = priv->ae_handle->pdev;
3204         struct hns3_enet_tqp_vector *tqp_vector;
3205         int num_vectors = priv->vector_num;
3206         int numa_node;
3207         int vector_i;
3208
3209         numa_node = dev_to_node(&pdev->dev);
3210
3211         for (vector_i = 0; vector_i < num_vectors; vector_i++) {
3212                 tqp_vector = &priv->tqp_vector[vector_i];
3213                 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
3214                                 &tqp_vector->affinity_mask);
3215         }
3216 }
3217
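/* hns3_nic_init_vector_data - distribute the TX/RX rings across the
 * allocated TQP vectors in round-robin order, program the per-vector
 * coalesce settings, map each ring chain to its vector through the AE
 * layer and register the NAPI contexts.
 */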
3218 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
3219 {
3220         struct hnae3_ring_chain_node vector_ring_chain;
3221         struct hnae3_handle *h = priv->ae_handle;
3222         struct hns3_enet_tqp_vector *tqp_vector;
3223         int ret = 0;
3224         int i;
3225
3226         hns3_nic_set_cpumask(priv);
3227
3228         for (i = 0; i < priv->vector_num; i++) {
3229                 tqp_vector = &priv->tqp_vector[i];
3230                 hns3_vector_gl_rl_init_hw(tqp_vector, priv);
3231                 tqp_vector->num_tqps = 0;
3232         }
3233
3234         for (i = 0; i < h->kinfo.num_tqps; i++) {
3235                 u16 vector_i = i % priv->vector_num;
3236                 u16 tqp_num = h->kinfo.num_tqps;
3237
3238                 tqp_vector = &priv->tqp_vector[vector_i];
3239
3240                 hns3_add_ring_to_group(&tqp_vector->tx_group,
3241                                        priv->ring_data[i].ring);
3242
3243                 hns3_add_ring_to_group(&tqp_vector->rx_group,
3244                                        priv->ring_data[i + tqp_num].ring);
3245
3246                 priv->ring_data[i].ring->tqp_vector = tqp_vector;
3247                 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
3248                 tqp_vector->num_tqps++;
3249         }
3250
3251         for (i = 0; i < priv->vector_num; i++) {
3252                 tqp_vector = &priv->tqp_vector[i];
3253
3254                 tqp_vector->rx_group.total_bytes = 0;
3255                 tqp_vector->rx_group.total_packets = 0;
3256                 tqp_vector->tx_group.total_bytes = 0;
3257                 tqp_vector->tx_group.total_packets = 0;
3258                 tqp_vector->handle = h;
3259
3260                 ret = hns3_get_vector_ring_chain(tqp_vector,
3261                                                  &vector_ring_chain);
3262                 if (ret)
3263                         goto map_ring_fail;
3264
3265                 ret = h->ae_algo->ops->map_ring_to_vector(h,
3266                         tqp_vector->vector_irq, &vector_ring_chain);
3267
3268                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3269
3270                 if (ret)
3271                         goto map_ring_fail;
3272
3273                 netif_napi_add(priv->netdev, &tqp_vector->napi,
3274                                hns3_nic_common_poll, NAPI_POLL_WEIGHT);
3275         }
3276
3277         return 0;
3278
3279 map_ring_fail:
3280         while (i--)
3281                 netif_napi_del(&priv->tqp_vector[i].napi);
3282
3283         return ret;
3284 }
3285
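/* hns3_nic_alloc_vector_data - request interrupt vectors from the AE layer.
 * The vector count is bounded by the number of TQPs, the number of online
 * CPUs and HNS3_VECTOR_PF_MAX_NUM.
 */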
3286 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
3287 {
3288 #define HNS3_VECTOR_PF_MAX_NUM          64
3289
3290         struct hnae3_handle *h = priv->ae_handle;
3291         struct hns3_enet_tqp_vector *tqp_vector;
3292         struct hnae3_vector_info *vector;
3293         struct pci_dev *pdev = h->pdev;
3294         u16 tqp_num = h->kinfo.num_tqps;
3295         u16 vector_num;
3296         int ret = 0;
3297         u16 i;
3298
3299         /* RSS size, cpu online and vector_num should be the same */
3300         /* Should consider 2p/4p later */
3301         vector_num = min_t(u16, num_online_cpus(), tqp_num);
3302         vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
3303
3304         vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
3305                               GFP_KERNEL);
3306         if (!vector)
3307                 return -ENOMEM;
3308
3309         vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
3310
3311         priv->vector_num = vector_num;
3312         priv->tqp_vector = (struct hns3_enet_tqp_vector *)
3313                 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
3314                              GFP_KERNEL);
3315         if (!priv->tqp_vector) {
3316                 ret = -ENOMEM;
3317                 goto out;
3318         }
3319
3320         for (i = 0; i < priv->vector_num; i++) {
3321                 tqp_vector = &priv->tqp_vector[i];
3322                 tqp_vector->idx = i;
3323                 tqp_vector->mask_addr = vector[i].io_addr;
3324                 tqp_vector->vector_irq = vector[i].vector;
3325                 hns3_vector_gl_rl_init(tqp_vector, priv);
3326         }
3327
3328 out:
3329         devm_kfree(&pdev->dev, vector);
3330         return ret;
3331 }
3332
3333 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
3334 {
3335         group->ring = NULL;
3336         group->count = 0;
3337 }
3338
3339 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
3340 {
3341         struct hnae3_ring_chain_node vector_ring_chain;
3342         struct hnae3_handle *h = priv->ae_handle;
3343         struct hns3_enet_tqp_vector *tqp_vector;
3344         int i;
3345
3346         for (i = 0; i < priv->vector_num; i++) {
3347                 tqp_vector = &priv->tqp_vector[i];
3348
3349                 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
3350                         continue;
3351
3352                 hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain);
3353
3354                 h->ae_algo->ops->unmap_ring_from_vector(h,
3355                         tqp_vector->vector_irq, &vector_ring_chain);
3356
3357                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
3358
3359                 if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
3360                         irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
3361                         free_irq(tqp_vector->vector_irq, tqp_vector);
3362                         tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
3363                 }
3364
3365                 hns3_clear_ring_group(&tqp_vector->rx_group);
3366                 hns3_clear_ring_group(&tqp_vector->tx_group);
3367                 netif_napi_del(&priv->tqp_vector[i].napi);
3368         }
3369 }
3370
3371 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
3372 {
3373         struct hnae3_handle *h = priv->ae_handle;
3374         struct pci_dev *pdev = h->pdev;
3375         int i, ret;
3376
3377         for (i = 0; i < priv->vector_num; i++) {
3378                 struct hns3_enet_tqp_vector *tqp_vector;
3379
3380                 tqp_vector = &priv->tqp_vector[i];
3381                 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
3382                 if (ret)
3383                         return ret;
3384         }
3385
3386         devm_kfree(&pdev->dev, priv->tqp_vector);
3387         return 0;
3388 }
3389
3390 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
3391                              int ring_type)
3392 {
3393         struct hns3_nic_ring_data *ring_data = priv->ring_data;
3394         int queue_num = priv->ae_handle->kinfo.num_tqps;
3395         struct pci_dev *pdev = priv->ae_handle->pdev;
3396         struct hns3_enet_ring *ring;
3397         int desc_num;
3398
3399         ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
3400         if (!ring)
3401                 return -ENOMEM;
3402
3403         if (ring_type == HNAE3_RING_TYPE_TX) {
3404                 desc_num = priv->ae_handle->kinfo.num_tx_desc;
3405                 ring_data[q->tqp_index].ring = ring;
3406                 ring_data[q->tqp_index].queue_index = q->tqp_index;
3407                 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
3408         } else {
3409                 desc_num = priv->ae_handle->kinfo.num_rx_desc;
3410                 ring_data[q->tqp_index + queue_num].ring = ring;
3411                 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
3412                 ring->io_base = q->io_base;
3413         }
3414
3415         hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
3416
3417         ring->tqp = q;
3418         ring->desc = NULL;
3419         ring->desc_cb = NULL;
3420         ring->dev = priv->dev;
3421         ring->desc_dma_addr = 0;
3422         ring->buf_size = q->buf_size;
3423         ring->desc_num = desc_num;
3424         ring->next_to_use = 0;
3425         ring->next_to_clean = 0;
3426
3427         return 0;
3428 }
3429
3430 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
3431                               struct hns3_nic_priv *priv)
3432 {
3433         int ret;
3434
3435         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
3436         if (ret)
3437                 return ret;
3438
3439         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
3440         if (ret) {
3441                 devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
3442                 return ret;
3443         }
3444
3445         return 0;
3446 }
3447
3448 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
3449 {
3450         struct hnae3_handle *h = priv->ae_handle;
3451         struct pci_dev *pdev = h->pdev;
3452         int i, ret;
3453
3454         priv->ring_data =  devm_kzalloc(&pdev->dev,
3455                                         array3_size(h->kinfo.num_tqps,
3456                                                     sizeof(*priv->ring_data),
3457                                                     2),
3458                                         GFP_KERNEL);
3459         if (!priv->ring_data)
3460                 return -ENOMEM;
3461
3462         for (i = 0; i < h->kinfo.num_tqps; i++) {
3463                 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
3464                 if (ret)
3465                         goto err;
3466         }
3467
3468         return 0;
3469 err:
3470         while (i--) {
3471                 devm_kfree(priv->dev, priv->ring_data[i].ring);
3472                 devm_kfree(priv->dev,
3473                            priv->ring_data[i + h->kinfo.num_tqps].ring);
3474         }
3475
3476         devm_kfree(&pdev->dev, priv->ring_data);
3477         priv->ring_data = NULL;
3478         return ret;
3479 }
3480
3481 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
3482 {
3483         struct hnae3_handle *h = priv->ae_handle;
3484         int i;
3485
3486         if (!priv->ring_data)
3487                 return;
3488
3489         for (i = 0; i < h->kinfo.num_tqps; i++) {
3490                 devm_kfree(priv->dev, priv->ring_data[i].ring);
3491                 devm_kfree(priv->dev,
3492                            priv->ring_data[i + h->kinfo.num_tqps].ring);
3493         }
3494         devm_kfree(priv->dev, priv->ring_data);
3495         priv->ring_data = NULL;
3496 }
3497
3498 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
3499 {
3500         int ret;
3501
3502         if (ring->desc_num <= 0 || ring->buf_size <= 0)
3503                 return -EINVAL;
3504
3505         ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
3506                                      sizeof(ring->desc_cb[0]), GFP_KERNEL);
3507         if (!ring->desc_cb) {
3508                 ret = -ENOMEM;
3509                 goto out;
3510         }
3511
3512         ret = hns3_alloc_desc(ring);
3513         if (ret)
3514                 goto out_with_desc_cb;
3515
3516         if (!HNAE3_IS_TX_RING(ring)) {
3517                 ret = hns3_alloc_ring_buffers(ring);
3518                 if (ret)
3519                         goto out_with_desc;
3520         }
3521
3522         return 0;
3523
3524 out_with_desc:
3525         hns3_free_desc(ring);
3526 out_with_desc_cb:
3527         devm_kfree(ring_to_dev(ring), ring->desc_cb);
3528         ring->desc_cb = NULL;
3529 out:
3530         return ret;
3531 }
3532
3533 static void hns3_fini_ring(struct hns3_enet_ring *ring)
3534 {
3535         hns3_free_desc(ring);
3536         devm_kfree(ring_to_dev(ring), ring->desc_cb);
3537         ring->desc_cb = NULL;
3538         ring->next_to_clean = 0;
3539         ring->next_to_use = 0;
3540         ring->pending_buf = 0;
3541         if (ring->skb) {
3542                 dev_kfree_skb_any(ring->skb);
3543                 ring->skb = NULL;
3544         }
3545 }
3546
3547 static int hns3_buf_size2type(u32 buf_size)
3548 {
3549         int bd_size_type;
3550
3551         switch (buf_size) {
3552         case 512:
3553                 bd_size_type = HNS3_BD_SIZE_512_TYPE;
3554                 break;
3555         case 1024:
3556                 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
3557                 break;
3558         case 2048:
3559                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3560                 break;
3561         case 4096:
3562                 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
3563                 break;
3564         default:
3565                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3566         }
3567
3568         return bd_size_type;
3569 }
3570
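/* hns3_init_ring_hw - program a ring's descriptor base address, BD number
 * and (for RX rings) buffer size into the queue registers.
 */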
3571 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
3572 {
3573         dma_addr_t dma = ring->desc_dma_addr;
3574         struct hnae3_queue *q = ring->tqp;
3575
3576         if (!HNAE3_IS_TX_RING(ring)) {
3577                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
3578                                (u32)dma);
3579                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
3580                                (u32)((dma >> 31) >> 1));
3581
3582                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
3583                                hns3_buf_size2type(ring->buf_size));
3584                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
3585                                ring->desc_num / 8 - 1);
3586
3587         } else {
3588                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
3589                                (u32)dma);
3590                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3591                                (u32)((dma >> 31) >> 1));
3592
3593                 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3594                                ring->desc_num / 8 - 1);
3595         }
3596 }
3597
3598 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3599 {
3600         struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3601         int i;
3602
3603         for (i = 0; i < HNAE3_MAX_TC; i++) {
3604                 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3605                 int j;
3606
3607                 if (!tc_info->enable)
3608                         continue;
3609
3610                 for (j = 0; j < tc_info->tqp_count; j++) {
3611                         struct hnae3_queue *q;
3612
3613                         q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
3614                         hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
3615                                        tc_info->tc);
3616                 }
3617         }
3618 }
3619
3620 int hns3_init_all_ring(struct hns3_nic_priv *priv)
3621 {
3622         struct hnae3_handle *h = priv->ae_handle;
3623         int ring_num = h->kinfo.num_tqps * 2;
3624         int i, j;
3625         int ret;
3626
3627         for (i = 0; i < ring_num; i++) {
3628                 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
3629                 if (ret) {
3630                         dev_err(priv->dev,
3631                                 "Alloc ring memory fail! ret=%d\n", ret);
3632                         goto out_when_alloc_ring_memory;
3633                 }
3634
3635                 u64_stats_init(&priv->ring_data[i].ring->syncp);
3636         }
3637
3638         return 0;
3639
3640 out_when_alloc_ring_memory:
3641         for (j = i - 1; j >= 0; j--)
3642                 hns3_fini_ring(priv->ring_data[j].ring);
3643
3644         return -ENOMEM;
3645 }
3646
3647 int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3648 {
3649         struct hnae3_handle *h = priv->ae_handle;
3650         int i;
3651
3652         for (i = 0; i < h->kinfo.num_tqps; i++) {
3653                 hns3_fini_ring(priv->ring_data[i].ring);
3654                 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3655         }
3656         return 0;
3657 }
3658
3659 /* Set the MAC address if it is configured, or leave it to the AE driver */
3660 static int hns3_init_mac_addr(struct net_device *netdev, bool init)
3661 {
3662         struct hns3_nic_priv *priv = netdev_priv(netdev);
3663         struct hnae3_handle *h = priv->ae_handle;
3664         u8 mac_addr_temp[ETH_ALEN];
3665         int ret = 0;
3666
3667         if (h->ae_algo->ops->get_mac_addr && init) {
3668                 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3669                 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3670         }
3671
3672         /* Check if the MAC address is valid, if not get a random one */
3673         if (!is_valid_ether_addr(netdev->dev_addr)) {
3674                 eth_hw_addr_random(netdev);
3675                 dev_warn(priv->dev, "using random MAC address %pM\n",
3676                          netdev->dev_addr);
3677         }
3678
3679         if (h->ae_algo->ops->set_mac_addr)
3680                 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3681
3682         return ret;
3683 }
3684
3685 static int hns3_init_phy(struct net_device *netdev)
3686 {
3687         struct hnae3_handle *h = hns3_get_handle(netdev);
3688         int ret = 0;
3689
3690         if (h->ae_algo->ops->mac_connect_phy)
3691                 ret = h->ae_algo->ops->mac_connect_phy(h);
3692
3693         return ret;
3694 }
3695
3696 static void hns3_uninit_phy(struct net_device *netdev)
3697 {
3698         struct hnae3_handle *h = hns3_get_handle(netdev);
3699
3700         if (h->ae_algo->ops->mac_disconnect_phy)
3701                 h->ae_algo->ops->mac_disconnect_phy(h);
3702 }
3703
3704 static int hns3_restore_fd_rules(struct net_device *netdev)
3705 {
3706         struct hnae3_handle *h = hns3_get_handle(netdev);
3707         int ret = 0;
3708
3709         if (h->ae_algo->ops->restore_fd_rules)
3710                 ret = h->ae_algo->ops->restore_fd_rules(h);
3711
3712         return ret;
3713 }
3714
3715 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
3716 {
3717         struct hnae3_handle *h = hns3_get_handle(netdev);
3718
3719         if (h->ae_algo->ops->del_all_fd_entries)
3720                 h->ae_algo->ops->del_all_fd_entries(h, clear_list);
3721 }
3722
3723 static int hns3_client_start(struct hnae3_handle *handle)
3724 {
3725         if (!handle->ae_algo->ops->client_start)
3726                 return 0;
3727
3728         return handle->ae_algo->ops->client_start(handle);
3729 }
3730
3731 static void hns3_client_stop(struct hnae3_handle *handle)
3732 {
3733         if (!handle->ae_algo->ops->client_stop)
3734                 return;
3735
3736         handle->ae_algo->ops->client_stop(handle);
3737 }
3738
3739 static void hns3_info_show(struct hns3_nic_priv *priv)
3740 {
3741         struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3742
3743         dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
3744         dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
3745         dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
3746         dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
3747         dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
3748         dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
3749         dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
3750         dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
3751         dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
3752 }
3753
3754 static int hns3_client_init(struct hnae3_handle *handle)
3755 {
3756         struct pci_dev *pdev = handle->pdev;
3757         u16 alloc_tqps, max_rss_size;
3758         struct hns3_nic_priv *priv;
3759         struct net_device *netdev;
3760         int ret;
3761
3762         handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
3763                                                     &max_rss_size);
3764         netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
3765         if (!netdev)
3766                 return -ENOMEM;
3767
3768         priv = netdev_priv(netdev);
3769         priv->dev = &pdev->dev;
3770         priv->netdev = netdev;
3771         priv->ae_handle = handle;
3772         priv->tx_timeout_count = 0;
3773         set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
3774
3775         handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
3776
3777         handle->kinfo.netdev = netdev;
3778         handle->priv = (void *)priv;
3779
3780         hns3_init_mac_addr(netdev, true);
3781
3782         hns3_set_default_feature(netdev);
3783
3784         netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3785         netdev->priv_flags |= IFF_UNICAST_FLT;
3786         netdev->netdev_ops = &hns3_nic_netdev_ops;
3787         SET_NETDEV_DEV(netdev, &pdev->dev);
3788         hns3_ethtool_set_ops(netdev);
3789
3790         /* Carrier off reporting is important to ethtool even BEFORE open */
3791         netif_carrier_off(netdev);
3792
3793         ret = hns3_get_ring_config(priv);
3794         if (ret) {
3795                 ret = -ENOMEM;
3796                 goto out_get_ring_cfg;
3797         }
3798
3799         ret = hns3_nic_alloc_vector_data(priv);
3800         if (ret) {
3801                 ret = -ENOMEM;
3802                 goto out_alloc_vector_data;
3803         }
3804
3805         ret = hns3_nic_init_vector_data(priv);
3806         if (ret) {
3807                 ret = -ENOMEM;
3808                 goto out_init_vector_data;
3809         }
3810
3811         ret = hns3_init_all_ring(priv);
3812         if (ret) {
3813                 ret = -ENOMEM;
3814                 goto out_init_ring_data;
3815         }
3816
3817         ret = hns3_init_phy(netdev);
3818         if (ret)
3819                 goto out_init_phy;
3820
3821         ret = register_netdev(netdev);
3822         if (ret) {
3823                 dev_err(priv->dev, "probe register netdev fail!\n");
3824                 goto out_reg_netdev_fail;
3825         }
3826
3827         ret = hns3_client_start(handle);
3828         if (ret) {
3829                 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
3830                 goto out_client_start;
3831         }
3832
3833         hns3_dcbnl_setup(handle);
3834
3835         hns3_dbg_init(handle);
3836
3837         /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
3838         netdev->max_mtu = HNS3_MAX_MTU;
3839
3840         set_bit(HNS3_NIC_STATE_INITED, &priv->state);
3841
3842         if (netif_msg_drv(handle))
3843                 hns3_info_show(priv);
3844
3845         return ret;
3846
3847 out_client_start:
3848         unregister_netdev(netdev);
3849 out_reg_netdev_fail:
3850         hns3_uninit_phy(netdev);
3851 out_init_phy:
3852         hns3_uninit_all_ring(priv);
3853 out_init_ring_data:
3854         hns3_nic_uninit_vector_data(priv);
3855 out_init_vector_data:
3856         hns3_nic_dealloc_vector_data(priv);
3857 out_alloc_vector_data:
3858         priv->ring_data = NULL;
3859 out_get_ring_cfg:
3860         priv->ae_handle = NULL;
3861         free_netdev(netdev);
3862         return ret;
3863 }
3864
3865 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3866 {
3867         struct net_device *netdev = handle->kinfo.netdev;
3868         struct hns3_nic_priv *priv = netdev_priv(netdev);
3869         int ret;
3870
3871         hns3_remove_hw_addr(netdev);
3872
3873         if (netdev->reg_state != NETREG_UNINITIALIZED)
3874                 unregister_netdev(netdev);
3875
3876         hns3_client_stop(handle);
3877
3878         hns3_uninit_phy(netdev);
3879
3880         if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
3881                 netdev_warn(netdev, "already uninitialized\n");
3882                 goto out_netdev_free;
3883         }
3884
3885         hns3_del_all_fd_rules(netdev, true);
3886
3887         hns3_force_clear_all_rx_ring(handle);
3888
3889         hns3_nic_uninit_vector_data(priv);
3890
3891         ret = hns3_nic_dealloc_vector_data(priv);
3892         if (ret)
3893                 netdev_err(netdev, "dealloc vector error\n");
3894
3895         ret = hns3_uninit_all_ring(priv);
3896         if (ret)
3897                 netdev_err(netdev, "uninit ring error\n");
3898
3899         hns3_put_ring_config(priv);
3900
3901         hns3_dbg_uninit(handle);
3902
3903 out_netdev_free:
3904         free_netdev(netdev);
3905 }
3906
3907 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3908 {
3909         struct net_device *netdev = handle->kinfo.netdev;
3910
3911         if (!netdev)
3912                 return;
3913
3914         if (linkup) {
3915                 netif_carrier_on(netdev);
3916                 netif_tx_wake_all_queues(netdev);
3917                 if (netif_msg_link(handle))
3918                         netdev_info(netdev, "link up\n");
3919         } else {
3920                 netif_carrier_off(netdev);
3921                 netif_tx_stop_all_queues(netdev);
3922                 if (netif_msg_link(handle))
3923                         netdev_info(netdev, "link down\n");
3924         }
3925 }
3926
3927 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3928 {
3929         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3930         struct net_device *ndev = kinfo->netdev;
3931
3932         if (tc > HNAE3_MAX_TC)
3933                 return -EINVAL;
3934
3935         if (!ndev)
3936                 return -ENODEV;
3937
3938         return hns3_nic_set_real_num_queue(ndev);
3939 }
3940
3941 static int hns3_recover_hw_addr(struct net_device *ndev)
3942 {
3943         struct netdev_hw_addr_list *list;
3944         struct netdev_hw_addr *ha, *tmp;
3945         int ret = 0;
3946
3947         netif_addr_lock_bh(ndev);
3948         /* go through and sync uc_addr entries to the device */
3949         list = &ndev->uc;
3950         list_for_each_entry_safe(ha, tmp, &list->list, list) {
3951                 ret = hns3_nic_uc_sync(ndev, ha->addr);
3952                 if (ret)
3953                         goto out;
3954         }
3955
3956         /* go through and sync mc_addr entries to the device */
3957         list = &ndev->mc;
3958         list_for_each_entry_safe(ha, tmp, &list->list, list) {
3959                 ret = hns3_nic_mc_sync(ndev, ha->addr);
3960                 if (ret)
3961                         goto out;
3962         }
3963
3964 out:
3965         netif_addr_unlock_bh(ndev);
3966         return ret;
3967 }
3968
3969 static void hns3_remove_hw_addr(struct net_device *netdev)
3970 {
3971         struct netdev_hw_addr_list *list;
3972         struct netdev_hw_addr *ha, *tmp;
3973
3974         hns3_nic_uc_unsync(netdev, netdev->dev_addr);
3975
3976         netif_addr_lock_bh(netdev);
3977         /* go through and unsync uc_addr entries from the device */
3978         list = &netdev->uc;
3979         list_for_each_entry_safe(ha, tmp, &list->list, list)
3980                 hns3_nic_uc_unsync(netdev, ha->addr);
3981
3982         /* go through and unsync mc_addr entries from the device */
3983         list = &netdev->mc;
3984         list_for_each_entry_safe(ha, tmp, &list->list, list)
3985                 if (ha->refcount > 1)
3986                         hns3_nic_mc_unsync(netdev, ha->addr);
3987
3988         netif_addr_unlock_bh(netdev);
3989 }
3990
3991 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
3992 {
3993         while (ring->next_to_clean != ring->next_to_use) {
3994                 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
3995                 hns3_free_buffer_detach(ring, ring->next_to_clean);
3996                 ring_ptr_move_fw(ring, next_to_clean);
3997         }
3998 }
3999
4000 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
4001 {
4002         struct hns3_desc_cb res_cbs;
4003         int ret;
4004
4005         while (ring->next_to_use != ring->next_to_clean) {
4006                 /* When a buffer is not reused, its memory has been
4007                  * freed in hns3_handle_rx_bd or will be freed by the
4008                  * stack, so we need to replace the buffer here.
4009                  */
4010                 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4011                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
4012                         if (ret) {
4013                                 u64_stats_update_begin(&ring->syncp);
4014                                 ring->stats.sw_err_cnt++;
4015                                 u64_stats_update_end(&ring->syncp);
4016                                 /* if allocating a new buffer fails, exit directly
4017                                  * and clear it again in the up flow.
4018                                  */
4019                                 netdev_warn(ring->tqp->handle->kinfo.netdev,
4020                                             "reserve buffer map failed, ret = %d\n",
4021                                             ret);
4022                                 return ret;
4023                         }
4024                         hns3_replace_buffer(ring, ring->next_to_use,
4025                                             &res_cbs);
4026                 }
4027                 ring_ptr_move_fw(ring, next_to_use);
4028         }
4029
4030         /* Free the pending skb in rx ring */
4031         if (ring->skb) {
4032                 dev_kfree_skb_any(ring->skb);
4033                 ring->skb = NULL;
4034                 ring->pending_buf = 0;
4035         }
4036
4037         return 0;
4038 }
4039
4040 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
4041 {
4042         while (ring->next_to_use != ring->next_to_clean) {
4043                 /* When a buffer is not reused, its memory has been
4044                  * freed in hns3_handle_rx_bd or will be freed by the
4045                  * stack, so we only need to unmap the buffer here.
4046                  */
4047                 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
4048                         hns3_unmap_buffer(ring,
4049                                           &ring->desc_cb[ring->next_to_use]);
4050                         ring->desc_cb[ring->next_to_use].dma = 0;
4051                 }
4052
4053                 ring_ptr_move_fw(ring, next_to_use);
4054         }
4055 }
4056
4057 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
4058 {
4059         struct net_device *ndev = h->kinfo.netdev;
4060         struct hns3_nic_priv *priv = netdev_priv(ndev);
4061         struct hns3_enet_ring *ring;
4062         u32 i;
4063
4064         for (i = 0; i < h->kinfo.num_tqps; i++) {
4065                 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4066                 hns3_force_clear_rx_ring(ring);
4067         }
4068 }
4069
4070 static void hns3_clear_all_ring(struct hnae3_handle *h)
4071 {
4072         struct net_device *ndev = h->kinfo.netdev;
4073         struct hns3_nic_priv *priv = netdev_priv(ndev);
4074         u32 i;
4075
4076         for (i = 0; i < h->kinfo.num_tqps; i++) {
4077                 struct netdev_queue *dev_queue;
4078                 struct hns3_enet_ring *ring;
4079
4080                 ring = priv->ring_data[i].ring;
4081                 hns3_clear_tx_ring(ring);
4082                 dev_queue = netdev_get_tx_queue(ndev,
4083                                                 priv->ring_data[i].queue_index);
4084                 netdev_tx_reset_queue(dev_queue);
4085
4086                 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4087                 /* Continue to clear other rings even if clearing some
4088                  * rings failed.
4089                  */
4090                 hns3_clear_rx_ring(ring);
4091         }
4092 }
4093
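/* hns3_nic_reset_all_ring - reinitialize all TX/RX rings after a queue
 * reset: reprogram the ring registers, drop any stale TX buffers and mark
 * every RX descriptor for reuse, since the hardware head and tail pointers
 * are unknown in the reset flow.
 */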
4094 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
4095 {
4096         struct net_device *ndev = h->kinfo.netdev;
4097         struct hns3_nic_priv *priv = netdev_priv(ndev);
4098         struct hns3_enet_ring *rx_ring;
4099         int i, j;
4100         int ret;
4101
4102         for (i = 0; i < h->kinfo.num_tqps; i++) {
4103                 ret = h->ae_algo->ops->reset_queue(h, i);
4104                 if (ret)
4105                         return ret;
4106
4107                 hns3_init_ring_hw(priv->ring_data[i].ring);
4108
4109                 /* We need to clear the TX ring here because the self test
4110                  * uses the ring and does not run the down flow before up
4111                  */
4112                 hns3_clear_tx_ring(priv->ring_data[i].ring);
4113                 priv->ring_data[i].ring->next_to_clean = 0;
4114                 priv->ring_data[i].ring->next_to_use = 0;
4115
4116                 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
4117                 hns3_init_ring_hw(rx_ring);
4118                 ret = hns3_clear_rx_ring(rx_ring);
4119                 if (ret)
4120                         return ret;
4121
4122                 /* We cannot know the hardware head and tail when this
4123                  * function is called in the reset flow, so reuse all descriptors.
4124                  */
4125                 for (j = 0; j < rx_ring->desc_num; j++)
4126                         hns3_reuse_buffer(rx_ring, j);
4127
4128                 rx_ring->next_to_clean = 0;
4129                 rx_ring->next_to_use = 0;
4130         }
4131
4132         hns3_init_tx_ring_tc(priv);
4133
4134         return 0;
4135 }
4136
4137 static void hns3_store_coal(struct hns3_nic_priv *priv)
4138 {
4139         /* ethtool only supports setting and querying one coalesce
4140          * configuration for now, so save vector 0's coalesce
4141          * configuration here in order to restore it.
4142          */
4143         memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
4144                sizeof(struct hns3_enet_coalesce));
4145         memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
4146                sizeof(struct hns3_enet_coalesce));
4147 }
4148
4149 static void hns3_restore_coal(struct hns3_nic_priv *priv)
4150 {
4151         u16 vector_num = priv->vector_num;
4152         int i;
4153
4154         for (i = 0; i < vector_num; i++) {
4155                 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
4156                        sizeof(struct hns3_enet_coalesce));
4157                 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
4158                        sizeof(struct hns3_enet_coalesce));
4159         }
4160 }
4161
4162 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
4163 {
4164         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4165         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4166         struct net_device *ndev = kinfo->netdev;
4167         struct hns3_nic_priv *priv = netdev_priv(ndev);
4168
4169         if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
4170                 return 0;
4171
4172         /* It is cumbersome for hardware to pick-and-choose entries for deletion
4173          * from the table space. Hence, for a function reset, software intervention
4174          * is required to delete the entries.
4175          */
4176         if (hns3_dev_ongoing_func_reset(ae_dev)) {
4177                 hns3_remove_hw_addr(ndev);
4178                 hns3_del_all_fd_rules(ndev, false);
4179         }
4180
4181         if (!netif_running(ndev))
4182                 return 0;
4183
4184         return hns3_nic_net_stop(ndev);
4185 }
4186
4187 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
4188 {
4189         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
4190         struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
4191         int ret = 0;
4192
4193         clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
4194
4195         if (netif_running(kinfo->netdev)) {
4196                 ret = hns3_nic_net_open(kinfo->netdev);
4197                 if (ret) {
4198                         set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
4199                         netdev_err(kinfo->netdev,
4200                                    "hns net up fail, ret=%d!\n", ret);
4201                         return ret;
4202                 }
4203         }
4204
4205         return ret;
4206 }
4207
4208 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
4209 {
4210         struct net_device *netdev = handle->kinfo.netdev;
4211         struct hns3_nic_priv *priv = netdev_priv(netdev);
4212         int ret;
4213
4214         /* Carrier off reporting is important to ethtool even BEFORE open */
4215         netif_carrier_off(netdev);
4216
4217         ret = hns3_get_ring_config(priv);
4218         if (ret)
4219                 return ret;
4220
4221         ret = hns3_nic_alloc_vector_data(priv);
4222         if (ret)
4223                 goto err_put_ring;
4224
4225         hns3_restore_coal(priv);
4226
4227         ret = hns3_nic_init_vector_data(priv);
4228         if (ret)
4229                 goto err_dealloc_vector;
4230
4231         ret = hns3_init_all_ring(priv);
4232         if (ret)
4233                 goto err_uninit_vector;
4234
4235         ret = hns3_client_start(handle);
4236         if (ret) {
4237                 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
4238                 goto err_uninit_ring;
4239         }
4240
4241         set_bit(HNS3_NIC_STATE_INITED, &priv->state);
4242
4243         return ret;
4244
4245 err_uninit_ring:
4246         hns3_uninit_all_ring(priv);
4247 err_uninit_vector:
4248         hns3_nic_uninit_vector_data(priv);
4249 err_dealloc_vector:
4250         hns3_nic_dealloc_vector_data(priv);
4251 err_put_ring:
4252         hns3_put_ring_config(priv);
4253
4254         return ret;
4255 }
4256
4257 static int hns3_reset_notify_restore_enet(struct hnae3_handle *handle)
4258 {
4259         struct net_device *netdev = handle->kinfo.netdev;
4260         bool vlan_filter_enable;
4261         int ret;
4262
4263         ret = hns3_init_mac_addr(netdev, false);
4264         if (ret)
4265                 return ret;
4266
4267         ret = hns3_recover_hw_addr(netdev);
4268         if (ret)
4269                 return ret;
4270
4271         ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
4272         if (ret)
4273                 return ret;
4274
4275         vlan_filter_enable = !(netdev->flags & IFF_PROMISC);
4276         hns3_enable_vlan_filter(netdev, vlan_filter_enable);
4277
4278         if (handle->ae_algo->ops->restore_vlan_table)
4279                 handle->ae_algo->ops->restore_vlan_table(handle);
4280
4281         return hns3_restore_fd_rules(netdev);
4282 }
4283
4284 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
4285 {
4286         struct net_device *netdev = handle->kinfo.netdev;
4287         struct hns3_nic_priv *priv = netdev_priv(netdev);
4288         int ret;
4289
4290         if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
4291                 netdev_warn(netdev, "already uninitialized\n");
4292                 return 0;
4293         }
4294
4295         hns3_force_clear_all_rx_ring(handle);
4296
4297         hns3_nic_uninit_vector_data(priv);
4298
4299         hns3_store_coal(priv);
4300
4301         ret = hns3_nic_dealloc_vector_data(priv);
4302         if (ret)
4303                 netdev_err(netdev, "dealloc vector error\n");
4304
4305         ret = hns3_uninit_all_ring(priv);
4306         if (ret)
4307                 netdev_err(netdev, "uninit ring error\n");
4308
4309         hns3_put_ring_config(priv);
4310
4311         return ret;
4312 }
4313
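/* hns3_reset_notify - dispatch reset notifications from the AE layer to the
 * corresponding down/uninit/init/restore/up handlers above.
 */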
4314 static int hns3_reset_notify(struct hnae3_handle *handle,
4315                              enum hnae3_reset_notify_type type)
4316 {
4317         int ret = 0;
4318
4319         switch (type) {
4320         case HNAE3_UP_CLIENT:
4321                 ret = hns3_reset_notify_up_enet(handle);
4322                 break;
4323         case HNAE3_DOWN_CLIENT:
4324                 ret = hns3_reset_notify_down_enet(handle);
4325                 break;
4326         case HNAE3_INIT_CLIENT:
4327                 ret = hns3_reset_notify_init_enet(handle);
4328                 break;
4329         case HNAE3_UNINIT_CLIENT:
4330                 ret = hns3_reset_notify_uninit_enet(handle);
4331                 break;
4332         case HNAE3_RESTORE_CLIENT:
4333                 ret = hns3_reset_notify_restore_enet(handle);
4334                 break;
4335         default:
4336                 break;
4337         }
4338
4339         return ret;
4340 }
4341
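/* hns3_set_channels - change the number of TQPs via ethtool: bring the
 * client down and uninitialize it, ask the AE layer to change the queue
 * count (reverting to the old count on failure), then reinitialize and
 * bring the client back up.
 */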
4342 int hns3_set_channels(struct net_device *netdev,
4343                       struct ethtool_channels *ch)
4344 {
4345         struct hnae3_handle *h = hns3_get_handle(netdev);
4346         struct hnae3_knic_private_info *kinfo = &h->kinfo;
4347         bool rxfh_configured = netif_is_rxfh_configured(netdev);
4348         u32 new_tqp_num = ch->combined_count;
4349         u16 org_tqp_num;
4350         int ret;
4351
4352         if (ch->rx_count || ch->tx_count)
4353                 return -EINVAL;
4354
4355         if (new_tqp_num > hns3_get_max_available_channels(h) ||
4356             new_tqp_num < 1) {
4357                 dev_err(&netdev->dev,
4358                         "Change tqps fail, the tqp range is from 1 to %d\n",
4359                         hns3_get_max_available_channels(h));
4360                 return -EINVAL;
4361         }
4362
4363         if (kinfo->rss_size == new_tqp_num)
4364                 return 0;
4365
4366         ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
4367         if (ret)
4368                 return ret;
4369
4370         ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
4371         if (ret)
4372                 return ret;
4373
4374         org_tqp_num = h->kinfo.num_tqps;
4375         ret = h->ae_algo->ops->set_channels(h, new_tqp_num, rxfh_configured);
4376         if (ret) {
4377                 ret = h->ae_algo->ops->set_channels(h, org_tqp_num,
4378                                                     rxfh_configured);
4379                 if (ret) {
4380                         /* If reverting to the old tqp number fails, it is a fatal error */
4381                         dev_err(&netdev->dev,
4382                                 "Revert to old tqp num fail, ret=%d\n", ret);
4383                         return ret;
4384                 }
4385                 dev_info(&netdev->dev,
4386                          "Change tqp num fail, revert to old tqp num\n");
4387         }
4388         ret = hns3_reset_notify(h, HNAE3_INIT_CLIENT);
4389         if (ret)
4390                 return ret;
4391
4392         return hns3_reset_notify(h, HNAE3_UP_CLIENT);
4393 }
4394
4395 static const struct hnae3_client_ops client_ops = {
4396         .init_instance = hns3_client_init,
4397         .uninit_instance = hns3_client_uninit,
4398         .link_status_change = hns3_link_status_change,
4399         .setup_tc = hns3_client_setup_tc,
4400         .reset_notify = hns3_reset_notify,
4401 };
4402
4403 /* hns3_init_module - Driver registration routine
4404  * hns3_init_module is the first routine called when the driver is
4405  * loaded. It registers the HNAE3 client and the PCI driver.
4406  */
4407 static int __init hns3_init_module(void)
4408 {
4409         int ret;
4410
4411         pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
4412         pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
4413
4414         client.type = HNAE3_CLIENT_KNIC;
4415         snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
4416                  hns3_driver_name);
4417
4418         client.ops = &client_ops;
4419
4420         INIT_LIST_HEAD(&client.node);
4421
4422         hns3_dbg_register_debugfs(hns3_driver_name);
4423
4424         ret = hnae3_register_client(&client);
4425         if (ret)
4426                 goto err_reg_client;
4427
4428         ret = pci_register_driver(&hns3_driver);
4429         if (ret)
4430                 goto err_reg_driver;
4431
4432         return ret;
4433
4434 err_reg_driver:
4435         hnae3_unregister_client(&client);
4436 err_reg_client:
4437         hns3_dbg_unregister_debugfs();
4438         return ret;
4439 }
4440 module_init(hns3_init_module);
4441
4442 /* hns3_exit_module - Driver exit cleanup routine
4443  * hns3_exit_module is called just before the driver is removed
4444  * from memory.
4445  */
4446 static void __exit hns3_exit_module(void)
4447 {
4448         pci_unregister_driver(&hns3_driver);
4449         hnae3_unregister_client(&client);
4450         hns3_dbg_unregister_debugfs();
4451 }
4452 module_exit(hns3_exit_module);
4453
4454 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
4455 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4456 MODULE_LICENSE("GPL");
4457 MODULE_ALIAS("pci:hns-nic");
4458 MODULE_VERSION(HNS3_MOD_VERSION);