drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/dma-mapping.h>
5 #include <linux/etherdevice.h>
6 #include <linux/interrupt.h>
7 #include <linux/if_vlan.h>
8 #include <linux/ip.h>
9 #include <linux/ipv6.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/skbuff.h>
13 #include <linux/sctp.h>
14 #include <linux/vermagic.h>
15 #include <net/gre.h>
16 #include <net/pkt_cls.h>
17 #include <net/vxlan.h>
18
19 #include "hnae3.h"
20 #include "hns3_enet.h"
21
22 static void hns3_clear_all_ring(struct hnae3_handle *h);
23 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
24 static void hns3_remove_hw_addr(struct net_device *netdev);
25
26 static const char hns3_driver_name[] = "hns3";
27 const char hns3_driver_version[] = VERMAGIC_STRING;
28 static const char hns3_driver_string[] =
29                         "Hisilicon Ethernet Network Driver for Hip08 Family";
30 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
31 static struct hnae3_client client;
32
33 /* hns3_pci_tbl - PCI Device ID Table
34  *
35  * Last entry must be all 0s
36  *
37  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
38  *   Class, Class Mask, private data (not used) }
39  */
40 static const struct pci_device_id hns3_pci_tbl[] = {
41         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
42         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
43         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
44          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
46          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
48          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
49         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
50          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
51         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
52          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
53         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
54         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
55          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
56         /* required last entry */
57         {0, }
58 };
59 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
60
61 static irqreturn_t hns3_irq_handle(int irq, void *vector)
62 {
63         struct hns3_enet_tqp_vector *tqp_vector = vector;
64
65         napi_schedule(&tqp_vector->napi);
66
67         return IRQ_HANDLED;
68 }
69
70 /* This callback function records affinity changes in the vector's irq
71  * affinity mask when the irq_set_affinity_notifier() mechanism is used.
72  */
73 static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
74                                          const cpumask_t *mask)
75 {
76         struct hns3_enet_tqp_vector *tqp_vectors =
77                 container_of(notify, struct hns3_enet_tqp_vector,
78                              affinity_notify);
79
80         tqp_vectors->affinity_mask = *mask;
81 }
82
83 static void hns3_nic_irq_affinity_release(struct kref *ref)
84 {
85 }
86
87 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
88 {
89         struct hns3_enet_tqp_vector *tqp_vectors;
90         unsigned int i;
91
92         for (i = 0; i < priv->vector_num; i++) {
93                 tqp_vectors = &priv->tqp_vector[i];
94
95                 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
96                         continue;
97
98                 /* clear the affinity notifier and affinity mask */
99                 irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
100                 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
101
102                 /* release the irq resource */
103                 free_irq(tqp_vectors->vector_irq, tqp_vectors);
104                 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
105         }
106 }
107
108 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
109 {
110         struct hns3_enet_tqp_vector *tqp_vectors;
111         int txrx_int_idx = 0;
112         int rx_int_idx = 0;
113         int tx_int_idx = 0;
114         unsigned int i;
115         int ret;
116
117         for (i = 0; i < priv->vector_num; i++) {
118                 tqp_vectors = &priv->tqp_vector[i];
119
120                 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
121                         continue;
122
123                 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
124                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
125                                  "%s-%s-%d", priv->netdev->name, "TxRx",
126                                  txrx_int_idx++);
127                         txrx_int_idx++;
128                 } else if (tqp_vectors->rx_group.ring) {
129                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
130                                  "%s-%s-%d", priv->netdev->name, "Rx",
131                                  rx_int_idx++);
132                 } else if (tqp_vectors->tx_group.ring) {
133                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
134                                  "%s-%s-%d", priv->netdev->name, "Tx",
135                                  tx_int_idx++);
136                 } else {
137                         /* Skip this unused q_vector */
138                         continue;
139                 }
140
141                 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
142
143                 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
144                                   tqp_vectors->name,
145                                   tqp_vectors);
146                 if (ret) {
147                         netdev_err(priv->netdev, "request irq(%d) fail\n",
148                                    tqp_vectors->vector_irq);
149                         return ret;
150                 }
151
152                 tqp_vectors->affinity_notify.notify =
153                                         hns3_nic_irq_affinity_notify;
154                 tqp_vectors->affinity_notify.release =
155                                         hns3_nic_irq_affinity_release;
156                 irq_set_affinity_notifier(tqp_vectors->vector_irq,
157                                           &tqp_vectors->affinity_notify);
158                 irq_set_affinity_hint(tqp_vectors->vector_irq,
159                                       &tqp_vectors->affinity_mask);
160
161                 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
162         }
163
164         return 0;
165 }
166
167 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
168                                  u32 mask_en)
169 {
170         writel(mask_en, tqp_vector->mask_addr);
171 }
172
173 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
174 {
175         napi_enable(&tqp_vector->napi);
176
177         /* enable vector */
178         hns3_mask_vector_irq(tqp_vector, 1);
179 }
180
181 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
182 {
183         /* disable vector */
184         hns3_mask_vector_irq(tqp_vector, 0);
185
186         disable_irq(tqp_vector->vector_irq);
187         napi_disable(&tqp_vector->napi);
188 }
189
190 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
191                                  u32 rl_value)
192 {
193         u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
194
195         /* This defines the configuration for RL (Interrupt Rate Limiter).
196          * RL defines the rate of interrupts, i.e. the number of interrupts
197          * per second. GL and RL are two ways to achieve interrupt coalescing.
198          */
199
200         if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
201             !tqp_vector->rx_group.coal.gl_adapt_enable)
202                 /* According to the hardware, the range of rl_reg is
203                  * 0-59 and the unit is 4.
204                  */
205                 rl_reg |= HNS3_INT_RL_ENABLE_MASK;
206
207         writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
208 }
209
210 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
211                                     u32 gl_value)
212 {
213         u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
214
215         writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
216 }
217
218 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
219                                     u32 gl_value)
220 {
221         u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
222
223         writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
224 }
225
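/* A minimal usage sketch, not part of the driver: example_set_static_coalesce()
 * is a hypothetical helper showing how the three setters above combine to
 * program fixed coalescing for one vector.  hns3_set_vector_coalesce_rl() only
 * ORs in HNS3_INT_RL_ENABLE_MASK when both GL self-adaptive flags are cleared,
 * so a static RL setting implies disabling self-adaptive GL first.
 */
static void example_set_static_coalesce(struct hns3_enet_tqp_vector *tqp_vector,
                                        u32 tx_gl_us, u32 rx_gl_us, u32 rl_us)
{
        /* turn off self-adaptive GL so that the RL enable bit can be set */
        tqp_vector->tx_group.coal.gl_adapt_enable = 0;
        tqp_vector->rx_group.coal.gl_adapt_enable = 0;

        hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_gl_us);
        hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_gl_us);
        hns3_set_vector_coalesce_rl(tqp_vector, rl_us);
}
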
226 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
227                                    struct hns3_nic_priv *priv)
228 {
229         /* initialize the configuration for interrupt coalescing.
230          * 1. GL (Interrupt Gap Limiter)
231          * 2. RL (Interrupt Rate Limiter)
232          */
233
234         /* Default: enable self-adaptive interrupt coalescing and GL */
235         tqp_vector->tx_group.coal.gl_adapt_enable = 1;
236         tqp_vector->rx_group.coal.gl_adapt_enable = 1;
237
238         tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
239         tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
240
241         tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
242         tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
243         tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
244 }
245
246 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
247                                       struct hns3_nic_priv *priv)
248 {
249         struct hnae3_handle *h = priv->ae_handle;
250
251         hns3_set_vector_coalesce_tx_gl(tqp_vector,
252                                        tqp_vector->tx_group.coal.int_gl);
253         hns3_set_vector_coalesce_rx_gl(tqp_vector,
254                                        tqp_vector->rx_group.coal.int_gl);
255         hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
256 }
257
258 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
259 {
260         struct hnae3_handle *h = hns3_get_handle(netdev);
261         struct hnae3_knic_private_info *kinfo = &h->kinfo;
262         unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
263         int i, ret;
264
265         if (kinfo->num_tc <= 1) {
266                 netdev_reset_tc(netdev);
267         } else {
268                 ret = netdev_set_num_tc(netdev, kinfo->num_tc);
269                 if (ret) {
270                         netdev_err(netdev,
271                                    "netdev_set_num_tc fail, ret=%d!\n", ret);
272                         return ret;
273                 }
274
275                 for (i = 0; i < HNAE3_MAX_TC; i++) {
276                         if (!kinfo->tc_info[i].enable)
277                                 continue;
278
279                         netdev_set_tc_queue(netdev,
280                                             kinfo->tc_info[i].tc,
281                                             kinfo->tc_info[i].tqp_count,
282                                             kinfo->tc_info[i].tqp_offset);
283                 }
284         }
285
286         ret = netif_set_real_num_tx_queues(netdev, queue_size);
287         if (ret) {
288                 netdev_err(netdev,
289                            "netif_set_real_num_tx_queues fail, ret=%d!\n",
290                            ret);
291                 return ret;
292         }
293
294         ret = netif_set_real_num_rx_queues(netdev, queue_size);
295         if (ret) {
296                 netdev_err(netdev,
297                            "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
298                 return ret;
299         }
300
301         return 0;
302 }
303
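/* Worked example for the mapping above (numbers illustrative only): with
 * rss_size = 4 and num_tc = 2, queue_size = 8 real TX/RX queues are exposed.
 * If TC0 covers TQPs 0-3 and TC1 covers TQPs 4-7, the calls are
 * netdev_set_tc_queue(netdev, 0, 4, 0) and netdev_set_tc_queue(netdev, 1, 4, 4),
 * i.e. (tc, tqp_count, tqp_offset) for each enabled TC.
 */
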
304 static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
305 {
306         u16 alloc_tqps, max_rss_size, rss_size;
307
308         h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
309         rss_size = alloc_tqps / h->kinfo.num_tc;
310
311         return min_t(u16, rss_size, max_rss_size);
312 }
313
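/* Worked example (illustrative only): with 16 allocated TQPs, num_tc = 4 and
 * a hardware max_rss_size of 8, rss_size = 16 / 4 = 4, so the maximum
 * available channel count reported is min(4, 8) = 4.
 */
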
314 static int hns3_nic_net_up(struct net_device *netdev)
315 {
316         struct hns3_nic_priv *priv = netdev_priv(netdev);
317         struct hnae3_handle *h = priv->ae_handle;
318         int i, j;
319         int ret;
320
321         ret = hns3_nic_reset_all_ring(h);
322         if (ret)
323                 return ret;
324
325         /* get irq resource for all vectors */
326         ret = hns3_nic_init_irq(priv);
327         if (ret) {
328                 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
329                 return ret;
330         }
331
332         /* enable the vectors */
333         for (i = 0; i < priv->vector_num; i++)
334                 hns3_vector_enable(&priv->tqp_vector[i]);
335
336         /* start the ae_dev */
337         ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
338         if (ret)
339                 goto out_start_err;
340
341         clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
342
343         return 0;
344
345 out_start_err:
346         for (j = i - 1; j >= 0; j--)
347                 hns3_vector_disable(&priv->tqp_vector[j]);
348
349         hns3_nic_uninit_irq(priv);
350
351         return ret;
352 }
353
354 static int hns3_nic_net_open(struct net_device *netdev)
355 {
356         struct hns3_nic_priv *priv = netdev_priv(netdev);
357         struct hnae3_handle *h = hns3_get_handle(netdev);
358         struct hnae3_knic_private_info *kinfo;
359         int i, ret;
360
361         netif_carrier_off(netdev);
362
363         ret = hns3_nic_set_real_num_queue(netdev);
364         if (ret)
365                 return ret;
366
367         ret = hns3_nic_net_up(netdev);
368         if (ret) {
369                 netdev_err(netdev,
370                            "hns net up fail, ret=%d!\n", ret);
371                 return ret;
372         }
373
374         kinfo = &h->kinfo;
375         for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
376                 netdev_set_prio_tc_map(netdev, i,
377                                        kinfo->prio_tc[i]);
378         }
379
380         priv->ae_handle->last_reset_time = jiffies;
381         return 0;
382 }
383
384 static void hns3_nic_net_down(struct net_device *netdev)
385 {
386         struct hns3_nic_priv *priv = netdev_priv(netdev);
387         const struct hnae3_ae_ops *ops;
388         int i;
389
390         if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
391                 return;
392
393         /* disable vectors */
394         for (i = 0; i < priv->vector_num; i++)
395                 hns3_vector_disable(&priv->tqp_vector[i]);
396
397         /* stop ae_dev */
398         ops = priv->ae_handle->ae_algo->ops;
399         if (ops->stop)
400                 ops->stop(priv->ae_handle);
401
402         /* free irq resources */
403         hns3_nic_uninit_irq(priv);
404
405         hns3_clear_all_ring(priv->ae_handle);
406 }
407
408 static int hns3_nic_net_stop(struct net_device *netdev)
409 {
410         netif_tx_stop_all_queues(netdev);
411         netif_carrier_off(netdev);
412
413         hns3_nic_net_down(netdev);
414
415         return 0;
416 }
417
418 static int hns3_nic_uc_sync(struct net_device *netdev,
419                             const unsigned char *addr)
420 {
421         struct hnae3_handle *h = hns3_get_handle(netdev);
422
423         if (h->ae_algo->ops->add_uc_addr)
424                 return h->ae_algo->ops->add_uc_addr(h, addr);
425
426         return 0;
427 }
428
429 static int hns3_nic_uc_unsync(struct net_device *netdev,
430                               const unsigned char *addr)
431 {
432         struct hnae3_handle *h = hns3_get_handle(netdev);
433
434         if (h->ae_algo->ops->rm_uc_addr)
435                 return h->ae_algo->ops->rm_uc_addr(h, addr);
436
437         return 0;
438 }
439
440 static int hns3_nic_mc_sync(struct net_device *netdev,
441                             const unsigned char *addr)
442 {
443         struct hnae3_handle *h = hns3_get_handle(netdev);
444
445         if (h->ae_algo->ops->add_mc_addr)
446                 return h->ae_algo->ops->add_mc_addr(h, addr);
447
448         return 0;
449 }
450
451 static int hns3_nic_mc_unsync(struct net_device *netdev,
452                               const unsigned char *addr)
453 {
454         struct hnae3_handle *h = hns3_get_handle(netdev);
455
456         if (h->ae_algo->ops->rm_mc_addr)
457                 return h->ae_algo->ops->rm_mc_addr(h, addr);
458
459         return 0;
460 }
461
462 static void hns3_nic_set_rx_mode(struct net_device *netdev)
463 {
464         struct hnae3_handle *h = hns3_get_handle(netdev);
465
466         if (h->ae_algo->ops->set_promisc_mode) {
467                 if (netdev->flags & IFF_PROMISC)
468                         h->ae_algo->ops->set_promisc_mode(h, true, true);
469                 else if (netdev->flags & IFF_ALLMULTI)
470                         h->ae_algo->ops->set_promisc_mode(h, false, true);
471                 else
472                         h->ae_algo->ops->set_promisc_mode(h, false, false);
473         }
474         if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
475                 netdev_err(netdev, "sync uc address fail\n");
476         if (netdev->flags & IFF_MULTICAST) {
477                 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
478                         netdev_err(netdev, "sync mc address fail\n");
479         }
480 }
481
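/* The two booleans passed to set_promisc_mode() above enable unicast and
 * multicast promiscuous mode respectively, so the mapping used is:
 *
 *	IFF_PROMISC	-> (true,  true)
 *	IFF_ALLMULTI	-> (false, true)
 *	otherwise	-> (false, false)
 */
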
482 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
483                         u16 *mss, u32 *type_cs_vlan_tso)
484 {
485         u32 l4_offset, hdr_len;
486         union l3_hdr_info l3;
487         union l4_hdr_info l4;
488         u32 l4_paylen;
489         int ret;
490
491         if (!skb_is_gso(skb))
492                 return 0;
493
494         ret = skb_cow_head(skb, 0);
495         if (ret)
496                 return ret;
497
498         l3.hdr = skb_network_header(skb);
499         l4.hdr = skb_transport_header(skb);
500
501         /* Software should clear the IPv4 header checksum field when TSO
502          * is needed.
503          */
504         if (l3.v4->version == 4)
505                 l3.v4->check = 0;
506
507         /* tunnel packet.*/
508         if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
509                                          SKB_GSO_GRE_CSUM |
510                                          SKB_GSO_UDP_TUNNEL |
511                                          SKB_GSO_UDP_TUNNEL_CSUM)) {
512                 if ((!(skb_shinfo(skb)->gso_type &
513                     SKB_GSO_PARTIAL)) &&
514                     (skb_shinfo(skb)->gso_type &
515                     SKB_GSO_UDP_TUNNEL_CSUM)) {
516                         /* Software should clear the UDP checksum field
517                          * when TSO is needed.
518                          */
519                         l4.udp->check = 0;
520                 }
521                 /* reset l3&l4 pointers from outer to inner headers */
522                 l3.hdr = skb_inner_network_header(skb);
523                 l4.hdr = skb_inner_transport_header(skb);
524
525                 /* Software should clear the IPv4 header checksum field
526                  * when TSO is needed.
527                  */
528                 if (l3.v4->version == 4)
529                         l3.v4->check = 0;
530         }
531
532         /* normal or tunnel packet */
533         l4_offset = l4.hdr - skb->data;
534         hdr_len = (l4.tcp->doff * 4) + l4_offset;
535
536         /* remove the payload length from the inner pseudo checksum when TSO */
537         l4_paylen = skb->len - l4_offset;
538         csum_replace_by_diff(&l4.tcp->check,
539                              (__force __wsum)htonl(l4_paylen));
540
541         /* find the txbd field values */
542         *paylen = skb->len - hdr_len;
543         hnae3_set_bit(*type_cs_vlan_tso,
544                       HNS3_TXD_TSO_B, 1);
545
546         /* get MSS for TSO */
547         *mss = skb_shinfo(skb)->gso_size;
548
549         return 0;
550 }
551
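/* Worked example for the pseudo-header fixup in hns3_set_tso() (illustrative
 * numbers): for a GSO skb with skb->len = 2994 and the TCP header starting at
 * l4_offset = 34, l4_paylen = 2994 - 34 = 2960 is subtracted from the
 * pseudo-header checksum, since the hardware adds each segment's own L4
 * length back when it computes the per-segment checksums.
 */
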
552 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
553                                 u8 *il4_proto)
554 {
555         union {
556                 struct iphdr *v4;
557                 struct ipv6hdr *v6;
558                 unsigned char *hdr;
559         } l3;
560         unsigned char *l4_hdr;
561         unsigned char *exthdr;
562         u8 l4_proto_tmp;
563         __be16 frag_off;
564
565         /* find outer header point */
566         l3.hdr = skb_network_header(skb);
567         l4_hdr = skb_transport_header(skb);
568
569         if (skb->protocol == htons(ETH_P_IPV6)) {
570                 exthdr = l3.hdr + sizeof(*l3.v6);
571                 l4_proto_tmp = l3.v6->nexthdr;
572                 if (l4_hdr != exthdr)
573                         ipv6_skip_exthdr(skb, exthdr - skb->data,
574                                          &l4_proto_tmp, &frag_off);
575         } else if (skb->protocol == htons(ETH_P_IP)) {
576                 l4_proto_tmp = l3.v4->protocol;
577         } else {
578                 return -EINVAL;
579         }
580
581         *ol4_proto = l4_proto_tmp;
582
583         /* tunnel packet */
584         if (!skb->encapsulation) {
585                 *il4_proto = 0;
586                 return 0;
587         }
588
589         /* find inner header point */
590         l3.hdr = skb_inner_network_header(skb);
591         l4_hdr = skb_inner_transport_header(skb);
592
593         if (l3.v6->version == 6) {
594                 exthdr = l3.hdr + sizeof(*l3.v6);
595                 l4_proto_tmp = l3.v6->nexthdr;
596                 if (l4_hdr != exthdr)
597                         ipv6_skip_exthdr(skb, exthdr - skb->data,
598                                          &l4_proto_tmp, &frag_off);
599         } else if (l3.v4->version == 4) {
600                 l4_proto_tmp = l3.v4->protocol;
601         }
602
603         *il4_proto = l4_proto_tmp;
604
605         return 0;
606 }
607
608 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
609                                 u8 il4_proto, u32 *type_cs_vlan_tso,
610                                 u32 *ol_type_vlan_len_msec)
611 {
612         union {
613                 struct iphdr *v4;
614                 struct ipv6hdr *v6;
615                 unsigned char *hdr;
616         } l3;
617         union {
618                 struct tcphdr *tcp;
619                 struct udphdr *udp;
620                 struct gre_base_hdr *gre;
621                 unsigned char *hdr;
622         } l4;
623         unsigned char *l2_hdr;
624         u8 l4_proto = ol4_proto;
625         u32 ol2_len;
626         u32 ol3_len;
627         u32 ol4_len;
628         u32 l2_len;
629         u32 l3_len;
630
631         l3.hdr = skb_network_header(skb);
632         l4.hdr = skb_transport_header(skb);
633
634         /* compute L2 header size for normal packet, defined in 2 Bytes */
635         l2_len = l3.hdr - skb->data;
636         hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
637                         HNS3_TXD_L2LEN_S, l2_len >> 1);
638
639         /* tunnel packet*/
640         if (skb->encapsulation) {
641                 /* compute OL2 header size, defined in 2 Bytes */
642                 ol2_len = l2_len;
643                 hnae3_set_field(*ol_type_vlan_len_msec,
644                                 HNS3_TXD_L2LEN_M,
645                                 HNS3_TXD_L2LEN_S, ol2_len >> 1);
646
647                 /* compute OL3 header size, defined in 4 Bytes */
648                 ol3_len = l4.hdr - l3.hdr;
649                 hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
650                                 HNS3_TXD_L3LEN_S, ol3_len >> 2);
651
652                 /* MAC in UDP, MAC in GRE (0x6558)*/
653                 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
654                         /* switch MAC header ptr from outer to inner header.*/
655                         l2_hdr = skb_inner_mac_header(skb);
656
657                         /* compute OL4 header size, defined in 4 Bytes. */
658                         ol4_len = l2_hdr - l4.hdr;
659                         hnae3_set_field(*ol_type_vlan_len_msec,
660                                         HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
661                                         ol4_len >> 2);
662
663                         /* switch IP header ptr from outer to inner header */
664                         l3.hdr = skb_inner_network_header(skb);
665
666                         /* compute inner l2 header size, defined in 2 Bytes. */
667                         l2_len = l3.hdr - l2_hdr;
668                         hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
669                                         HNS3_TXD_L2LEN_S, l2_len >> 1);
670                 } else {
671                         /* skb packet types not supported by hardware;
672                          * the txbd len field is not filled.
673                          */
674                         return;
675                 }
676
677                 /* switch L4 header pointer from outer to inner */
678                 l4.hdr = skb_inner_transport_header(skb);
679
680                 l4_proto = il4_proto;
681         }
682
683         /* compute inner(/normal) L3 header size, defined in 4 Bytes */
684         l3_len = l4.hdr - l3.hdr;
685         hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
686                         HNS3_TXD_L3LEN_S, l3_len >> 2);
687
688         /* compute inner(/normal) L4 header size, defined in 4 Bytes */
689         switch (l4_proto) {
690         case IPPROTO_TCP:
691                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
692                                 HNS3_TXD_L4LEN_S, l4.tcp->doff);
693                 break;
694         case IPPROTO_SCTP:
695                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
696                                 HNS3_TXD_L4LEN_S,
697                                 (sizeof(struct sctphdr) >> 2));
698                 break;
699         case IPPROTO_UDP:
700                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
701                                 HNS3_TXD_L4LEN_S,
702                                 (sizeof(struct udphdr) >> 2));
703                 break;
704         default:
705                 /* skb packet types not supported by hardware;
706                  * the txbd len field is not filled.
707                  */
708                 return;
709         }
710 }
711
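/* Worked example of the length-field units used in hns3_set_l2l3l4_len() for
 * a plain (non-tunnel) TCP/IPv4 packet: a 14 byte Ethernet header gives
 * L2LEN = 14 >> 1 = 7, a 20 byte IP header gives L3LEN = 20 >> 2 = 5, and a
 * 20 byte TCP header is already expressed in 4 byte words by l4.tcp->doff,
 * so L4LEN = 5.
 */
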
712 /* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and the
713  * packet is UDP with the IANA-assigned VXLAN destination port, the hardware
714  * is expected to do the checksum offload, but it will not do so when the
715  * UDP destination port is 4789, so fall back to software checksumming in
716  * that case.
717  */
718 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
719 {
720 #define IANA_VXLAN_PORT 4789
721         union {
722                 struct tcphdr *tcp;
723                 struct udphdr *udp;
724                 struct gre_base_hdr *gre;
725                 unsigned char *hdr;
726         } l4;
727
728         l4.hdr = skb_transport_header(skb);
729
730         if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
731                 return false;
732
733         skb_checksum_help(skb);
734
735         return true;
736 }
737
738 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
739                                    u8 il4_proto, u32 *type_cs_vlan_tso,
740                                    u32 *ol_type_vlan_len_msec)
741 {
742         union {
743                 struct iphdr *v4;
744                 struct ipv6hdr *v6;
745                 unsigned char *hdr;
746         } l3;
747         u32 l4_proto = ol4_proto;
748
749         l3.hdr = skb_network_header(skb);
750
751         /* define OL3 type and tunnel type(OL4).*/
752         if (skb->encapsulation) {
753                 /* define outer network header type.*/
754                 if (skb->protocol == htons(ETH_P_IP)) {
755                         if (skb_is_gso(skb))
756                                 hnae3_set_field(*ol_type_vlan_len_msec,
757                                                 HNS3_TXD_OL3T_M,
758                                                 HNS3_TXD_OL3T_S,
759                                                 HNS3_OL3T_IPV4_CSUM);
760                         else
761                                 hnae3_set_field(*ol_type_vlan_len_msec,
762                                                 HNS3_TXD_OL3T_M,
763                                                 HNS3_TXD_OL3T_S,
764                                                 HNS3_OL3T_IPV4_NO_CSUM);
765
766                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
767                         hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
768                                         HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
769                 }
770
771                 /* define tunnel type(OL4).*/
772                 switch (l4_proto) {
773                 case IPPROTO_UDP:
774                         hnae3_set_field(*ol_type_vlan_len_msec,
775                                         HNS3_TXD_TUNTYPE_M,
776                                         HNS3_TXD_TUNTYPE_S,
777                                         HNS3_TUN_MAC_IN_UDP);
778                         break;
779                 case IPPROTO_GRE:
780                         hnae3_set_field(*ol_type_vlan_len_msec,
781                                         HNS3_TXD_TUNTYPE_M,
782                                         HNS3_TXD_TUNTYPE_S,
783                                         HNS3_TUN_NVGRE);
784                         break;
785                 default:
786                         /* drop the tunnel packet if the hardware does not
787                          * support it, as it cannot compute the checksum for TSO.
788                          */
789                         if (skb_is_gso(skb))
790                                 return -EDOM;
791
792                         /* the stack has already computed the IP header checksum;
793                          * let software compute the L4 checksum when not doing TSO.
794                          */
795                         skb_checksum_help(skb);
796                         return 0;
797                 }
798
799                 l3.hdr = skb_inner_network_header(skb);
800                 l4_proto = il4_proto;
801         }
802
803         if (l3.v4->version == 4) {
804                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
805                                 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
806
807                 /* the stack computes the IP header already, the only time we
808                  * need the hardware to recompute it is in the case of TSO.
809                  */
810                 if (skb_is_gso(skb))
811                         hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
812         } else if (l3.v6->version == 6) {
813                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
814                                 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
815         }
816
817         switch (l4_proto) {
818         case IPPROTO_TCP:
819                 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
820                 hnae3_set_field(*type_cs_vlan_tso,
821                                 HNS3_TXD_L4T_M,
822                                 HNS3_TXD_L4T_S,
823                                 HNS3_L4T_TCP);
824                 break;
825         case IPPROTO_UDP:
826                 if (hns3_tunnel_csum_bug(skb))
827                         break;
828
829                 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
830                 hnae3_set_field(*type_cs_vlan_tso,
831                                 HNS3_TXD_L4T_M,
832                                 HNS3_TXD_L4T_S,
833                                 HNS3_L4T_UDP);
834                 break;
835         case IPPROTO_SCTP:
836                 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
837                 hnae3_set_field(*type_cs_vlan_tso,
838                                 HNS3_TXD_L4T_M,
839                                 HNS3_TXD_L4T_S,
840                                 HNS3_L4T_SCTP);
841                 break;
842         default:
843                 /* drop the tunnel packet if the hardware does not support it,
844                  * as it cannot compute the checksum for TSO.
845                  */
846                 if (skb_is_gso(skb))
847                         return -EDOM;
848
849                 /* the stack has already computed the IP header checksum;
850                  * let software compute the L4 checksum when not doing TSO.
851                  */
852                 skb_checksum_help(skb);
853                 return 0;
854         }
855
856         return 0;
857 }
858
859 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
860 {
861         /* Config bd buffer end */
862         hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
863                         HNS3_TXD_BDTYPE_S, 0);
864         hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
865         hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
866         hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
867 }
868
869 static int hns3_fill_desc_vtags(struct sk_buff *skb,
870                                 struct hns3_enet_ring *tx_ring,
871                                 u32 *inner_vlan_flag,
872                                 u32 *out_vlan_flag,
873                                 u16 *inner_vtag,
874                                 u16 *out_vtag)
875 {
876 #define HNS3_TX_VLAN_PRIO_SHIFT 13
877
878         if (skb->protocol == htons(ETH_P_8021Q) &&
879             !(tx_ring->tqp->handle->kinfo.netdev->features &
880             NETIF_F_HW_VLAN_CTAG_TX)) {
881                 /* When HW VLAN acceleration is turned off and the stack
882                  * sets the protocol to 802.1q, the driver just needs to
883                  * set the protocol to the encapsulated ethertype.
884                  */
885                 skb->protocol = vlan_get_protocol(skb);
886                 return 0;
887         }
888
889         if (skb_vlan_tag_present(skb)) {
890                 u16 vlan_tag;
891
892                 vlan_tag = skb_vlan_tag_get(skb);
893                 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
894
895                 /* Per the hardware's strategy, use out_vtag for the double
896                  * tagged case and inner_vtag for the single tag case.
897                  */
898                 if (skb->protocol == htons(ETH_P_8021Q)) {
899                         hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
900                         *out_vtag = vlan_tag;
901                 } else {
902                         hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
903                         *inner_vtag = vlan_tag;
904                 }
905         } else if (skb->protocol == htons(ETH_P_8021Q)) {
906                 struct vlan_ethhdr *vhdr;
907                 int rc;
908
909                 rc = skb_cow_head(skb, 0);
910                 if (rc < 0)
911                         return rc;
912                 vhdr = (struct vlan_ethhdr *)skb->data;
913                 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
914                                         << HNS3_TX_VLAN_PRIO_SHIFT);
915         }
916
917         skb->protocol = vlan_get_protocol(skb);
918         return 0;
919 }
920
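/* Worked example of the tag layout in hns3_fill_desc_vtags() (illustrative
 * values): for VLAN ID 100 (0x064) and skb->priority 5, the resulting
 * vlan_tag is 0x064 | (5 << 13) = 0xa064, i.e. the 3 PCP bits sit above the
 * 12 bit VID just as in an 802.1Q TCI.
 */
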
921 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
922                           int size, dma_addr_t dma, int frag_end,
923                           enum hns_desc_type type)
924 {
925         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
926         struct hns3_desc *desc = &ring->desc[ring->next_to_use];
927         u32 ol_type_vlan_len_msec = 0;
928         u16 bdtp_fe_sc_vld_ra_ri = 0;
929         u32 type_cs_vlan_tso = 0;
930         struct sk_buff *skb;
931         u16 inner_vtag = 0;
932         u16 out_vtag = 0;
933         u32 paylen = 0;
934         u16 mss = 0;
935         u8 ol4_proto;
936         u8 il4_proto;
937         int ret;
938
939         /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
940         desc_cb->priv = priv;
941         desc_cb->length = size;
942         desc_cb->dma = dma;
943         desc_cb->type = type;
944
945         /* now, fill the descriptor */
946         desc->addr = cpu_to_le64(dma);
947         desc->tx.send_size = cpu_to_le16((u16)size);
948         hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
949         desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
950
951         if (type == DESC_TYPE_SKB) {
952                 skb = (struct sk_buff *)priv;
953                 paylen = skb->len;
954
955                 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
956                                            &ol_type_vlan_len_msec,
957                                            &inner_vtag, &out_vtag);
958                 if (unlikely(ret))
959                         return ret;
960
961                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
962                         skb_reset_mac_len(skb);
963
964                         ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
965                         if (ret)
966                                 return ret;
967                         hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
968                                             &type_cs_vlan_tso,
969                                             &ol_type_vlan_len_msec);
970                         ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
971                                                       &type_cs_vlan_tso,
972                                                       &ol_type_vlan_len_msec);
973                         if (ret)
974                                 return ret;
975
976                         ret = hns3_set_tso(skb, &paylen, &mss,
977                                            &type_cs_vlan_tso);
978                         if (ret)
979                                 return ret;
980                 }
981
982                 /* Set txbd */
983                 desc->tx.ol_type_vlan_len_msec =
984                         cpu_to_le32(ol_type_vlan_len_msec);
985                 desc->tx.type_cs_vlan_tso_len =
986                         cpu_to_le32(type_cs_vlan_tso);
987                 desc->tx.paylen = cpu_to_le32(paylen);
988                 desc->tx.mss = cpu_to_le16(mss);
989                 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
990                 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
991         }
992
993         /* move ring pointer to next.*/
994         ring_ptr_move_fw(ring, next_to_use);
995
996         return 0;
997 }
998
999 static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
1000                               int size, dma_addr_t dma, int frag_end,
1001                               enum hns_desc_type type)
1002 {
1003         unsigned int frag_buf_num;
1004         unsigned int k;
1005         int sizeoflast;
1006         int ret;
1007
1008         frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1009         sizeoflast = size % HNS3_MAX_BD_SIZE;
1010         sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1011
1012         /* When the frag size is bigger than the hardware limit, split this frag */
1013         for (k = 0; k < frag_buf_num; k++) {
1014                 ret = hns3_fill_desc(ring, priv,
1015                                      (k == frag_buf_num - 1) ?
1016                                 sizeoflast : HNS3_MAX_BD_SIZE,
1017                                 dma + HNS3_MAX_BD_SIZE * k,
1018                                 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
1019                                 (type == DESC_TYPE_SKB && !k) ?
1020                                         DESC_TYPE_SKB : DESC_TYPE_PAGE);
1021                 if (ret)
1022                         return ret;
1023         }
1024
1025         return 0;
1026 }
1027
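/* Worked example of the split in hns3_fill_desc_tso(), assuming
 * HNS3_MAX_BD_SIZE is 65535 as defined in hns3_enet.h: a 100000 byte buffer
 * needs frag_buf_num = 2 descriptors, the first carrying 65535 bytes and the
 * last carrying sizeoflast = 100000 % 65535 = 34465 bytes.  Only the first
 * descriptor keeps DESC_TYPE_SKB and only the last one carries the frag_end
 * flag.
 */
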
1028 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
1029                                    struct hns3_enet_ring *ring)
1030 {
1031         struct sk_buff *skb = *out_skb;
1032         struct skb_frag_struct *frag;
1033         int bdnum_for_frag;
1034         int frag_num;
1035         int buf_num;
1036         int size;
1037         int i;
1038
1039         size = skb_headlen(skb);
1040         buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1041
1042         frag_num = skb_shinfo(skb)->nr_frags;
1043         for (i = 0; i < frag_num; i++) {
1044                 frag = &skb_shinfo(skb)->frags[i];
1045                 size = skb_frag_size(frag);
1046                 bdnum_for_frag =
1047                         (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1048                 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
1049                         return -ENOMEM;
1050
1051                 buf_num += bdnum_for_frag;
1052         }
1053
1054         if (buf_num > ring_space(ring))
1055                 return -EBUSY;
1056
1057         *bnum = buf_num;
1058         return 0;
1059 }
1060
1061 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
1062                                   struct hns3_enet_ring *ring)
1063 {
1064         struct sk_buff *skb = *out_skb;
1065         int buf_num;
1066
1067         /* No. of segments (plus a header) */
1068         buf_num = skb_shinfo(skb)->nr_frags + 1;
1069
1070         if (unlikely(ring_space(ring) < buf_num))
1071                 return -EBUSY;
1072
1073         *bnum = buf_num;
1074
1075         return 0;
1076 }
1077
1078 static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
1079 {
1080         struct device *dev = ring_to_dev(ring);
1081         unsigned int i;
1082
1083         for (i = 0; i < ring->desc_num; i++) {
1084                 /* check if this is where we started */
1085                 if (ring->next_to_use == next_to_use_orig)
1086                         break;
1087
1088                 /* unmap the descriptor dma address */
1089                 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1090                         dma_unmap_single(dev,
1091                                          ring->desc_cb[ring->next_to_use].dma,
1092                                         ring->desc_cb[ring->next_to_use].length,
1093                                         DMA_TO_DEVICE);
1094                 else
1095                         dma_unmap_page(dev,
1096                                        ring->desc_cb[ring->next_to_use].dma,
1097                                        ring->desc_cb[ring->next_to_use].length,
1098                                        DMA_TO_DEVICE);
1099
1100                 /* rollback one */
1101                 ring_ptr_move_bw(ring, next_to_use);
1102         }
1103 }
1104
1105 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1106 {
1107         struct hns3_nic_priv *priv = netdev_priv(netdev);
1108         struct hns3_nic_ring_data *ring_data =
1109                 &tx_ring_data(priv, skb->queue_mapping);
1110         struct hns3_enet_ring *ring = ring_data->ring;
1111         struct device *dev = priv->dev;
1112         struct netdev_queue *dev_queue;
1113         struct skb_frag_struct *frag;
1114         int next_to_use_head;
1115         int next_to_use_frag;
1116         dma_addr_t dma;
1117         int buf_num;
1118         int seg_num;
1119         int size;
1120         int ret;
1121         int i;
1122
1123         /* Prefetch the data used later */
1124         prefetch(skb->data);
1125
1126         switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
1127         case -EBUSY:
1128                 u64_stats_update_begin(&ring->syncp);
1129                 ring->stats.tx_busy++;
1130                 u64_stats_update_end(&ring->syncp);
1131
1132                 goto out_net_tx_busy;
1133         case -ENOMEM:
1134                 u64_stats_update_begin(&ring->syncp);
1135                 ring->stats.sw_err_cnt++;
1136                 u64_stats_update_end(&ring->syncp);
1137                 netdev_err(netdev, "no memory to xmit!\n");
1138
1139                 goto out_err_tx_ok;
1140         default:
1141                 break;
1142         }
1143
1144         /* No. of segments (plus a header) */
1145         seg_num = skb_shinfo(skb)->nr_frags + 1;
1146         /* Fill the first part */
1147         size = skb_headlen(skb);
1148
1149         next_to_use_head = ring->next_to_use;
1150
1151         dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1152         if (dma_mapping_error(dev, dma)) {
1153                 netdev_err(netdev, "TX head DMA map failed\n");
1154                 ring->stats.sw_err_cnt++;
1155                 goto out_err_tx_ok;
1156         }
1157
1158         ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
1159                            DESC_TYPE_SKB);
1160         if (ret)
1161                 goto head_dma_map_err;
1162
1163         next_to_use_frag = ring->next_to_use;
1164         /* Fill the fragments */
1165         for (i = 1; i < seg_num; i++) {
1166                 frag = &skb_shinfo(skb)->frags[i - 1];
1167                 size = skb_frag_size(frag);
1168                 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1169                 if (dma_mapping_error(dev, dma)) {
1170                         netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
1171                         ring->stats.sw_err_cnt++;
1172                         goto frag_dma_map_err;
1173                 }
1174                 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
1175                                     seg_num - 1 == i ? 1 : 0,
1176                                     DESC_TYPE_PAGE);
1177
1178                 if (ret)
1179                         goto frag_dma_map_err;
1180         }
1181
1182         /* Complete translate all packets */
1183         dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1184         netdev_tx_sent_queue(dev_queue, skb->len);
1185
1186         wmb(); /* Commit all data before submit */
1187
1188         hnae3_queue_xmit(ring->tqp, buf_num);
1189
1190         return NETDEV_TX_OK;
1191
1192 frag_dma_map_err:
1193         hns_nic_dma_unmap(ring, next_to_use_frag);
1194
1195 head_dma_map_err:
1196         hns_nic_dma_unmap(ring, next_to_use_head);
1197
1198 out_err_tx_ok:
1199         dev_kfree_skb_any(skb);
1200         return NETDEV_TX_OK;
1201
1202 out_net_tx_busy:
1203         netif_stop_subqueue(netdev, ring_data->queue_index);
1204         smp_mb(); /* Commit all data before submit */
1205
1206         return NETDEV_TX_BUSY;
1207 }
1208
1209 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1210 {
1211         struct hnae3_handle *h = hns3_get_handle(netdev);
1212         struct sockaddr *mac_addr = p;
1213         int ret;
1214
1215         if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1216                 return -EADDRNOTAVAIL;
1217
1218         if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1219                 netdev_info(netdev, "already using mac address %pM\n",
1220                             mac_addr->sa_data);
1221                 return 0;
1222         }
1223
1224         ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1225         if (ret) {
1226                 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1227                 return ret;
1228         }
1229
1230         ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1231
1232         return 0;
1233 }
1234
1235 static int hns3_nic_do_ioctl(struct net_device *netdev,
1236                              struct ifreq *ifr, int cmd)
1237 {
1238         struct hnae3_handle *h = hns3_get_handle(netdev);
1239
1240         if (!netif_running(netdev))
1241                 return -EINVAL;
1242
1243         if (!h->ae_algo->ops->do_ioctl)
1244                 return -EOPNOTSUPP;
1245
1246         return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
1247 }
1248
1249 static int hns3_nic_set_features(struct net_device *netdev,
1250                                  netdev_features_t features)
1251 {
1252         netdev_features_t changed = netdev->features ^ features;
1253         struct hns3_nic_priv *priv = netdev_priv(netdev);
1254         struct hnae3_handle *h = priv->ae_handle;
1255         int ret;
1256
1257         if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1258                 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1259                         priv->ops.fill_desc = hns3_fill_desc_tso;
1260                         priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1261                 } else {
1262                         priv->ops.fill_desc = hns3_fill_desc;
1263                         priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1264                 }
1265         }
1266
1267         if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1268             h->ae_algo->ops->enable_vlan_filter) {
1269                 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1270                         h->ae_algo->ops->enable_vlan_filter(h, true);
1271                 else
1272                         h->ae_algo->ops->enable_vlan_filter(h, false);
1273         }
1274
1275         if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1276             h->ae_algo->ops->enable_hw_strip_rxvtag) {
1277                 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1278                         ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
1279                 else
1280                         ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
1281
1282                 if (ret)
1283                         return ret;
1284         }
1285
1286         if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
1287                 if (features & NETIF_F_NTUPLE)
1288                         h->ae_algo->ops->enable_fd(h, true);
1289                 else
1290                         h->ae_algo->ops->enable_fd(h, false);
1291         }
1292
1293         netdev->features = features;
1294         return 0;
1295 }
1296
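/* The TSO toggle in hns3_nic_set_features() only swaps two function pointers
 * in priv->ops; a condensed sketch of the assumed shape of that ops struct
 * (the authoritative definition lives in hns3_enet.h):
 *
 *	struct hns3_nic_ops {
 *		int (*fill_desc)(struct hns3_enet_ring *ring, void *priv,
 *				 int size, dma_addr_t dma, int frag_end,
 *				 enum hns_desc_type type);
 *		int (*maybe_stop_tx)(struct sk_buff **out_skb, int *bnum,
 *				     struct hns3_enet_ring *ring);
 *	};
 *
 * hns3_nic_net_xmit() then calls priv->ops.maybe_stop_tx() and
 * priv->ops.fill_desc() without knowing which variant is installed.
 */
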
1297 static void hns3_nic_get_stats64(struct net_device *netdev,
1298                                  struct rtnl_link_stats64 *stats)
1299 {
1300         struct hns3_nic_priv *priv = netdev_priv(netdev);
1301         int queue_num = priv->ae_handle->kinfo.num_tqps;
1302         struct hnae3_handle *handle = priv->ae_handle;
1303         struct hns3_enet_ring *ring;
1304         unsigned int start;
1305         unsigned int idx;
1306         u64 tx_bytes = 0;
1307         u64 rx_bytes = 0;
1308         u64 tx_pkts = 0;
1309         u64 rx_pkts = 0;
1310         u64 tx_drop = 0;
1311         u64 rx_drop = 0;
1312
1313         if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1314                 return;
1315
1316         handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1317
1318         for (idx = 0; idx < queue_num; idx++) {
1319                 /* fetch the tx stats */
1320                 ring = priv->ring_data[idx].ring;
1321                 do {
1322                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1323                         tx_bytes += ring->stats.tx_bytes;
1324                         tx_pkts += ring->stats.tx_pkts;
1325                         tx_drop += ring->stats.tx_busy;
1326                         tx_drop += ring->stats.sw_err_cnt;
1327                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1328
1329                 /* fetch the rx stats */
1330                 ring = priv->ring_data[idx + queue_num].ring;
1331                 do {
1332                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1333                         rx_bytes += ring->stats.rx_bytes;
1334                         rx_pkts += ring->stats.rx_pkts;
1335                         rx_drop += ring->stats.non_vld_descs;
1336                         rx_drop += ring->stats.err_pkt_len;
1337                         rx_drop += ring->stats.l2_err;
1338                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1339         }
1340
1341         stats->tx_bytes = tx_bytes;
1342         stats->tx_packets = tx_pkts;
1343         stats->rx_bytes = rx_bytes;
1344         stats->rx_packets = rx_pkts;
1345
1346         stats->rx_errors = netdev->stats.rx_errors;
1347         stats->multicast = netdev->stats.multicast;
1348         stats->rx_length_errors = netdev->stats.rx_length_errors;
1349         stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1350         stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1351
1352         stats->tx_errors = netdev->stats.tx_errors;
1353         stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1354         stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
1355         stats->collisions = netdev->stats.collisions;
1356         stats->rx_over_errors = netdev->stats.rx_over_errors;
1357         stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1358         stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1359         stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1360         stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1361         stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1362         stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1363         stats->tx_window_errors = netdev->stats.tx_window_errors;
1364         stats->rx_compressed = netdev->stats.rx_compressed;
1365         stats->tx_compressed = netdev->stats.tx_compressed;
1366 }
1367
1368 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1369 {
1370         struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1371         struct hnae3_handle *h = hns3_get_handle(netdev);
1372         struct hnae3_knic_private_info *kinfo = &h->kinfo;
1373         u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1374         u8 tc = mqprio_qopt->qopt.num_tc;
1375         u16 mode = mqprio_qopt->mode;
1376         u8 hw = mqprio_qopt->qopt.hw;
1377         bool if_running;
1378         int ret;
1379
1380         if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1381                mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1382                 return -EOPNOTSUPP;
1383
1384         if (tc > HNAE3_MAX_TC)
1385                 return -EINVAL;
1386
1387         if (!netdev)
1388                 return -EINVAL;
1389
1390         if_running = netif_running(netdev);
1391         if (if_running) {
1392                 hns3_nic_net_stop(netdev);
1393                 msleep(100);
1394         }
1395
1396         ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1397                 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1398         if (ret)
1399                 goto out;
1400
1401         ret = hns3_nic_set_real_num_queue(netdev);
1402
1403 out:
1404         if (if_running)
1405                 hns3_nic_net_open(netdev);
1406
1407         return ret;
1408 }
1409
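/* An illustrative way to exercise this path from user space (queue counts are
 * only an example and depend on the device's rss_size):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 4 \
 *		map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 \
 *		queues 4@0 4@4 4@8 4@12 hw 1 mode channel
 *
 * This satisfies the TC_MQPRIO_HW_OFFLOAD_TCS / TC_MQPRIO_MODE_CHANNEL check
 * at the top of hns3_setup_tc(); deleting the qdisc later reaches the same
 * callback through the (!hw && tc == 0) branch to tear the mapping down.
 */
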
1410 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1411                              void *type_data)
1412 {
1413         if (type != TC_SETUP_QDISC_MQPRIO)
1414                 return -EOPNOTSUPP;
1415
1416         return hns3_setup_tc(dev, type_data);
1417 }
1418
1419 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1420                                 __be16 proto, u16 vid)
1421 {
1422         struct hnae3_handle *h = hns3_get_handle(netdev);
1423         struct hns3_nic_priv *priv = netdev_priv(netdev);
1424         int ret = -EIO;
1425
1426         if (h->ae_algo->ops->set_vlan_filter)
1427                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1428
1429         if (!ret)
1430                 set_bit(vid, priv->active_vlans);
1431
1432         return ret;
1433 }
1434
1435 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1436                                  __be16 proto, u16 vid)
1437 {
1438         struct hnae3_handle *h = hns3_get_handle(netdev);
1439         struct hns3_nic_priv *priv = netdev_priv(netdev);
1440         int ret = -EIO;
1441
1442         if (h->ae_algo->ops->set_vlan_filter)
1443                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1444
1445         if (!ret)
1446                 clear_bit(vid, priv->active_vlans);
1447
1448         return ret;
1449 }
1450
1451 static void hns3_restore_vlan(struct net_device *netdev)
1452 {
1453         struct hns3_nic_priv *priv = netdev_priv(netdev);
1454         u16 vid;
1455         int ret;
1456
1457         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
1458                 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
1459                 if (ret)
1460                         netdev_warn(netdev, "failed to restore vlan %d filter, ret = %d\n",
1461                                     vid, ret);
1462         }
1463 }
1464
1465 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1466                                 u8 qos, __be16 vlan_proto)
1467 {
1468         struct hnae3_handle *h = hns3_get_handle(netdev);
1469         int ret = -EIO;
1470
1471         if (h->ae_algo->ops->set_vf_vlan_filter)
1472                 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1473                                                    qos, vlan_proto);
1474
1475         return ret;
1476 }
1477
1478 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1479 {
1480         struct hnae3_handle *h = hns3_get_handle(netdev);
1481         bool if_running = netif_running(netdev);
1482         int ret;
1483
1484         if (!h->ae_algo->ops->set_mtu)
1485                 return -EOPNOTSUPP;
1486
1487         /* if this was called with netdev up then bring netdevice down */
1488         if (if_running) {
1489                 (void)hns3_nic_net_stop(netdev);
1490                 msleep(100);
1491         }
1492
1493         ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1494         if (ret)
1495                 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1496                            ret);
1497         else
1498                 netdev->mtu = new_mtu;
1499
1500         /* if the netdev was running earlier, bring it up again */
1501         if (if_running && hns3_nic_net_open(netdev))
1502                 ret = -EINVAL;
1503
1504         return ret;
1505 }
1506
1507 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1508 {
1509         struct hns3_nic_priv *priv = netdev_priv(ndev);
1510         struct hns3_enet_ring *tx_ring = NULL;
1511         int timeout_queue = 0;
1512         int hw_head, hw_tail;
1513         int i;
1514
1515         /* Find the stopped queue the same way the stack does */
1516         for (i = 0; i < ndev->real_num_tx_queues; i++) {
1517                 struct netdev_queue *q;
1518                 unsigned long trans_start;
1519
1520                 q = netdev_get_tx_queue(ndev, i);
1521                 trans_start = q->trans_start;
1522                 if (netif_xmit_stopped(q) &&
1523                     time_after(jiffies,
1524                                (trans_start + ndev->watchdog_timeo))) {
1525                         timeout_queue = i;
1526                         break;
1527                 }
1528         }
1529
1530         if (i == ndev->real_num_tx_queues) {
1531                 netdev_info(ndev,
1532                             "no netdev TX timeout queue found, timeout count: %llu\n",
1533                             priv->tx_timeout_count);
1534                 return false;
1535         }
1536
1537         tx_ring = priv->ring_data[timeout_queue].ring;
1538
1539         hw_head = readl_relaxed(tx_ring->tqp->io_base +
1540                                 HNS3_RING_TX_RING_HEAD_REG);
1541         hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1542                                 HNS3_RING_TX_RING_TAIL_REG);
1543         netdev_info(ndev,
1544                     "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1545                     priv->tx_timeout_count,
1546                     timeout_queue,
1547                     tx_ring->next_to_use,
1548                     tx_ring->next_to_clean,
1549                     hw_head,
1550                     hw_tail,
1551                     readl(tx_ring->tqp_vector->mask_addr));
1552
1553         return true;
1554 }
1555
1556 static void hns3_nic_net_timeout(struct net_device *ndev)
1557 {
1558         struct hns3_nic_priv *priv = netdev_priv(ndev);
1559         struct hnae3_handle *h = priv->ae_handle;
1560
1561         if (!hns3_get_tx_timeo_queue_info(ndev))
1562                 return;
1563
1564         priv->tx_timeout_count++;
1565
1566         if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
1567                 return;
1568
1569         /* request the reset */
1570         if (h->ae_algo->ops->reset_event)
1571                 h->ae_algo->ops->reset_event(h);
1572 }
1573
1574 static const struct net_device_ops hns3_nic_netdev_ops = {
1575         .ndo_open               = hns3_nic_net_open,
1576         .ndo_stop               = hns3_nic_net_stop,
1577         .ndo_start_xmit         = hns3_nic_net_xmit,
1578         .ndo_tx_timeout         = hns3_nic_net_timeout,
1579         .ndo_set_mac_address    = hns3_nic_net_set_mac_address,
1580         .ndo_do_ioctl           = hns3_nic_do_ioctl,
1581         .ndo_change_mtu         = hns3_nic_change_mtu,
1582         .ndo_set_features       = hns3_nic_set_features,
1583         .ndo_get_stats64        = hns3_nic_get_stats64,
1584         .ndo_setup_tc           = hns3_nic_setup_tc,
1585         .ndo_set_rx_mode        = hns3_nic_set_rx_mode,
1586         .ndo_vlan_rx_add_vid    = hns3_vlan_rx_add_vid,
1587         .ndo_vlan_rx_kill_vid   = hns3_vlan_rx_kill_vid,
1588         .ndo_set_vf_vlan        = hns3_ndo_set_vf_vlan,
1589 };
1590
1591 static bool hns3_is_phys_func(struct pci_dev *pdev)
1592 {
1593         u32 dev_id = pdev->device;
1594
1595         switch (dev_id) {
1596         case HNAE3_DEV_ID_GE:
1597         case HNAE3_DEV_ID_25GE:
1598         case HNAE3_DEV_ID_25GE_RDMA:
1599         case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1600         case HNAE3_DEV_ID_50GE_RDMA:
1601         case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1602         case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1603                 return true;
1604         case HNAE3_DEV_ID_100G_VF:
1605         case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1606                 return false;
1607         default:
1608                 dev_warn(&pdev->dev, "un-recognized pci device-id %d",
1609                          dev_id);
1610         }
1611
1612         return false;
1613 }
1614
1615 static void hns3_disable_sriov(struct pci_dev *pdev)
1616 {
1617         /* If our VFs are assigned we cannot shut down SR-IOV
1618          * without causing issues, so just leave the hardware
1619          * available but disabled
1620          */
1621         if (pci_vfs_assigned(pdev)) {
1622                 dev_warn(&pdev->dev,
1623                          "disabling driver while VFs are assigned\n");
1624                 return;
1625         }
1626
1627         pci_disable_sriov(pdev);
1628 }
1629
1630 static void hns3_get_dev_capability(struct pci_dev *pdev,
1631                                     struct hnae3_ae_dev *ae_dev)
1632 {
1633         if (pdev->revision >= 0x21)
1634                 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
1635 }
1636
1637 /* hns3_probe - Device initialization routine
1638  * @pdev: PCI device information struct
1639  * @ent: entry in hns3_pci_tbl
1640  *
1641  * hns3_probe initializes a PF identified by a pci_dev structure.
1642  * The OS initialization, configuring of the PF private structure,
1643  * and a hardware reset occur.
1644  *
1645  * Returns 0 on success, negative on failure
1646  */
1647 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1648 {
1649         struct hnae3_ae_dev *ae_dev;
1650         int ret;
1651
1652         ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1653                               GFP_KERNEL);
1654         if (!ae_dev) {
1655                 ret = -ENOMEM;
1656                 return ret;
1657         }
1658
1659         ae_dev->pdev = pdev;
1660         ae_dev->flag = ent->driver_data;
1661         ae_dev->dev_type = HNAE3_DEV_KNIC;
1662         ae_dev->reset_type = HNAE3_NONE_RESET;
1663         hns3_get_dev_capability(pdev, ae_dev);
1664         pci_set_drvdata(pdev, ae_dev);
1665
1666         hnae3_register_ae_dev(ae_dev);
1667
1668         return 0;
1669 }
1670
1671 /* hns3_remove - Device removal routine
1672  * @pdev: PCI device information struct
1673  */
1674 static void hns3_remove(struct pci_dev *pdev)
1675 {
1676         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1677
1678         if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1679                 hns3_disable_sriov(pdev);
1680
1681         hnae3_unregister_ae_dev(ae_dev);
1682 }
1683
1684 /**
1685  * hns3_pci_sriov_configure
1686  * @pdev: pointer to a pci_dev structure
1687  * @num_vfs: number of VFs to allocate
1688  *
1689  * Enable or change the number of VFs. Called when the user updates the number
1690  * of VFs in sysfs.
1691  **/
1692 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1693 {
1694         int ret;
1695
1696         if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1697                 dev_warn(&pdev->dev, "Can not config SRIOV\n");
1698                 return -EINVAL;
1699         }
1700
1701         if (num_vfs) {
1702                 ret = pci_enable_sriov(pdev, num_vfs);
1703                 if (ret)
1704                         dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1705                 else
1706                         return num_vfs;
1707         } else if (!pci_vfs_assigned(pdev)) {
1708                 pci_disable_sriov(pdev);
1709         } else {
1710                 dev_warn(&pdev->dev,
1711                          "Unable to free VFs because some are assigned to VMs.\n");
1712         }
1713
1714         return 0;
1715 }
1716
1717 static void hns3_shutdown(struct pci_dev *pdev)
1718 {
1719         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1720
1721         hnae3_unregister_ae_dev(ae_dev);
1722         devm_kfree(&pdev->dev, ae_dev);
1723         pci_set_drvdata(pdev, NULL);
1724
1725         if (system_state == SYSTEM_POWER_OFF)
1726                 pci_set_power_state(pdev, PCI_D3hot);
1727 }
1728
1729 static struct pci_driver hns3_driver = {
1730         .name     = hns3_driver_name,
1731         .id_table = hns3_pci_tbl,
1732         .probe    = hns3_probe,
1733         .remove   = hns3_remove,
1734         .shutdown = hns3_shutdown,
1735         .sriov_configure = hns3_pci_sriov_configure,
1736 };
1737
1738 /* set the default features for the hns3 netdev */
1739 static void hns3_set_default_feature(struct net_device *netdev)
1740 {
1741         struct hnae3_handle *h = hns3_get_handle(netdev);
1742         struct pci_dev *pdev = h->pdev;
1743
1744         netdev->priv_flags |= IFF_UNICAST_FLT;
1745
1746         netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1747                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1748                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1749                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1750                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1751
1752         netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1753
1754         netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1755
1756         netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1757                 NETIF_F_HW_VLAN_CTAG_FILTER |
1758                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1759                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1760                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1761                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1762                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1763
1764         netdev->vlan_features |=
1765                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1766                 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1767                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1768                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1769                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1770
1771         netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1772                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1773                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1774                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1775                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1776                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1777
1778         if (pdev->revision >= 0x21) {
1779                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1780
1781                 if (!(h->flags & HNAE3_SUPPORT_VF)) {
1782                         netdev->hw_features |= NETIF_F_NTUPLE;
1783                         netdev->features |= NETIF_F_NTUPLE;
1784                 }
1785         }
1786 }
1787
1788 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1789                              struct hns3_desc_cb *cb)
1790 {
1791         unsigned int order = hnae3_page_order(ring);
1792         struct page *p;
1793
1794         p = dev_alloc_pages(order);
1795         if (!p)
1796                 return -ENOMEM;
1797
1798         cb->priv = p;
1799         cb->page_offset = 0;
1800         cb->reuse_flag = 0;
1801         cb->buf  = page_address(p);
1802         cb->length = hnae3_page_size(ring);
1803         cb->type = DESC_TYPE_PAGE;
1804
1805         return 0;
1806 }
1807
1808 static void hns3_free_buffer(struct hns3_enet_ring *ring,
1809                              struct hns3_desc_cb *cb)
1810 {
1811         if (cb->type == DESC_TYPE_SKB)
1812                 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1813         else if (!HNAE3_IS_TX_RING(ring))
1814                 put_page((struct page *)cb->priv);
1815         memset(cb, 0, sizeof(*cb));
1816 }
1817
1818 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1819 {
1820         cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1821                                cb->length, ring_to_dma_dir(ring));
1822
1823         if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
1824                 return -EIO;
1825
1826         return 0;
1827 }
1828
1829 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1830                               struct hns3_desc_cb *cb)
1831 {
1832         if (cb->type == DESC_TYPE_SKB)
1833                 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1834                                  ring_to_dma_dir(ring));
1835         else
1836                 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1837                                ring_to_dma_dir(ring));
1838 }
1839
1840 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1841 {
1842         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1843         ring->desc[i].addr = 0;
1844 }
1845
1846 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1847 {
1848         struct hns3_desc_cb *cb = &ring->desc_cb[i];
1849
1850         if (!ring->desc_cb[i].dma)
1851                 return;
1852
1853         hns3_buffer_detach(ring, i);
1854         hns3_free_buffer(ring, cb);
1855 }
1856
1857 static void hns3_free_buffers(struct hns3_enet_ring *ring)
1858 {
1859         int i;
1860
1861         for (i = 0; i < ring->desc_num; i++)
1862                 hns3_free_buffer_detach(ring, i);
1863 }
1864
1865 /* free the descriptors along with their attached buffers */
1866 static void hns3_free_desc(struct hns3_enet_ring *ring)
1867 {
1868         int size = ring->desc_num * sizeof(ring->desc[0]);
1869
1870         hns3_free_buffers(ring);
1871
1872         if (ring->desc) {
1873                 dma_free_coherent(ring_to_dev(ring), size,
1874                                   ring->desc, ring->desc_dma_addr);
1875                 ring->desc = NULL;
1876         }
1877 }
1878
1879 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1880 {
1881         int size = ring->desc_num * sizeof(ring->desc[0]);
1882
1883         ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
1884                                          &ring->desc_dma_addr,
1885                                          GFP_KERNEL);
1886         if (!ring->desc)
1887                 return -ENOMEM;
1888
1889         return 0;
1890 }
1891
1892 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1893                                    struct hns3_desc_cb *cb)
1894 {
1895         int ret;
1896
1897         ret = hns3_alloc_buffer(ring, cb);
1898         if (ret)
1899                 goto out;
1900
1901         ret = hns3_map_buffer(ring, cb);
1902         if (ret)
1903                 goto out_with_buf;
1904
1905         return 0;
1906
1907 out_with_buf:
1908         hns3_free_buffer(ring, cb);
1909 out:
1910         return ret;
1911 }
1912
1913 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1914 {
1915         int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1916
1917         if (ret)
1918                 return ret;
1919
1920         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1921
1922         return 0;
1923 }
1924
1925 /* Allocate memory for each raw packet buffer and map it for DMA */
1926 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1927 {
1928         int i, j, ret;
1929
1930         for (i = 0; i < ring->desc_num; i++) {
1931                 ret = hns3_alloc_buffer_attach(ring, i);
1932                 if (ret)
1933                         goto out_buffer_fail;
1934         }
1935
1936         return 0;
1937
1938 out_buffer_fail:
1939         for (j = i - 1; j >= 0; j--)
1940                 hns3_free_buffer_detach(ring, j);
1941         return ret;
1942 }
1943
1944 /* detach an in-use buffer and replace it with a reserved one */
1945 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1946                                 struct hns3_desc_cb *res_cb)
1947 {
1948         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1949         ring->desc_cb[i] = *res_cb;
1950         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1951         ring->desc[i].rx.bd_base_info = 0;
1952 }
1953
1954 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1955 {
1956         ring->desc_cb[i].reuse_flag = 0;
1957         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1958                 + ring->desc_cb[i].page_offset);
1959         ring->desc[i].rx.bd_base_info = 0;
1960 }
1961
1962 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1963                                       int *pkts)
1964 {
1965         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1966
1967         (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1968         (*bytes) += desc_cb->length;
1969         /* desc_cb will be cleared by hns3_free_buffer_detach() below */
1970         hns3_free_buffer_detach(ring, ring->next_to_clean);
1971
1972         ring_ptr_move_fw(ring, next_to_clean);
1973 }
1974
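/* Check that the head reported by hardware lies within the range of
 * descriptors currently pending completion, i.e. strictly after
 * next_to_clean and no further than next_to_use, taking ring wrap-around
 * into account.
 */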
1975 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1976 {
1977         int u = ring->next_to_use;
1978         int c = ring->next_to_clean;
1979
1980         if (unlikely(h > ring->desc_num))
1981                 return 0;
1982
1983         return u > c ? (h > c && h <= u) : (h > c || h <= u);
1984 }
1985
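/* Reclaim TX descriptors that hardware has finished with, up to the head
 * pointer read from the ring registers: free the attached buffers, update
 * byte/packet stats and BQL accounting, and wake the TX queue if enough
 * descriptors have been freed.
 */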
1986 void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
1987 {
1988         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1989         struct hns3_nic_priv *priv = netdev_priv(netdev);
1990         struct netdev_queue *dev_queue;
1991         int bytes, pkts;
1992         int head;
1993
1994         head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1995         rmb(); /* Make sure head is read before touching any other data */
1996
1997         if (is_ring_empty(ring) || head == ring->next_to_clean)
1998                 return; /* no data to poll */
1999
2000         if (unlikely(!is_valid_clean_head(ring, head))) {
2001                 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
2002                            ring->next_to_use, ring->next_to_clean);
2003
2004                 u64_stats_update_begin(&ring->syncp);
2005                 ring->stats.io_err_cnt++;
2006                 u64_stats_update_end(&ring->syncp);
2007                 return;
2008         }
2009
2010         bytes = 0;
2011         pkts = 0;
2012         while (head != ring->next_to_clean) {
2013                 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
2014                 /* Issue prefetch for next Tx descriptor */
2015                 prefetch(&ring->desc_cb[ring->next_to_clean]);
2016         }
2017
2018         ring->tqp_vector->tx_group.total_bytes += bytes;
2019         ring->tqp_vector->tx_group.total_packets += pkts;
2020
2021         u64_stats_update_begin(&ring->syncp);
2022         ring->stats.tx_bytes += bytes;
2023         ring->stats.tx_pkts += pkts;
2024         u64_stats_update_end(&ring->syncp);
2025
2026         dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2027         netdev_tx_completed_queue(dev_queue, pkts, bytes);
2028
2029         if (unlikely(pkts && netif_carrier_ok(netdev) &&
2030                      (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2031                 /* Make sure that anybody stopping the queue after this
2032                  * sees the new next_to_clean.
2033                  */
2034                 smp_mb();
2035                 if (netif_tx_queue_stopped(dev_queue) &&
2036                     !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
2037                         netif_tx_wake_queue(dev_queue);
2038                         ring->stats.restart_queue++;
2039                 }
2040         }
2041 }
2042
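/* Return the number of RX descriptors that have been cleaned but not yet
 * refilled and handed back to hardware.
 */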
2043 static int hns3_desc_unused(struct hns3_enet_ring *ring)
2044 {
2045         int ntc = ring->next_to_clean;
2046         int ntu = ring->next_to_use;
2047
2048         return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
2049 }
2050
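/* Refill cleaned_count RX descriptors starting at next_to_use: reuse the
 * existing page when possible, otherwise allocate and DMA-map a fresh one,
 * then notify hardware through the RX head register.
 */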
2051 static void
2052 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
2053 {
2054         struct hns3_desc_cb *desc_cb;
2055         struct hns3_desc_cb res_cbs;
2056         int i, ret;
2057
2058         for (i = 0; i < cleaned_count; i++) {
2059                 desc_cb = &ring->desc_cb[ring->next_to_use];
2060                 if (desc_cb->reuse_flag) {
2061                         u64_stats_update_begin(&ring->syncp);
2062                         ring->stats.reuse_pg_cnt++;
2063                         u64_stats_update_end(&ring->syncp);
2064
2065                         hns3_reuse_buffer(ring, ring->next_to_use);
2066                 } else {
2067                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
2068                         if (ret) {
2069                                 u64_stats_update_begin(&ring->syncp);
2070                                 ring->stats.sw_err_cnt++;
2071                                 u64_stats_update_end(&ring->syncp);
2072
2073                                 netdev_err(ring->tqp->handle->kinfo.netdev,
2074                                            "hnae reserve buffer map failed.\n");
2075                                 break;
2076                         }
2077                         hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2078                 }
2079
2080                 ring_ptr_move_fw(ring, next_to_use);
2081         }
2082
2083         wmb(); /* Make sure all data has been written before submitting */
2084         writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2085 }
2086
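/* Add the RX buffer page to the skb as a fragment and decide whether the
 * page can be reused for a later packet: with two buffers per page the
 * offset is flipped between the halves, otherwise it advances through the
 * page; pages allocated on a remote NUMA node are never reused.
 */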
2087 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2088                                 struct hns3_enet_ring *ring, int pull_len,
2089                                 struct hns3_desc_cb *desc_cb)
2090 {
2091         struct hns3_desc *desc;
2092         u32 truesize;
2093         int size;
2094         int last_offset;
2095         bool twobufs;
2096
2097         twobufs = ((PAGE_SIZE < 8192) &&
2098                 hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2099
2100         desc = &ring->desc[ring->next_to_clean];
2101         size = le16_to_cpu(desc->rx.size);
2102
2103         truesize = hnae3_buf_size(ring);
2104
2105         if (!twobufs)
2106                 last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
2107
2108         skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2109                         size - pull_len, truesize);
2110
2111         /* Avoid re-using pages from a remote NUMA node; default to no reuse */
2112         if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2113                 return;
2114
2115         if (twobufs) {
2116                 /* If we are the only owner of the page we can reuse it */
2117                 if (likely(page_count(desc_cb->priv) == 1)) {
2118                         /* Flip page offset to other buffer */
2119                         desc_cb->page_offset ^= truesize;
2120
2121                         desc_cb->reuse_flag = 1;
2122                         /* bump ref count on the page before it is given to the stack */
2123                         get_page(desc_cb->priv);
2124                 }
2125                 return;
2126         }
2127
2128         /* Move the offset up to the next buffer in the page */
2129         desc_cb->page_offset += truesize;
2130
2131         if (desc_cb->page_offset <= last_offset) {
2132                 desc_cb->reuse_flag = 1;
2133                 /* Bump ref count on the page before it is given to the stack */
2134                 get_page(desc_cb->priv);
2135         }
2136 }
2137
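/* Set skb->ip_summed based on the checksum status reported by hardware in
 * the RX descriptor, counting L3/L4 checksum errors in the ring stats.
 */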
2138 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2139                              struct hns3_desc *desc)
2140 {
2141         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2142         int l3_type, l4_type;
2143         u32 bd_base_info;
2144         int ol4_type;
2145         u32 l234info;
2146
2147         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2148         l234info = le32_to_cpu(desc->rx.l234_info);
2149
2150         skb->ip_summed = CHECKSUM_NONE;
2151
2152         skb_checksum_none_assert(skb);
2153
2154         if (!(netdev->features & NETIF_F_RXCSUM))
2155                 return;
2156
2157         /* check if hardware has done checksum */
2158         if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
2159                 return;
2160
2161         if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
2162                      hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
2163                      hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2164                      hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
2165                 u64_stats_update_begin(&ring->syncp);
2166                 ring->stats.l3l4_csum_err++;
2167                 u64_stats_update_end(&ring->syncp);
2168
2169                 return;
2170         }
2171
2172         l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2173                                   HNS3_RXD_L3ID_S);
2174         l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2175                                   HNS3_RXD_L4ID_S);
2176
2177         ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
2178                                    HNS3_RXD_OL4ID_S);
2179         switch (ol4_type) {
2180         case HNS3_OL4_TYPE_MAC_IN_UDP:
2181         case HNS3_OL4_TYPE_NVGRE:
2182                 skb->csum_level = 1;
2183                 /* fall through */
2184         case HNS3_OL4_TYPE_NO_TUN:
2185                 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2186                 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2187                      l3_type == HNS3_L3_TYPE_IPV6) &&
2188                     (l4_type == HNS3_L4_TYPE_UDP ||
2189                      l4_type == HNS3_L4_TYPE_TCP ||
2190                      l4_type == HNS3_L4_TYPE_SCTP))
2191                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2192                 break;
2193         default:
2194                 break;
2195         }
2196 }
2197
2198 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2199 {
2200         napi_gro_receive(&ring->tqp_vector->napi, skb);
2201 }
2202
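/* Extract the VLAN tag stripped by hardware. On revision 0x20 hardware the
 * tag is taken from ot_vlan_tag, falling back to vlan_tag; later revisions
 * indicate in l234info whether the outer or inner tag was stripped.
 */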
2203 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2204                                 struct hns3_desc *desc, u32 l234info,
2205                                 u16 *vlan_tag)
2206 {
2207         struct pci_dev *pdev = ring->tqp->handle->pdev;
2208
2209         if (pdev->revision == 0x20) {
2210                 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2211                 if (!(*vlan_tag & VLAN_VID_MASK))
2212                         *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2213
2214                 return (*vlan_tag != 0);
2215         }
2216
2217 #define HNS3_STRP_OUTER_VLAN    0x1
2218 #define HNS3_STRP_INNER_VLAN    0x2
2219
2220         switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2221                                 HNS3_RXD_STRP_TAGP_S)) {
2222         case HNS3_STRP_OUTER_VLAN:
2223                 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2224                 return true;
2225         case HNS3_STRP_INNER_VLAN:
2226                 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2227                 return true;
2228         default:
2229                 return false;
2230         }
2231 }
2232
2233 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
2234                                      struct sk_buff *skb)
2235 {
2236         struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
2237         struct hnae3_handle *handle = ring->tqp->handle;
2238         enum pkt_hash_types rss_type;
2239
2240         if (le32_to_cpu(desc->rx.rss_hash))
2241                 rss_type = handle->kinfo.rss_type;
2242         else
2243                 rss_type = PKT_HASH_TYPE_NONE;
2244
2245         skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
2246 }
2247
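/* Process one received packet, which may span several buffer descriptors:
 * copy the header (or the whole small packet) into a freshly allocated skb,
 * attach the remaining buffers as page fragments, extract the VLAN tag and
 * validate the descriptor error bits before updating the RX stats.
 */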
2248 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2249                              struct sk_buff **out_skb, int *out_bnum)
2250 {
2251         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2252         struct hns3_desc_cb *desc_cb;
2253         struct hns3_desc *desc;
2254         struct sk_buff *skb;
2255         unsigned char *va;
2256         u32 bd_base_info;
2257         int pull_len;
2258         u32 l234info;
2259         int length;
2260         int bnum;
2261
2262         desc = &ring->desc[ring->next_to_clean];
2263         desc_cb = &ring->desc_cb[ring->next_to_clean];
2264
2265         prefetch(desc);
2266
2267         length = le16_to_cpu(desc->rx.size);
2268         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2269
2270         /* Check valid BD */
2271         if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
2272                 return -EFAULT;
2273
2274         va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2275
2276         /* Prefetch the first cache line of the first page.
2277          * The idea is to cache a few bytes of the packet header. Our L1
2278          * cache line size is 64B, so we need to prefetch twice to cover
2279          * 128B. Some systems have larger 128B L1 cache lines; on those a
2280          * single prefetch would suffice to cache the relevant part of the
2281          * header.
2282          */
2283         prefetch(va);
2284 #if L1_CACHE_BYTES < 128
2285         prefetch(va + L1_CACHE_BYTES);
2286 #endif
2287
2288         skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2289                                         HNS3_RX_HEAD_SIZE);
2290         if (unlikely(!skb)) {
2291                 netdev_err(netdev, "alloc rx skb fail\n");
2292
2293                 u64_stats_update_begin(&ring->syncp);
2294                 ring->stats.sw_err_cnt++;
2295                 u64_stats_update_end(&ring->syncp);
2296
2297                 return -ENOMEM;
2298         }
2299
2300         prefetchw(skb->data);
2301
2302         bnum = 1;
2303         if (length <= HNS3_RX_HEAD_SIZE) {
2304                 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2305
2306                 /* We can reuse buffer as-is, just make sure it is local */
2307                 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2308                         desc_cb->reuse_flag = 1;
2309                 else /* This page cannot be reused so discard it */
2310                         put_page(desc_cb->priv);
2311
2312                 ring_ptr_move_fw(ring, next_to_clean);
2313         } else {
2314                 u64_stats_update_begin(&ring->syncp);
2315                 ring->stats.seg_pkt_cnt++;
2316                 u64_stats_update_end(&ring->syncp);
2317
2318                 pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
2319
2320                 memcpy(__skb_put(skb, pull_len), va,
2321                        ALIGN(pull_len, sizeof(long)));
2322
2323                 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2324                 ring_ptr_move_fw(ring, next_to_clean);
2325
2326                 while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2327                         desc = &ring->desc[ring->next_to_clean];
2328                         desc_cb = &ring->desc_cb[ring->next_to_clean];
2329                         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2330                         hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2331                         ring_ptr_move_fw(ring, next_to_clean);
2332                         bnum++;
2333                 }
2334         }
2335
2336         *out_bnum = bnum;
2337
2338         l234info = le32_to_cpu(desc->rx.l234_info);
2339
2340         /* Based on the hardware's stripping strategy, the offloaded tag is
2341          * stored in ot_vlan_tag for a double-tagged packet and in vlan_tag
2342          * for a single-tagged packet.
2343          */
2344         if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2345                 u16 vlan_tag;
2346
2347                 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
2348                         __vlan_hwaccel_put_tag(skb,
2349                                                htons(ETH_P_8021Q),
2350                                                vlan_tag);
2351         }
2352
2353         if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2354                 u64_stats_update_begin(&ring->syncp);
2355                 ring->stats.non_vld_descs++;
2356                 u64_stats_update_end(&ring->syncp);
2357
2358                 dev_kfree_skb_any(skb);
2359                 return -EINVAL;
2360         }
2361
2362         if (unlikely((!desc->rx.pkt_len) ||
2363                      hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2364                 u64_stats_update_begin(&ring->syncp);
2365                 ring->stats.err_pkt_len++;
2366                 u64_stats_update_end(&ring->syncp);
2367
2368                 dev_kfree_skb_any(skb);
2369                 return -EFAULT;
2370         }
2371
2372         if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
2373                 u64_stats_update_begin(&ring->syncp);
2374                 ring->stats.l2_err++;
2375                 u64_stats_update_end(&ring->syncp);
2376
2377                 dev_kfree_skb_any(skb);
2378                 return -EFAULT;
2379         }
2380
2381         u64_stats_update_begin(&ring->syncp);
2382         ring->stats.rx_pkts++;
2383         ring->stats.rx_bytes += skb->len;
2384         u64_stats_update_end(&ring->syncp);
2385
2386         ring->tqp_vector->rx_group.total_bytes += skb->len;
2387
2388         hns3_rx_checksum(ring, skb, desc);
2389         hns3_set_rx_skb_rss_type(ring, skb);
2390
2391         return 0;
2392 }
2393
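/* RX poll work for one ring: handle up to budget packets, refilling buffers
 * in batches of RCB_NOF_ALLOC_RX_BUFF_ONCE, and pass each completed skb to
 * rx_fn (normally hns3_rx_skb).
 */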
2394 int hns3_clean_rx_ring(
2395                 struct hns3_enet_ring *ring, int budget,
2396                 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2397 {
2398 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2399         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2400         int recv_pkts, recv_bds, clean_count, err;
2401         int unused_count = hns3_desc_unused(ring);
2402         struct sk_buff *skb = NULL;
2403         int num, bnum = 0;
2404
2405         num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2406         rmb(); /* Make sure num is read before any other descriptor data is touched */
2407
2408         recv_pkts = 0, recv_bds = 0, clean_count = 0;
2409         num -= unused_count;
2410
2411         while (recv_pkts < budget && recv_bds < num) {
2412                 /* Reuse or realloc buffers */
2413                 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2414                         hns3_nic_alloc_rx_buffers(ring,
2415                                                   clean_count + unused_count);
2416                         clean_count = 0;
2417                         unused_count = hns3_desc_unused(ring);
2418                 }
2419
2420                 /* Poll one pkt */
2421                 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2422                 if (unlikely(!skb)) /* This fault cannot be repaired */
2423                         goto out;
2424
2425                 recv_bds += bnum;
2426                 clean_count += bnum;
2427                 if (unlikely(err)) {  /* Skip the erroneous packet */
2428                         recv_pkts++;
2429                         continue;
2430                 }
2431
2432                 /* Hand the packet up to the network stack */
2433                 skb->protocol = eth_type_trans(skb, netdev);
2434                 rx_fn(ring, skb);
2435
2436                 recv_pkts++;
2437         }
2438
2439 out:
2440         /* Make sure all data has been written before submitting */
2441         if (clean_count + unused_count > 0)
2442                 hns3_nic_alloc_rx_buffers(ring,
2443                                           clean_count + unused_count);
2444
2445         return recv_pkts;
2446 }
2447
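/* Adaptive interrupt coalescing: derive the byte and packet rate seen by
 * this ring group since the last update, move between the flow levels
 * accordingly and return true when the int_gl value has changed.
 */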
2448 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2449 {
2450         struct hns3_enet_tqp_vector *tqp_vector =
2451                                         ring_group->ring->tqp_vector;
2452         enum hns3_flow_level_range new_flow_level;
2453         int packets_per_msecs;
2454         int bytes_per_msecs;
2455         u32 time_passed_ms;
2456         u16 new_int_gl;
2457
2458         if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
2459                 return false;
2460
2461         if (ring_group->total_packets == 0) {
2462                 ring_group->coal.int_gl = HNS3_INT_GL_50K;
2463                 ring_group->coal.flow_level = HNS3_FLOW_LOW;
2464                 return true;
2465         }
2466
2467         /* Simple throttle rate management:
2468          * 0-10MB/s    lower  (50000 ints/s)
2469          * 10-20MB/s   middle (20000 ints/s)
2470          * 20-1249MB/s high   (18000 ints/s)
2471          * > 40000pps  ultra  (8000 ints/s)
2472          */
2473         new_flow_level = ring_group->coal.flow_level;
2474         new_int_gl = ring_group->coal.int_gl;
2475         time_passed_ms =
2476                 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2477
2478         if (!time_passed_ms)
2479                 return false;
2480
2481         do_div(ring_group->total_packets, time_passed_ms);
2482         packets_per_msecs = ring_group->total_packets;
2483
2484         do_div(ring_group->total_bytes, time_passed_ms);
2485         bytes_per_msecs = ring_group->total_bytes;
2486
2487 #define HNS3_RX_LOW_BYTE_RATE 10000
2488 #define HNS3_RX_MID_BYTE_RATE 20000
2489
2490         switch (new_flow_level) {
2491         case HNS3_FLOW_LOW:
2492                 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2493                         new_flow_level = HNS3_FLOW_MID;
2494                 break;
2495         case HNS3_FLOW_MID:
2496                 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2497                         new_flow_level = HNS3_FLOW_HIGH;
2498                 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2499                         new_flow_level = HNS3_FLOW_LOW;
2500                 break;
2501         case HNS3_FLOW_HIGH:
2502         case HNS3_FLOW_ULTRA:
2503         default:
2504                 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2505                         new_flow_level = HNS3_FLOW_MID;
2506                 break;
2507         }
2508
2509 #define HNS3_RX_ULTRA_PACKET_RATE 40
2510
2511         if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2512             &tqp_vector->rx_group == ring_group)
2513                 new_flow_level = HNS3_FLOW_ULTRA;
2514
2515         switch (new_flow_level) {
2516         case HNS3_FLOW_LOW:
2517                 new_int_gl = HNS3_INT_GL_50K;
2518                 break;
2519         case HNS3_FLOW_MID:
2520                 new_int_gl = HNS3_INT_GL_20K;
2521                 break;
2522         case HNS3_FLOW_HIGH:
2523                 new_int_gl = HNS3_INT_GL_18K;
2524                 break;
2525         case HNS3_FLOW_ULTRA:
2526                 new_int_gl = HNS3_INT_GL_8K;
2527                 break;
2528         default:
2529                 break;
2530         }
2531
2532         ring_group->total_bytes = 0;
2533         ring_group->total_packets = 0;
2534         ring_group->coal.flow_level = new_flow_level;
2535         if (new_int_gl != ring_group->coal.int_gl) {
2536                 ring_group->coal.int_gl = new_int_gl;
2537                 return true;
2538         }
2539         return false;
2540 }
2541
2542 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2543 {
2544         struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
2545         struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
2546         bool rx_update, tx_update;
2547
2548         if (tqp_vector->int_adapt_down > 0) {
2549                 tqp_vector->int_adapt_down--;
2550                 return;
2551         }
2552
2553         if (rx_group->coal.gl_adapt_enable) {
2554                 rx_update = hns3_get_new_int_gl(rx_group);
2555                 if (rx_update)
2556                         hns3_set_vector_coalesce_rx_gl(tqp_vector,
2557                                                        rx_group->coal.int_gl);
2558         }
2559
2560         if (tx_group->coal.gl_adapt_enable) {
2561                 tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
2562                 if (tx_update)
2563                         hns3_set_vector_coalesce_tx_gl(tqp_vector,
2564                                                        tx_group->coal.int_gl);
2565         }
2566
2567         tqp_vector->last_jiffies = jiffies;
2568         tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
2569 }
2570
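/* NAPI poll handler shared by all TQP vectors: clean every TX ring first,
 * then clean the RX rings with a per-ring share of the budget, and re-enable
 * the vector interrupt once all work is done.
 */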
2571 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2572 {
2573         struct hns3_enet_ring *ring;
2574         int rx_pkt_total = 0;
2575
2576         struct hns3_enet_tqp_vector *tqp_vector =
2577                 container_of(napi, struct hns3_enet_tqp_vector, napi);
2578         bool clean_complete = true;
2579         int rx_budget;
2580
2581         /* Since the actual Tx work is minimal, we can give the Tx a larger
2582          * budget and be more aggressive about cleaning up the Tx descriptors.
2583          */
2584         hns3_for_each_ring(ring, tqp_vector->tx_group)
2585                 hns3_clean_tx_ring(ring);
2586
2587         /* make sure the rx ring budget is at least 1 */
2588         rx_budget = max(budget / tqp_vector->num_tqps, 1);
2589
2590         hns3_for_each_ring(ring, tqp_vector->rx_group) {
2591                 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2592                                                     hns3_rx_skb);
2593
2594                 if (rx_cleaned >= rx_budget)
2595                         clean_complete = false;
2596
2597                 rx_pkt_total += rx_cleaned;
2598         }
2599
2600         tqp_vector->rx_group.total_packets += rx_pkt_total;
2601
2602         if (!clean_complete)
2603                 return budget;
2604
2605         napi_complete(napi);
2606         hns3_update_new_int_gl(tqp_vector);
2607         hns3_mask_vector_irq(tqp_vector, 1);
2608
2609         return rx_pkt_total;
2610 }
2611
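/* Build the ring chain list describing every TX and RX ring attached to
 * this vector; the chain is handed to the ae_algo ops when mapping or
 * unmapping rings to the vector.
 */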
2612 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2613                                       struct hnae3_ring_chain_node *head)
2614 {
2615         struct pci_dev *pdev = tqp_vector->handle->pdev;
2616         struct hnae3_ring_chain_node *cur_chain = head;
2617         struct hnae3_ring_chain_node *chain;
2618         struct hns3_enet_ring *tx_ring;
2619         struct hns3_enet_ring *rx_ring;
2620
2621         tx_ring = tqp_vector->tx_group.ring;
2622         if (tx_ring) {
2623                 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2624                 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2625                               HNAE3_RING_TYPE_TX);
2626                 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2627                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
2628
2629                 cur_chain->next = NULL;
2630
2631                 while (tx_ring->next) {
2632                         tx_ring = tx_ring->next;
2633
2634                         chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2635                                              GFP_KERNEL);
2636                         if (!chain)
2637                                 return -ENOMEM;
2638
2639                         cur_chain->next = chain;
2640                         chain->tqp_index = tx_ring->tqp->tqp_index;
2641                         hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2642                                       HNAE3_RING_TYPE_TX);
2643                         hnae3_set_field(chain->int_gl_idx,
2644                                         HNAE3_RING_GL_IDX_M,
2645                                         HNAE3_RING_GL_IDX_S,
2646                                         HNAE3_RING_GL_TX);
2647
2648                         cur_chain = chain;
2649                 }
2650         }
2651
2652         rx_ring = tqp_vector->rx_group.ring;
2653         if (!tx_ring && rx_ring) {
2654                 cur_chain->next = NULL;
2655                 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2656                 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2657                               HNAE3_RING_TYPE_RX);
2658                 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2659                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2660
2661                 rx_ring = rx_ring->next;
2662         }
2663
2664         while (rx_ring) {
2665                 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2666                 if (!chain)
2667                         return -ENOMEM;
2668
2669                 cur_chain->next = chain;
2670                 chain->tqp_index = rx_ring->tqp->tqp_index;
2671                 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2672                               HNAE3_RING_TYPE_RX);
2673                 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2674                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2675
2676                 cur_chain = chain;
2677
2678                 rx_ring = rx_ring->next;
2679         }
2680
2681         return 0;
2682 }
2683
2684 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2685                                         struct hnae3_ring_chain_node *head)
2686 {
2687         struct pci_dev *pdev = tqp_vector->handle->pdev;
2688         struct hnae3_ring_chain_node *chain_tmp, *chain;
2689
2690         chain = head->next;
2691
2692         while (chain) {
2693                 chain_tmp = chain->next;
2694                 devm_kfree(&pdev->dev, chain);
2695                 chain = chain_tmp;
2696         }
2697 }
2698
2699 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2700                                    struct hns3_enet_ring *ring)
2701 {
2702         ring->next = group->ring;
2703         group->ring = ring;
2704
2705         group->count++;
2706 }
2707
2708 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
2709 {
2710         struct pci_dev *pdev = priv->ae_handle->pdev;
2711         struct hns3_enet_tqp_vector *tqp_vector;
2712         int num_vectors = priv->vector_num;
2713         int numa_node;
2714         int vector_i;
2715
2716         numa_node = dev_to_node(&pdev->dev);
2717
2718         for (vector_i = 0; vector_i < num_vectors; vector_i++) {
2719                 tqp_vector = &priv->tqp_vector[vector_i];
2720                 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
2721                                 &tqp_vector->affinity_mask);
2722         }
2723 }
2724
2725 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2726 {
2727         struct hnae3_ring_chain_node vector_ring_chain;
2728         struct hnae3_handle *h = priv->ae_handle;
2729         struct hns3_enet_tqp_vector *tqp_vector;
2730         int ret = 0;
2731         u16 i;
2732
2733         hns3_nic_set_cpumask(priv);
2734
2735         for (i = 0; i < priv->vector_num; i++) {
2736                 tqp_vector = &priv->tqp_vector[i];
2737                 hns3_vector_gl_rl_init_hw(tqp_vector, priv);
2738                 tqp_vector->num_tqps = 0;
2739         }
2740
2741         for (i = 0; i < h->kinfo.num_tqps; i++) {
2742                 u16 vector_i = i % priv->vector_num;
2743                 u16 tqp_num = h->kinfo.num_tqps;
2744
2745                 tqp_vector = &priv->tqp_vector[vector_i];
2746
2747                 hns3_add_ring_to_group(&tqp_vector->tx_group,
2748                                        priv->ring_data[i].ring);
2749
2750                 hns3_add_ring_to_group(&tqp_vector->rx_group,
2751                                        priv->ring_data[i + tqp_num].ring);
2752
2753                 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2754                 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2755                 tqp_vector->num_tqps++;
2756         }
2757
2758         for (i = 0; i < priv->vector_num; i++) {
2759                 tqp_vector = &priv->tqp_vector[i];
2760
2761                 tqp_vector->rx_group.total_bytes = 0;
2762                 tqp_vector->rx_group.total_packets = 0;
2763                 tqp_vector->tx_group.total_bytes = 0;
2764                 tqp_vector->tx_group.total_packets = 0;
2765                 tqp_vector->handle = h;
2766
2767                 ret = hns3_get_vector_ring_chain(tqp_vector,
2768                                                  &vector_ring_chain);
2769                 if (ret)
2770                         return ret;
2771
2772                 ret = h->ae_algo->ops->map_ring_to_vector(h,
2773                         tqp_vector->vector_irq, &vector_ring_chain);
2774
2775                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2776
2777                 if (ret)
2778                         return ret;
2779
2780                 netif_napi_add(priv->netdev, &tqp_vector->napi,
2781                                hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2782         }
2783
2784         return 0;
2785 }
2786
2787 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
2788 {
2789         struct hnae3_handle *h = priv->ae_handle;
2790         struct hns3_enet_tqp_vector *tqp_vector;
2791         struct hnae3_vector_info *vector;
2792         struct pci_dev *pdev = h->pdev;
2793         u16 tqp_num = h->kinfo.num_tqps;
2794         u16 vector_num;
2795         int ret = 0;
2796         u16 i;
2797
2798         /* RSS size, the number of online CPUs and vector_num should match */
2799         /* Should consider 2p/4p systems later */
2800         vector_num = min_t(u16, num_online_cpus(), tqp_num);
2801         vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2802                               GFP_KERNEL);
2803         if (!vector)
2804                 return -ENOMEM;
2805
2806         vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2807
2808         priv->vector_num = vector_num;
2809         priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2810                 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2811                              GFP_KERNEL);
2812         if (!priv->tqp_vector) {
2813                 ret = -ENOMEM;
2814                 goto out;
2815         }
2816
2817         for (i = 0; i < priv->vector_num; i++) {
2818                 tqp_vector = &priv->tqp_vector[i];
2819                 tqp_vector->idx = i;
2820                 tqp_vector->mask_addr = vector[i].io_addr;
2821                 tqp_vector->vector_irq = vector[i].vector;
2822                 hns3_vector_gl_rl_init(tqp_vector, priv);
2823         }
2824
2825 out:
2826         devm_kfree(&pdev->dev, vector);
2827         return ret;
2828 }
2829
2830 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
2831 {
2832         group->ring = NULL;
2833         group->count = 0;
2834 }
2835
2836 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2837 {
2838         struct hnae3_ring_chain_node vector_ring_chain;
2839         struct hnae3_handle *h = priv->ae_handle;
2840         struct hns3_enet_tqp_vector *tqp_vector;
2841         int i, ret;
2842
2843         for (i = 0; i < priv->vector_num; i++) {
2844                 tqp_vector = &priv->tqp_vector[i];
2845
2846                 ret = hns3_get_vector_ring_chain(tqp_vector,
2847                                                  &vector_ring_chain);
2848                 if (ret)
2849                         return ret;
2850
2851                 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2852                         tqp_vector->vector_irq, &vector_ring_chain);
2853                 if (ret)
2854                         return ret;
2855
2856                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2857
2858                 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2859                         (void)irq_set_affinity_hint(
2860                                 priv->tqp_vector[i].vector_irq,
2861                                 NULL);
2862                         free_irq(priv->tqp_vector[i].vector_irq,
2863                                  &priv->tqp_vector[i]);
2864                 }
2865
2866                 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2867                 hns3_clear_ring_group(&tqp_vector->rx_group);
2868                 hns3_clear_ring_group(&tqp_vector->tx_group);
2869                 netif_napi_del(&priv->tqp_vector[i].napi);
2870         }
2871
2872         return 0;
2873 }
2874
2875 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
2876 {
2877         struct hnae3_handle *h = priv->ae_handle;
2878         struct pci_dev *pdev = h->pdev;
2879         int i, ret;
2880
2881         for (i = 0; i < priv->vector_num; i++) {
2882                 struct hns3_enet_tqp_vector *tqp_vector;
2883
2884                 tqp_vector = &priv->tqp_vector[i];
2885                 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
2886                 if (ret)
2887                         return ret;
2888         }
2889
2890         devm_kfree(&pdev->dev, priv->tqp_vector);
2891         return 0;
2892 }
2893
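/* Allocate and initialize one ring for the given queue and direction; TX
 * rings occupy the first num_tqps entries of ring_data and RX rings the
 * following num_tqps entries.
 */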
2894 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2895                              int ring_type)
2896 {
2897         struct hns3_nic_ring_data *ring_data = priv->ring_data;
2898         int queue_num = priv->ae_handle->kinfo.num_tqps;
2899         struct pci_dev *pdev = priv->ae_handle->pdev;
2900         struct hns3_enet_ring *ring;
2901
2902         ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2903         if (!ring)
2904                 return -ENOMEM;
2905
2906         if (ring_type == HNAE3_RING_TYPE_TX) {
2907                 ring_data[q->tqp_index].ring = ring;
2908                 ring_data[q->tqp_index].queue_index = q->tqp_index;
2909                 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2910         } else {
2911                 ring_data[q->tqp_index + queue_num].ring = ring;
2912                 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2913                 ring->io_base = q->io_base;
2914         }
2915
2916         hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2917
2918         ring->tqp = q;
2919         ring->desc = NULL;
2920         ring->desc_cb = NULL;
2921         ring->dev = priv->dev;
2922         ring->desc_dma_addr = 0;
2923         ring->buf_size = q->buf_size;
2924         ring->desc_num = q->desc_num;
2925         ring->next_to_use = 0;
2926         ring->next_to_clean = 0;
2927
2928         return 0;
2929 }
2930
2931 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2932                               struct hns3_nic_priv *priv)
2933 {
2934         int ret;
2935
2936         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2937         if (ret)
2938                 return ret;
2939
2940         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2941         if (ret)
2942                 return ret;
2943
2944         return 0;
2945 }
2946
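     /* Allocate priv->ring_data (two entries per TQP) and create a TX and
      * an RX ring for every queue.
      */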
2947 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2948 {
2949         struct hnae3_handle *h = priv->ae_handle;
2950         struct pci_dev *pdev = h->pdev;
2951         int i, ret;
2952
2953         priv->ring_data =  devm_kzalloc(&pdev->dev,
2954                                         array3_size(h->kinfo.num_tqps,
2955                                                     sizeof(*priv->ring_data),
2956                                                     2),
2957                                         GFP_KERNEL);
2958         if (!priv->ring_data)
2959                 return -ENOMEM;
2960
2961         for (i = 0; i < h->kinfo.num_tqps; i++) {
2962                 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2963                 if (ret)
2964                         goto err;
2965         }
2966
2967         return 0;
2968 err:
2969         devm_kfree(&pdev->dev, priv->ring_data);
2970         return ret;
2971 }
2972
2973 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
2974 {
2975         struct hnae3_handle *h = priv->ae_handle;
2976         int i;
2977
2978         for (i = 0; i < h->kinfo.num_tqps; i++) {
2979                 devm_kfree(priv->dev, priv->ring_data[i].ring);
2980                 devm_kfree(priv->dev,
2981                            priv->ring_data[i + h->kinfo.num_tqps].ring);
2982         }
2983         devm_kfree(priv->dev, priv->ring_data);
2984 }
2985
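     /* Allocate the per-descriptor control blocks and the descriptor ring
      * itself; RX rings additionally get their receive buffers allocated
      * here.
      */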
2986 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2987 {
2988         int ret;
2989
2990         if (ring->desc_num <= 0 || ring->buf_size <= 0)
2991                 return -EINVAL;
2992
2993         ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2994                                 GFP_KERNEL);
2995         if (!ring->desc_cb) {
2996                 ret = -ENOMEM;
2997                 goto out;
2998         }
2999
3000         ret = hns3_alloc_desc(ring);
3001         if (ret)
3002                 goto out_with_desc_cb;
3003
3004         if (!HNAE3_IS_TX_RING(ring)) {
3005                 ret = hns3_alloc_ring_buffers(ring);
3006                 if (ret)
3007                         goto out_with_desc;
3008         }
3009
3010         return 0;
3011
3012 out_with_desc:
3013         hns3_free_desc(ring);
3014 out_with_desc_cb:
3015         kfree(ring->desc_cb);
3016         ring->desc_cb = NULL;
3017 out:
3018         return ret;
3019 }
3020
3021 static void hns3_fini_ring(struct hns3_enet_ring *ring)
3022 {
3023         hns3_free_desc(ring);
3024         kfree(ring->desc_cb);
3025         ring->desc_cb = NULL;
3026         ring->next_to_clean = 0;
3027         ring->next_to_use = 0;
3028 }
3029
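     /* Translate an RX buffer size in bytes into the hardware BD size type;
      * unknown sizes fall back to the 2048-byte type.
      */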
3030 static int hns3_buf_size2type(u32 buf_size)
3031 {
3032         int bd_size_type;
3033
3034         switch (buf_size) {
3035         case 512:
3036                 bd_size_type = HNS3_BD_SIZE_512_TYPE;
3037                 break;
3038         case 1024:
3039                 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
3040                 break;
3041         case 2048:
3042                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3043                 break;
3044         case 4096:
3045                 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
3046                 break;
3047         default:
3048                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3049         }
3050
3051         return bd_size_type;
3052 }
3053
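     /* Program the ring's descriptor DMA base address, BD number and, for
      * RX rings, the BD length type into the queue's hardware registers.
      */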
3054 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
3055 {
3056         dma_addr_t dma = ring->desc_dma_addr;
3057         struct hnae3_queue *q = ring->tqp;
3058
3059         if (!HNAE3_IS_TX_RING(ring)) {
3060                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
3061                                (u32)dma);
3062                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
3063                                (u32)((dma >> 31) >> 1));
3064
3065                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
3066                                hns3_buf_size2type(ring->buf_size));
3067                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
3068                                ring->desc_num / 8 - 1);
3069
3070         } else {
3071                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
3072                                (u32)dma);
3073                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3074                                (u32)((dma >> 31) >> 1));
3075
3076                 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3077                                ring->desc_num / 8 - 1);
3078         }
3079 }
3080
3081 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3082 {
3083         struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3084         int i;
3085
3086         for (i = 0; i < HNAE3_MAX_TC; i++) {
3087                 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3088                 int j;
3089
3090                 if (!tc_info->enable)
3091                         continue;
3092
3093                 for (j = 0; j < tc_info->tqp_count; j++) {
3094                         struct hnae3_queue *q;
3095
3096                         q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
3097                         hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
3098                                        tc_info->tc);
3099                 }
3100         }
3101 }
3102
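     /* Allocate descriptor memory for every TX and RX ring and initialize
      * the per-ring stats sync; on failure, tear down the rings set up so
      * far.
      */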
3103 int hns3_init_all_ring(struct hns3_nic_priv *priv)
3104 {
3105         struct hnae3_handle *h = priv->ae_handle;
3106         int ring_num = h->kinfo.num_tqps * 2;
3107         int i, j;
3108         int ret;
3109
3110         for (i = 0; i < ring_num; i++) {
3111                 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
3112                 if (ret) {
3113                         dev_err(priv->dev,
3114                                 "Alloc ring memory fail! ret=%d\n", ret);
3115                         goto out_when_alloc_ring_memory;
3116                 }
3117
3118                 u64_stats_init(&priv->ring_data[i].ring->syncp);
3119         }
3120
3121         return 0;
3122
3123 out_when_alloc_ring_memory:
3124         for (j = i - 1; j >= 0; j--)
3125                 hns3_fini_ring(priv->ring_data[j].ring);
3126
3127         return -ENOMEM;
3128 }
3129
3130 int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3131 {
3132         struct hnae3_handle *h = priv->ae_handle;
3133         int i;
3134
3135         for (i = 0; i < h->kinfo.num_tqps; i++) {
3136                 if (h->ae_algo->ops->reset_queue)
3137                         h->ae_algo->ops->reset_queue(h, i);
3138
3139                 hns3_fini_ring(priv->ring_data[i].ring);
3140                 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3141         }
3142         return 0;
3143 }
3144
3145 /* Set the MAC address if it is configured, otherwise leave it to the AE driver */
3146 static void hns3_init_mac_addr(struct net_device *netdev, bool init)
3147 {
3148         struct hns3_nic_priv *priv = netdev_priv(netdev);
3149         struct hnae3_handle *h = priv->ae_handle;
3150         u8 mac_addr_temp[ETH_ALEN];
3151
3152         if (h->ae_algo->ops->get_mac_addr && init) {
3153                 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3154                 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3155         }
3156
3157         /* Check if the MAC address is valid; if not, get a random one */
3158         if (!is_valid_ether_addr(netdev->dev_addr)) {
3159                 eth_hw_addr_random(netdev);
3160                 dev_warn(priv->dev, "using random MAC address %pM\n",
3161                          netdev->dev_addr);
3162         }
3163
3164         if (h->ae_algo->ops->set_mac_addr)
3165                 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3166
3167 }
3168
3169 static int hns3_restore_fd_rules(struct net_device *netdev)
3170 {
3171         struct hnae3_handle *h = hns3_get_handle(netdev);
3172         int ret = 0;
3173
3174         if (h->ae_algo->ops->restore_fd_rules)
3175                 ret = h->ae_algo->ops->restore_fd_rules(h);
3176
3177         return ret;
3178 }
3179
3180 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
3181 {
3182         struct hnae3_handle *h = hns3_get_handle(netdev);
3183
3184         if (h->ae_algo->ops->del_all_fd_entries)
3185                 h->ae_algo->ops->del_all_fd_entries(h, clear_list);
3186 }
3187
3188 static void hns3_nic_set_priv_ops(struct net_device *netdev)
3189 {
3190         struct hns3_nic_priv *priv = netdev_priv(netdev);
3191
3192         if ((netdev->features & NETIF_F_TSO) ||
3193             (netdev->features & NETIF_F_TSO6)) {
3194                 priv->ops.fill_desc = hns3_fill_desc_tso;
3195                 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
3196         } else {
3197                 priv->ops.fill_desc = hns3_fill_desc;
3198                 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
3199         }
3200 }
3201
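     /* Client init callback: allocate and set up the net_device, configure
      * its ops and features, allocate interrupt vectors and rings, then
      * register the netdev.
      */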
3202 static int hns3_client_init(struct hnae3_handle *handle)
3203 {
3204         struct pci_dev *pdev = handle->pdev;
3205         u16 alloc_tqps, max_rss_size;
3206         struct hns3_nic_priv *priv;
3207         struct net_device *netdev;
3208         int ret;
3209
3210         handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
3211                                                     &max_rss_size);
3212         netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
3213         if (!netdev)
3214                 return -ENOMEM;
3215
3216         priv = netdev_priv(netdev);
3217         priv->dev = &pdev->dev;
3218         priv->netdev = netdev;
3219         priv->ae_handle = handle;
3220         priv->ae_handle->last_reset_time = jiffies;
3221         priv->tx_timeout_count = 0;
3222
3223         handle->kinfo.netdev = netdev;
3224         handle->priv = (void *)priv;
3225
3226         hns3_init_mac_addr(netdev, true);
3227
3228         hns3_set_default_feature(netdev);
3229
3230         netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3231         netdev->priv_flags |= IFF_UNICAST_FLT;
3232         netdev->netdev_ops = &hns3_nic_netdev_ops;
3233         SET_NETDEV_DEV(netdev, &pdev->dev);
3234         hns3_ethtool_set_ops(netdev);
3235         hns3_nic_set_priv_ops(netdev);
3236
3237         /* Carrier off reporting is important to ethtool even BEFORE open */
3238         netif_carrier_off(netdev);
3239
3240         if (handle->flags & HNAE3_SUPPORT_VF)
3241                 handle->reset_level = HNAE3_VF_RESET;
3242         else
3243                 handle->reset_level = HNAE3_FUNC_RESET;
3244
3245         ret = hns3_get_ring_config(priv);
3246         if (ret) {
3247                 ret = -ENOMEM;
3248                 goto out_get_ring_cfg;
3249         }
3250
3251         ret = hns3_nic_alloc_vector_data(priv);
3252         if (ret) {
3253                 ret = -ENOMEM;
3254                 goto out_alloc_vector_data;
3255         }
3256
3257         ret = hns3_nic_init_vector_data(priv);
3258         if (ret) {
3259                 ret = -ENOMEM;
3260                 goto out_init_vector_data;
3261         }
3262
3263         ret = hns3_init_all_ring(priv);
3264         if (ret) {
3265                 ret = -ENOMEM;
3266                 goto out_init_ring_data;
3267         }
3268
3269         ret = register_netdev(netdev);
3270         if (ret) {
3271                 dev_err(priv->dev, "probe register netdev fail!\n");
3272                 goto out_reg_netdev_fail;
3273         }
3274
3275         hns3_dcbnl_setup(handle);
3276
3277         /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
3278         netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
3279
3280         return ret;
3281
3282 out_reg_netdev_fail:
3283 out_init_ring_data:
3284         (void)hns3_nic_uninit_vector_data(priv);
3285 out_init_vector_data:
3286         hns3_nic_dealloc_vector_data(priv);
3287 out_alloc_vector_data:
3288         priv->ring_data = NULL;
3289 out_get_ring_cfg:
3290         priv->ae_handle = NULL;
3291         free_netdev(netdev);
3292         return ret;
3293 }
3294
3295 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3296 {
3297         struct net_device *netdev = handle->kinfo.netdev;
3298         struct hns3_nic_priv *priv = netdev_priv(netdev);
3299         int ret;
3300
3301         hns3_remove_hw_addr(netdev);
3302
3303         if (netdev->reg_state != NETREG_UNINITIALIZED)
3304                 unregister_netdev(netdev);
3305
3306         hns3_del_all_fd_rules(netdev, true);
3307
3308         hns3_force_clear_all_rx_ring(handle);
3309
3310         ret = hns3_nic_uninit_vector_data(priv);
3311         if (ret)
3312                 netdev_err(netdev, "uninit vector error\n");
3313
3314         ret = hns3_nic_dealloc_vector_data(priv);
3315         if (ret)
3316                 netdev_err(netdev, "dealloc vector error\n");
3317
3318         ret = hns3_uninit_all_ring(priv);
3319         if (ret)
3320                 netdev_err(netdev, "uninit ring error\n");
3321
3322         hns3_put_ring_config(priv);
3323
3324         priv->ring_data = NULL;
3325
3326         free_netdev(netdev);
3327 }
3328
3329 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3330 {
3331         struct net_device *netdev = handle->kinfo.netdev;
3332
3333         if (!netdev)
3334                 return;
3335
3336         if (linkup) {
3337                 netif_carrier_on(netdev);
3338                 netif_tx_wake_all_queues(netdev);
3339                 netdev_info(netdev, "link up\n");
3340         } else {
3341                 netif_carrier_off(netdev);
3342                 netif_tx_stop_all_queues(netdev);
3343                 netdev_info(netdev, "link down\n");
3344         }
3345 }
3346
3347 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3348 {
3349         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3350         struct net_device *ndev = kinfo->netdev;
3351         bool if_running;
3352         int ret;
3353
3354         if (tc > HNAE3_MAX_TC)
3355                 return -EINVAL;
3356
3357         if (!ndev)
3358                 return -ENODEV;
3359
3360         if_running = netif_running(ndev);
3361
3362         if (if_running) {
3363                 (void)hns3_nic_net_stop(ndev);
3364                 msleep(100);
3365         }
3366
3367         ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3368                 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3369         if (ret)
3370                 goto err_out;
3371
3372         ret = hns3_nic_set_real_num_queue(ndev);
3373
3374 err_out:
3375         if (if_running)
3376                 (void)hns3_nic_net_open(ndev);
3377
3378         return ret;
3379 }
3380
3381 static void hns3_recover_hw_addr(struct net_device *ndev)
3382 {
3383         struct netdev_hw_addr_list *list;
3384         struct netdev_hw_addr *ha, *tmp;
3385
3386         /* go through and sync uc_addr entries to the device */
3387         list = &ndev->uc;
3388         list_for_each_entry_safe(ha, tmp, &list->list, list)
3389                 hns3_nic_uc_sync(ndev, ha->addr);
3390
3391         /* go through and sync mc_addr entries to the device */
3392         list = &ndev->mc;
3393         list_for_each_entry_safe(ha, tmp, &list->list, list)
3394                 hns3_nic_mc_sync(ndev, ha->addr);
3395 }
3396
3397 static void hns3_remove_hw_addr(struct net_device *netdev)
3398 {
3399         struct netdev_hw_addr_list *list;
3400         struct netdev_hw_addr *ha, *tmp;
3401
3402         hns3_nic_uc_unsync(netdev, netdev->dev_addr);
3403
3404         /* go through and unsync uc_addr entries from the device */
3405         list = &netdev->uc;
3406         list_for_each_entry_safe(ha, tmp, &list->list, list)
3407                 hns3_nic_uc_unsync(netdev, ha->addr);
3408
3409         /* go through and unsync mc_addr entries from the device */
3410         list = &netdev->mc;
3411         list_for_each_entry_safe(ha, tmp, &list->list, list)
3412                 if (ha->refcount > 1)
3413                         hns3_nic_mc_unsync(netdev, ha->addr);
3414 }
3415
3416 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
3417 {
3418         while (ring->next_to_clean != ring->next_to_use) {
3419                 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
3420                 hns3_free_buffer_detach(ring, ring->next_to_clean);
3421                 ring_ptr_move_fw(ring, next_to_clean);
3422         }
3423 }
3424
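     /* Walk the RX ring and replace every buffer that was handed up to the
      * stack (reuse_flag not set) with a freshly allocated one; bail out if
      * allocation fails so the caller can re-clear in the up flow.
      */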
3425 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
3426 {
3427         struct hns3_desc_cb res_cbs;
3428         int ret;
3429
3430         while (ring->next_to_use != ring->next_to_clean) {
3431                 /* When a buffer is not reused, its memory has been
3432                  * freed in hns3_handle_rx_bd or will be freed by the
3433                  * stack, so we need to replace the buffer here.
3434                  */
3435                 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3436                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
3437                         if (ret) {
3438                                 u64_stats_update_begin(&ring->syncp);
3439                                 ring->stats.sw_err_cnt++;
3440                                 u64_stats_update_end(&ring->syncp);
3441                                 /* if allocating a new buffer fails, exit
3442                                  * directly and re-clear in the up flow.
3443                                  */
3444                                 netdev_warn(ring->tqp->handle->kinfo.netdev,
3445                                             "reserve buffer map failed, ret = %d\n",
3446                                             ret);
3447                                 return ret;
3448                         }
3449                         hns3_replace_buffer(ring, ring->next_to_use,
3450                                             &res_cbs);
3451                 }
3452                 ring_ptr_move_fw(ring, next_to_use);
3453         }
3454
3455         return 0;
3456 }
3457
3458 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
3459 {
3460         while (ring->next_to_use != ring->next_to_clean) {
3461                 /* When a buffer is not reused, its memory has been
3462                  * freed in hns3_handle_rx_bd or will be freed by the
3463                  * stack, so we only need to unmap the buffer here.
3464                  */
3465                 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3466                         hns3_unmap_buffer(ring,
3467                                           &ring->desc_cb[ring->next_to_use]);
3468                         ring->desc_cb[ring->next_to_use].dma = 0;
3469                 }
3470
3471                 ring_ptr_move_fw(ring, next_to_use);
3472         }
3473 }
3474
3475 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
3476 {
3477         struct net_device *ndev = h->kinfo.netdev;
3478         struct hns3_nic_priv *priv = netdev_priv(ndev);
3479         struct hns3_enet_ring *ring;
3480         u32 i;
3481
3482         for (i = 0; i < h->kinfo.num_tqps; i++) {
3483                 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3484                 hns3_force_clear_rx_ring(ring);
3485         }
3486 }
3487
3488 static void hns3_clear_all_ring(struct hnae3_handle *h)
3489 {
3490         struct net_device *ndev = h->kinfo.netdev;
3491         struct hns3_nic_priv *priv = netdev_priv(ndev);
3492         u32 i;
3493
3494         for (i = 0; i < h->kinfo.num_tqps; i++) {
3495                 struct netdev_queue *dev_queue;
3496                 struct hns3_enet_ring *ring;
3497
3498                 ring = priv->ring_data[i].ring;
3499                 hns3_clear_tx_ring(ring);
3500                 dev_queue = netdev_get_tx_queue(ndev,
3501                                                 priv->ring_data[i].queue_index);
3502                 netdev_tx_reset_queue(dev_queue);
3503
3504                 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3505                 /* Continue to clear other rings even if clearing some
3506                  * rings failed.
3507                  */
3508                 hns3_clear_rx_ring(ring);
3509         }
3510 }
3511
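     /* Reset every hardware queue, reprogram the ring registers, drop
      * pending TX descriptors, re-arm all RX buffers and reset the software
      * ring pointers, then restore the TX ring to TC mapping.
      */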
3512 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
3513 {
3514         struct net_device *ndev = h->kinfo.netdev;
3515         struct hns3_nic_priv *priv = netdev_priv(ndev);
3516         struct hns3_enet_ring *rx_ring;
3517         int i, j;
3518         int ret;
3519
3520         for (i = 0; i < h->kinfo.num_tqps; i++) {
3521                 h->ae_algo->ops->reset_queue(h, i);
3522                 hns3_init_ring_hw(priv->ring_data[i].ring);
3523
3524                 /* We need to clear the tx ring here because the self test
3525                  * uses the ring without going through a down/up cycle.
3526                  */
3527                 hns3_clear_tx_ring(priv->ring_data[i].ring);
3528                 priv->ring_data[i].ring->next_to_clean = 0;
3529                 priv->ring_data[i].ring->next_to_use = 0;
3530
3531                 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3532                 hns3_init_ring_hw(rx_ring);
3533                 ret = hns3_clear_rx_ring(rx_ring);
3534                 if (ret)
3535                         return ret;
3536
3537                 /* We cannot know the hardware head and tail when this
3538                  * function is called in the reset flow, so reuse all desc.
3539                  */
3540                 for (j = 0; j < rx_ring->desc_num; j++)
3541                         hns3_reuse_buffer(rx_ring, j);
3542
3543                 rx_ring->next_to_clean = 0;
3544                 rx_ring->next_to_use = 0;
3545         }
3546
3547         hns3_init_tx_ring_tc(priv);
3548
3549         return 0;
3550 }
3551
3552 static void hns3_store_coal(struct hns3_nic_priv *priv)
3553 {
3554         /* ethtool only supports setting and querying one coalesce
3555          * configuration for now, so save vector 0's coalesce
3556          * configuration here in order to restore it.
3557          */
3558         memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
3559                sizeof(struct hns3_enet_coalesce));
3560         memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
3561                sizeof(struct hns3_enet_coalesce));
3562 }
3563
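     /* Reapply the saved coalesce settings to every vector's TX and RX ring
      * groups.
      */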
3564 static void hns3_restore_coal(struct hns3_nic_priv *priv)
3565 {
3566         u16 vector_num = priv->vector_num;
3567         int i;
3568
3569         for (i = 0; i < vector_num; i++) {
3570                 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
3571                        sizeof(struct hns3_enet_coalesce));
3572                 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
3573                        sizeof(struct hns3_enet_coalesce));
3574         }
3575 }
3576
3577 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3578 {
3579         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3580         struct net_device *ndev = kinfo->netdev;
3581
3582         if (!netif_running(ndev))
3583                 return 0;
3584
3585         return hns3_nic_net_stop(ndev);
3586 }
3587
3588 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
3589 {
3590         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3591         int ret = 0;
3592
3593         if (netif_running(kinfo->netdev)) {
3594                 ret = hns3_nic_net_up(kinfo->netdev);
3595                 if (ret) {
3596                         netdev_err(kinfo->netdev,
3597                                    "hns net up fail, ret=%d!\n", ret);
3598                         return ret;
3599                 }
3600                 handle->last_reset_time = jiffies;
3601         }
3602
3603         return ret;
3604 }
3605
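     /* INIT stage of the reset notification: restore the MAC address, RX
      * mode, VLAN and flow director state, reapply the saved coalesce
      * settings, then rebind the vectors and reallocate all rings.
      */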
3606 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3607 {
3608         struct net_device *netdev = handle->kinfo.netdev;
3609         struct hns3_nic_priv *priv = netdev_priv(netdev);
3610         int ret;
3611
3612         hns3_init_mac_addr(netdev, false);
3613         hns3_nic_set_rx_mode(netdev);
3614         hns3_recover_hw_addr(netdev);
3615
3616         /* The hardware table is only cleared when the PF resets */
3617         if (!(handle->flags & HNAE3_SUPPORT_VF))
3618                 hns3_restore_vlan(netdev);
3619
3620         hns3_restore_fd_rules(netdev);
3621
3622         /* Carrier off reporting is important to ethtool even BEFORE open */
3623         netif_carrier_off(netdev);
3624
3625         hns3_restore_coal(priv);
3626
3627         ret = hns3_nic_init_vector_data(priv);
3628         if (ret)
3629                 return ret;
3630
3631         ret = hns3_init_all_ring(priv);
3632         if (ret) {
3633                 hns3_nic_uninit_vector_data(priv);
3634                 priv->ring_data = NULL;
3635         }
3636
3637         return ret;
3638 }
3639
3640 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3641 {
3642         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
3643         struct net_device *netdev = handle->kinfo.netdev;
3644         struct hns3_nic_priv *priv = netdev_priv(netdev);
3645         int ret;
3646
3647         hns3_force_clear_all_rx_ring(handle);
3648
3649         ret = hns3_nic_uninit_vector_data(priv);
3650         if (ret) {
3651                 netdev_err(netdev, "uninit vector error\n");
3652                 return ret;
3653         }
3654
3655         hns3_store_coal(priv);
3656
3657         ret = hns3_uninit_all_ring(priv);
3658         if (ret)
3659                 netdev_err(netdev, "uninit ring error\n");
3660
3661         /* It is cumbersome for the hardware to pick and choose entries
3662          * for deletion from its table space. Hence, for a function reset,
3663          * software intervention is required to delete the entries.
3664          */
3665         if (hns3_dev_ongoing_func_reset(ae_dev)) {
3666                 hns3_remove_hw_addr(netdev);
3667                 hns3_del_all_fd_rules(netdev, false);
3668         }
3669
3670         return ret;
3671 }
3672
3673 static int hns3_reset_notify(struct hnae3_handle *handle,
3674                              enum hnae3_reset_notify_type type)
3675 {
3676         int ret = 0;
3677
3678         switch (type) {
3679         case HNAE3_UP_CLIENT:
3680                 ret = hns3_reset_notify_up_enet(handle);
3681                 break;
3682         case HNAE3_DOWN_CLIENT:
3683                 ret = hns3_reset_notify_down_enet(handle);
3684                 break;
3685         case HNAE3_INIT_CLIENT:
3686                 ret = hns3_reset_notify_init_enet(handle);
3687                 break;
3688         case HNAE3_UNINIT_CLIENT:
3689                 ret = hns3_reset_notify_uninit_enet(handle);
3690                 break;
3691         default:
3692                 break;
3693         }
3694
3695         return ret;
3696 }
3697
3698 static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
3699 {
3700         struct hns3_nic_priv *priv = netdev_priv(netdev);
3701         struct hnae3_handle *h = hns3_get_handle(netdev);
3702         int ret;
3703
3704         ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
3705         if (ret)
3706                 return ret;
3707
3708         ret = hns3_get_ring_config(priv);
3709         if (ret)
3710                 return ret;
3711
3712         ret = hns3_nic_alloc_vector_data(priv);
3713         if (ret)
3714                 goto err_alloc_vector;
3715
3716         hns3_restore_coal(priv);
3717
3718         ret = hns3_nic_init_vector_data(priv);
3719         if (ret)
3720                 goto err_uninit_vector;
3721
3722         ret = hns3_init_all_ring(priv);
3723         if (ret)
3724                 goto err_put_ring;
3725
3726         return 0;
3727
3728 err_put_ring:
3729         hns3_put_ring_config(priv);
3730 err_uninit_vector:
3731         hns3_nic_uninit_vector_data(priv);
3732 err_alloc_vector:
3733         hns3_nic_dealloc_vector_data(priv);
3734         return ret;
3735 }
3736
3737 static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
3738 {
3739         return (new_tqp_num / num_tc) * num_tc;
3740 }
3741
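     /* ethtool ->set_channels(): validate the requested combined queue
      * count, tear down the existing vectors and rings, then rebuild them
      * with the new TQP number, reverting to the old count if the change
      * fails.
      */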
3742 int hns3_set_channels(struct net_device *netdev,
3743                       struct ethtool_channels *ch)
3744 {
3745         struct hns3_nic_priv *priv = netdev_priv(netdev);
3746         struct hnae3_handle *h = hns3_get_handle(netdev);
3747         struct hnae3_knic_private_info *kinfo = &h->kinfo;
3748         bool if_running = netif_running(netdev);
3749         u32 new_tqp_num = ch->combined_count;
3750         u16 org_tqp_num;
3751         int ret;
3752
3753         if (ch->rx_count || ch->tx_count)
3754                 return -EINVAL;
3755
3756         if (new_tqp_num > hns3_get_max_available_channels(h) ||
3757             new_tqp_num < kinfo->num_tc) {
3758                 dev_err(&netdev->dev,
3759                         "Change tqps fail, the tqp range is from %d to %d",
3760                         kinfo->num_tc,
3761                         hns3_get_max_available_channels(h));
3762                 return -EINVAL;
3763         }
3764
3765         new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
3766         if (kinfo->num_tqps == new_tqp_num)
3767                 return 0;
3768
3769         if (if_running)
3770                 hns3_nic_net_stop(netdev);
3771
3772         ret = hns3_nic_uninit_vector_data(priv);
3773         if (ret) {
3774                 dev_err(&netdev->dev,
3775                         "Unbind vector with tqp fail, nothing is changed");
3776                 goto open_netdev;
3777         }
3778
3779         hns3_store_coal(priv);
3780
3781         hns3_nic_dealloc_vector_data(priv);
3782
3783         hns3_uninit_all_ring(priv);
3784         hns3_put_ring_config(priv);
3785
3786         org_tqp_num = h->kinfo.num_tqps;
3787         ret = hns3_modify_tqp_num(netdev, new_tqp_num);
3788         if (ret) {
3789                 ret = hns3_modify_tqp_num(netdev, org_tqp_num);
3790                 if (ret) {
3791                         /* If revert to old tqp failed, fatal error occurred */
3792                         dev_err(&netdev->dev,
3793                                 "Revert to old tqp num fail, ret=%d", ret);
3794                         return ret;
3795                 }
3796                 dev_info(&netdev->dev,
3797                          "Change tqp num fail, Revert to old tqp num");
3798         }
3799
3800 open_netdev:
3801         if (if_running)
3802                 hns3_nic_net_open(netdev);
3803
3804         return ret;
3805 }
3806
3807 static const struct hnae3_client_ops client_ops = {
3808         .init_instance = hns3_client_init,
3809         .uninit_instance = hns3_client_uninit,
3810         .link_status_change = hns3_link_status_change,
3811         .setup_tc = hns3_client_setup_tc,
3812         .reset_notify = hns3_reset_notify,
3813 };
3814
3815 /* hns3_init_module - Driver registration routine
3816  * hns3_init_module is the first routine called when the driver is
3817  * loaded. It registers the hnae3 client and the PCI driver.
3818  */
3819 static int __init hns3_init_module(void)
3820 {
3821         int ret;
3822
3823         pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
3824         pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
3825
3826         client.type = HNAE3_CLIENT_KNIC;
3827         snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
3828                  hns3_driver_name);
3829
3830         client.ops = &client_ops;
3831
3832         INIT_LIST_HEAD(&client.node);
3833
3834         ret = hnae3_register_client(&client);
3835         if (ret)
3836                 return ret;
3837
3838         ret = pci_register_driver(&hns3_driver);
3839         if (ret)
3840                 hnae3_unregister_client(&client);
3841
3842         return ret;
3843 }
3844 module_init(hns3_init_module);
3845
3846 /* hns3_exit_module - Driver exit cleanup routine
3847  * hns3_exit_module is called just before the driver is removed
3848  * from memory.
3849  */
3850 static void __exit hns3_exit_module(void)
3851 {
3852         pci_unregister_driver(&hns3_driver);
3853         hnae3_unregister_client(&client);
3854 }
3855 module_exit(hns3_exit_module);
3856
3857 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
3858 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3859 MODULE_LICENSE("GPL");
3860 MODULE_ALIAS("pci:hns-nic");
3861 MODULE_VERSION(HNS3_MOD_VERSION);