[linux-2.6-microblaze.git] drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
1 /*
2  * Copyright (c) 2016~2017 Hisilicon Limited.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  */
9
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/interrupt.h>
13 #include <linux/if_vlan.h>
14 #include <linux/ip.h>
15 #include <linux/ipv6.h>
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/skbuff.h>
19 #include <linux/sctp.h>
20 #include <linux/vermagic.h>
21 #include <net/gre.h>
22 #include <net/vxlan.h>
23
24 #include "hnae3.h"
25 #include "hns3_enet.h"
26
27 const char hns3_driver_name[] = "hns3";
28 const char hns3_driver_version[] = VERMAGIC_STRING;
29 static const char hns3_driver_string[] =
30                         "Hisilicon Ethernet Network Driver for Hip08 Family";
31 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
32 static struct hnae3_client client;
33
34 /* hns3_pci_tbl - PCI Device ID Table
35  *
36  * Last entry must be all 0s
37  *
38  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
39  *   Class, Class Mask, private data (not used) }
40  */
41 static const struct pci_device_id hns3_pci_tbl[] = {
42         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
45         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49         /* required last entry */
50         {0, }
51 };
52 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
53
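/* Top-half interrupt handler for one TQP vector: all TX/RX work is deferred
 * to the vector's NAPI context, so the handler only schedules NAPI and
 * acknowledges the interrupt.
 */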
54 static irqreturn_t hns3_irq_handle(int irq, void *dev)
55 {
56         struct hns3_enet_tqp_vector *tqp_vector = dev;
57
58         napi_schedule(&tqp_vector->napi);
59
60         return IRQ_HANDLED;
61 }
62
63 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
64 {
65         struct hns3_enet_tqp_vector *tqp_vectors;
66         unsigned int i;
67
68         for (i = 0; i < priv->vector_num; i++) {
69                 tqp_vectors = &priv->tqp_vector[i];
70
71                 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
72                         continue;
73
74                 /* release the irq resource */
75                 free_irq(tqp_vectors->vector_irq, tqp_vectors);
76                 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
77         }
78 }
79
80 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
81 {
82         struct hns3_enet_tqp_vector *tqp_vectors;
83         int txrx_int_idx = 0;
84         int rx_int_idx = 0;
85         int tx_int_idx = 0;
86         unsigned int i;
87         int ret;
88
89         for (i = 0; i < priv->vector_num; i++) {
90                 tqp_vectors = &priv->tqp_vector[i];
91
92                 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
93                         continue;
94
95                 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
96                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
97                                  "%s-%s-%d", priv->netdev->name, "TxRx",
98                                  txrx_int_idx++);
100                 } else if (tqp_vectors->rx_group.ring) {
101                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
102                                  "%s-%s-%d", priv->netdev->name, "Rx",
103                                  rx_int_idx++);
104                 } else if (tqp_vectors->tx_group.ring) {
105                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
106                                  "%s-%s-%d", priv->netdev->name, "Tx",
107                                  tx_int_idx++);
108                 } else {
109                         /* Skip this unused q_vector */
110                         continue;
111                 }
112
113                 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
114
115                 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
116                                   tqp_vectors->name,
117                                        tqp_vectors);
118                 if (ret) {
119                         netdev_err(priv->netdev, "request irq(%d) fail\n",
120                                    tqp_vectors->vector_irq);
121                         return ret;
122                 }
123
124                 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
125         }
126
127         return 0;
128 }
129
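/* Write the per-vector interrupt mask register: 1 unmasks (enables) the
 * vector, 0 masks (disables) it, as used by hns3_vector_enable() and
 * hns3_vector_disable() below.
 */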
130 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
131                                  u32 mask_en)
132 {
133         writel(mask_en, tqp_vector->mask_addr);
134 }
135
136 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
137 {
138         napi_enable(&tqp_vector->napi);
139
140         /* enable vector */
141         hns3_mask_vector_irq(tqp_vector, 1);
142 }
143
144 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
145 {
146         /* disable vector */
147         hns3_mask_vector_irq(tqp_vector, 0);
148
149         disable_irq(tqp_vector->vector_irq);
150         napi_disable(&tqp_vector->napi);
151 }
152
153 static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
154                                        u32 gl_value)
155 {
156         /* This defines the configuration for GL (Interrupt Gap Limiter).
157          * GL defines the inter-interrupt gap.
158          * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
159          */
160         writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
161         writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
162         writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
163 }
164
165 static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
166                                        u32 rl_value)
167 {
168         /* This defines the configuration for RL (Interrupt Rate Limiter).
169          * RL defines the rate of interrupts, i.e. the number of interrupts per second.
170          * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
171          */
172         writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
173 }
174
175 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
176 {
177         /* initialize the configuration for interrupt coalescing.
178          * 1. GL (Interrupt Gap Limiter)
179          * 2. RL (Interrupt Rate Limiter)
180          */
181
182         /* Default: enable interrupt coalescing */
183         tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
184         tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
185         hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
186         /* for now we are disabling Interrupt RL - we
187          * will re-enable later
188          */
189         hns3_set_vector_coalesc_rl(tqp_vector, 0);
190         tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
191         tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
192 }
193
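/* Bring-up path for ndo_open: request the vector IRQs, enable NAPI and
 * unmask every vector, then start the ae_dev through the hnae3 ops; on
 * failure the vectors and IRQs are torn down again in reverse order.
 */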
194 static int hns3_nic_net_up(struct net_device *netdev)
195 {
196         struct hns3_nic_priv *priv = netdev_priv(netdev);
197         struct hnae3_handle *h = priv->ae_handle;
198         int i, j;
199         int ret;
200
201         /* get irq resource for all vectors */
202         ret = hns3_nic_init_irq(priv);
203         if (ret) {
204                 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
205                 return ret;
206         }
207
208         /* enable the vectors */
209         for (i = 0; i < priv->vector_num; i++)
210                 hns3_vector_enable(&priv->tqp_vector[i]);
211
212         /* start the ae_dev */
213         ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
214         if (ret)
215                 goto out_start_err;
216
217         return 0;
218
219 out_start_err:
220         for (j = i - 1; j >= 0; j--)
221                 hns3_vector_disable(&priv->tqp_vector[j]);
222
223         hns3_nic_uninit_irq(priv);
224
225         return ret;
226 }
227
228 static int hns3_nic_net_open(struct net_device *netdev)
229 {
230         struct hns3_nic_priv *priv = netdev_priv(netdev);
231         struct hnae3_handle *h = priv->ae_handle;
232         int ret;
233
234         netif_carrier_off(netdev);
235
236         ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps);
237         if (ret) {
238                 netdev_err(netdev,
239                            "netif_set_real_num_tx_queues fail, ret=%d!\n",
240                            ret);
241                 return ret;
242         }
243
244         ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps);
245         if (ret) {
246                 netdev_err(netdev,
247                            "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
248                 return ret;
249         }
250
251         ret = hns3_nic_net_up(netdev);
252         if (ret) {
253                 netdev_err(netdev,
254                            "hns net up fail, ret=%d!\n", ret);
255                 return ret;
256         }
257
258         return 0;
259 }
260
261 static void hns3_nic_net_down(struct net_device *netdev)
262 {
263         struct hns3_nic_priv *priv = netdev_priv(netdev);
264         const struct hnae3_ae_ops *ops;
265         int i;
266
267         /* stop ae_dev */
268         ops = priv->ae_handle->ae_algo->ops;
269         if (ops->stop)
270                 ops->stop(priv->ae_handle);
271
272         /* disable vectors */
273         for (i = 0; i < priv->vector_num; i++)
274                 hns3_vector_disable(&priv->tqp_vector[i]);
275
276         /* free irq resources */
277         hns3_nic_uninit_irq(priv);
278 }
279
280 static int hns3_nic_net_stop(struct net_device *netdev)
281 {
282         netif_tx_stop_all_queues(netdev);
283         netif_carrier_off(netdev);
284
285         hns3_nic_net_down(netdev);
286
287         return 0;
288 }
289
290 void hns3_set_multicast_list(struct net_device *netdev)
291 {
292         struct hns3_nic_priv *priv = netdev_priv(netdev);
293         struct hnae3_handle *h = priv->ae_handle;
294         struct netdev_hw_addr *ha = NULL;
295
296         if (h->ae_algo->ops->set_mc_addr) {
297                 netdev_for_each_mc_addr(ha, netdev)
298                         if (h->ae_algo->ops->set_mc_addr(h, ha->addr))
299                                 netdev_err(netdev, "set multicast fail\n");
300         }
301 }
302
303 static int hns3_nic_uc_sync(struct net_device *netdev,
304                             const unsigned char *addr)
305 {
306         struct hns3_nic_priv *priv = netdev_priv(netdev);
307         struct hnae3_handle *h = priv->ae_handle;
308
309         if (h->ae_algo->ops->add_uc_addr)
310                 return h->ae_algo->ops->add_uc_addr(h, addr);
311
312         return 0;
313 }
314
315 static int hns3_nic_uc_unsync(struct net_device *netdev,
316                               const unsigned char *addr)
317 {
318         struct hns3_nic_priv *priv = netdev_priv(netdev);
319         struct hnae3_handle *h = priv->ae_handle;
320
321         if (h->ae_algo->ops->rm_uc_addr)
322                 return h->ae_algo->ops->rm_uc_addr(h, addr);
323
324         return 0;
325 }
326
327 static int hns3_nic_mc_sync(struct net_device *netdev,
328                             const unsigned char *addr)
329 {
330         struct hns3_nic_priv *priv = netdev_priv(netdev);
331         struct hnae3_handle *h = priv->ae_handle;
332
333         if (h->ae_algo->ops->add_mc_addr)
334                 return h->ae_algo->ops->add_mc_addr(h, addr);
335
336         return 0;
337 }
338
339 static int hns3_nic_mc_unsync(struct net_device *netdev,
340                               const unsigned char *addr)
341 {
342         struct hns3_nic_priv *priv = netdev_priv(netdev);
343         struct hnae3_handle *h = priv->ae_handle;
344
345         if (h->ae_algo->ops->rm_mc_addr)
346                 return h->ae_algo->ops->rm_mc_addr(h, addr);
347
348         return 0;
349 }
350
351 void hns3_nic_set_rx_mode(struct net_device *netdev)
352 {
353         struct hns3_nic_priv *priv = netdev_priv(netdev);
354         struct hnae3_handle *h = priv->ae_handle;
355
356         if (h->ae_algo->ops->set_promisc_mode) {
357                 if (netdev->flags & IFF_PROMISC)
358                         h->ae_algo->ops->set_promisc_mode(h, 1);
359                 else
360                         h->ae_algo->ops->set_promisc_mode(h, 0);
361         }
362         if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
363                 netdev_err(netdev, "sync uc address fail\n");
364         if (netdev->flags & IFF_MULTICAST)
365                 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
366                         netdev_err(netdev, "sync mc address fail\n");
367 }
368
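/* Prepare a GSO skb for hardware TSO: clear the IPv4/UDP checksum fields the
 * hardware will regenerate, convert the TCP pseudo-header checksum so it no
 * longer covers the payload length, and report the payload length and MSS
 * for the TX BD fields.
 */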
369 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
370                         u16 *mss, u32 *type_cs_vlan_tso)
371 {
372         u32 l4_offset, hdr_len;
373         union l3_hdr_info l3;
374         union l4_hdr_info l4;
375         u32 l4_paylen;
376         int ret;
377
378         if (!skb_is_gso(skb))
379                 return 0;
380
381         ret = skb_cow_head(skb, 0);
382         if (ret)
383                 return ret;
384
385         l3.hdr = skb_network_header(skb);
386         l4.hdr = skb_transport_header(skb);
387
388         /* Software should clear the IPv4's checksum field when tso is
389          * needed.
390          */
391         if (l3.v4->version == 4)
392                 l3.v4->check = 0;
393
394         /* tunnel packet.*/
395         if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
396                                          SKB_GSO_GRE_CSUM |
397                                          SKB_GSO_UDP_TUNNEL |
398                                          SKB_GSO_UDP_TUNNEL_CSUM)) {
399                 if ((!(skb_shinfo(skb)->gso_type &
400                     SKB_GSO_PARTIAL)) &&
401                     (skb_shinfo(skb)->gso_type &
402                     SKB_GSO_UDP_TUNNEL_CSUM)) {
403                         /* Software should clear the udp's checksum
404                          * field when tso is needed.
405                          */
406                         l4.udp->check = 0;
407                 }
408                 /* reset l3&l4 pointers from outer to inner headers */
409                 l3.hdr = skb_inner_network_header(skb);
410                 l4.hdr = skb_inner_transport_header(skb);
411
412                 /* Software should clear the IPv4's checksum field when
413                  * tso is needed.
414                  */
415                 if (l3.v4->version == 4)
416                         l3.v4->check = 0;
417         }
418
419         /* normal or tunnel packet*/
420         l4_offset = l4.hdr - skb->data;
421         hdr_len = (l4.tcp->doff * 4) + l4_offset;
422
423         /* remove the payload length from the inner pseudo checksum when doing TSO */
424         l4_paylen = skb->len - l4_offset;
425         csum_replace_by_diff(&l4.tcp->check,
426                              (__force __wsum)htonl(l4_paylen));
427
428         /* find the txbd field values */
429         *paylen = skb->len - hdr_len;
430         hnae_set_bit(*type_cs_vlan_tso,
431                      HNS3_TXD_TSO_B, 1);
432
433         /* get MSS for TSO */
434         *mss = skb_shinfo(skb)->gso_size;
435
436         return 0;
437 }
438
439 static void hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
440                                  u8 *il4_proto)
441 {
442         union {
443                 struct iphdr *v4;
444                 struct ipv6hdr *v6;
445                 unsigned char *hdr;
446         } l3;
447         unsigned char *l4_hdr;
448         unsigned char *exthdr;
449         u8 l4_proto_tmp;
450         __be16 frag_off;
451
452         /* find outer header point */
453         l3.hdr = skb_network_header(skb);
454         l4_hdr = skb_inner_transport_header(skb);
455
456         if (skb->protocol == htons(ETH_P_IPV6)) {
457                 exthdr = l3.hdr + sizeof(*l3.v6);
458                 l4_proto_tmp = l3.v6->nexthdr;
459                 if (l4_hdr != exthdr)
460                         ipv6_skip_exthdr(skb, exthdr - skb->data,
461                                          &l4_proto_tmp, &frag_off);
462         } else if (skb->protocol == htons(ETH_P_IP)) {
463                 l4_proto_tmp = l3.v4->protocol;
464         }
465
466         *ol4_proto = l4_proto_tmp;
467
468         /* tunnel packet */
469         if (!skb->encapsulation) {
470                 *il4_proto = 0;
471                 return;
472         }
473
474         /* find inner header point */
475         l3.hdr = skb_inner_network_header(skb);
476         l4_hdr = skb_inner_transport_header(skb);
477
478         if (l3.v6->version == 6) {
479                 exthdr = l3.hdr + sizeof(*l3.v6);
480                 l4_proto_tmp = l3.v6->nexthdr;
481                 if (l4_hdr != exthdr)
482                         ipv6_skip_exthdr(skb, exthdr - skb->data,
483                                          &l4_proto_tmp, &frag_off);
484         } else if (l3.v4->version == 4) {
485                 l4_proto_tmp = l3.v4->protocol;
486         }
487
488         *il4_proto = l4_proto_tmp;
489 }
490
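/* Fill the header length fields of the TX BD.  Lengths are programmed in
 * units of 2 bytes for L2 and 4 bytes for L3/L4 (hence the >> 1 and >> 2
 * shifts); for encapsulated packets the outer lengths go into
 * ol_type_vlan_len_msec and the inner lengths into type_cs_vlan_tso.
 */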
491 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
492                                 u8 il4_proto, u32 *type_cs_vlan_tso,
493                                 u32 *ol_type_vlan_len_msec)
494 {
495         union {
496                 struct iphdr *v4;
497                 struct ipv6hdr *v6;
498                 unsigned char *hdr;
499         } l3;
500         union {
501                 struct tcphdr *tcp;
502                 struct udphdr *udp;
503                 struct gre_base_hdr *gre;
504                 unsigned char *hdr;
505         } l4;
506         unsigned char *l2_hdr;
507         u8 l4_proto = ol4_proto;
508         u32 ol2_len;
509         u32 ol3_len;
510         u32 ol4_len;
511         u32 l2_len;
512         u32 l3_len;
513
514         l3.hdr = skb_network_header(skb);
515         l4.hdr = skb_transport_header(skb);
516
517         /* compute L2 header size for normal packet, defined in 2 Bytes */
518         l2_len = l3.hdr - skb->data;
519         hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
520                        HNS3_TXD_L2LEN_S, l2_len >> 1);
521
522         /* tunnel packet*/
523         if (skb->encapsulation) {
524                 /* compute OL2 header size, defined in 2 Bytes */
525                 ol2_len = l2_len;
526                 hnae_set_field(*ol_type_vlan_len_msec,
527                                HNS3_TXD_L2LEN_M,
528                                HNS3_TXD_L2LEN_S, ol2_len >> 1);
529
530                 /* compute OL3 header size, defined in 4 Bytes */
531                 ol3_len = l4.hdr - l3.hdr;
532                 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
533                                HNS3_TXD_L3LEN_S, ol3_len >> 2);
534
535                 /* MAC in UDP, MAC in GRE (0x6558)*/
536                 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
537                         /* switch MAC header ptr from outer to inner header.*/
538                         l2_hdr = skb_inner_mac_header(skb);
539
540                         /* compute OL4 header size, defined in 4 Bytes. */
541                         ol4_len = l2_hdr - l4.hdr;
542                         hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
543                                        HNS3_TXD_L4LEN_S, ol4_len >> 2);
544
545                         /* switch IP header ptr from outer to inner header */
546                         l3.hdr = skb_inner_network_header(skb);
547
548                         /* compute inner l2 header size, defined in 2 Bytes. */
549                         l2_len = l3.hdr - l2_hdr;
550                         hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
551                                        HNS3_TXD_L2LEN_S, l2_len >> 1);
552                 } else {
553                         /* skb packet types not supported by hardware;
554                          * the txbd len field is not filled.
555                          */
556                         return;
557                 }
558
559                 /* switch L4 header pointer from outer to inner */
560                 l4.hdr = skb_inner_transport_header(skb);
561
562                 l4_proto = il4_proto;
563         }
564
565         /* compute inner(/normal) L3 header size, defined in 4 Bytes */
566         l3_len = l4.hdr - l3.hdr;
567         hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
568                        HNS3_TXD_L3LEN_S, l3_len >> 2);
569
570         /* compute inner(/normal) L4 header size, defined in 4 Bytes */
571         switch (l4_proto) {
572         case IPPROTO_TCP:
573                 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
574                                HNS3_TXD_L4LEN_S, l4.tcp->doff);
575                 break;
576         case IPPROTO_SCTP:
577                 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
578                                HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
579                 break;
580         case IPPROTO_UDP:
581                 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
582                                HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
583                 break;
584         default:
585                 /* skb packet types not supported by hardware;
586                  * the txbd len field is not filled.
587                  */
588                 return;
589         }
590 }
591
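/* Program the L3/L4 type and checksum-offload bits (plus OL3/tunnel type for
 * encapsulated packets).  Unsupported tunnel or L4 types fall back to
 * skb_checksum_help(), except under TSO where the packet must be dropped.
 */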
592 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
593                                    u8 il4_proto, u32 *type_cs_vlan_tso,
594                                    u32 *ol_type_vlan_len_msec)
595 {
596         union {
597                 struct iphdr *v4;
598                 struct ipv6hdr *v6;
599                 unsigned char *hdr;
600         } l3;
601         u32 l4_proto = ol4_proto;
602
603         l3.hdr = skb_network_header(skb);
604
605         /* define OL3 type and tunnel type(OL4).*/
606         if (skb->encapsulation) {
607                 /* define outer network header type.*/
608                 if (skb->protocol == htons(ETH_P_IP)) {
609                         if (skb_is_gso(skb))
610                                 hnae_set_field(*ol_type_vlan_len_msec,
611                                                HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
612                                                HNS3_OL3T_IPV4_CSUM);
613                         else
614                                 hnae_set_field(*ol_type_vlan_len_msec,
615                                                HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
616                                                HNS3_OL3T_IPV4_NO_CSUM);
617
618                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
619                         hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
620                                        HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
621                 }
622
623                 /* define tunnel type(OL4).*/
624                 switch (l4_proto) {
625                 case IPPROTO_UDP:
626                         hnae_set_field(*ol_type_vlan_len_msec,
627                                        HNS3_TXD_TUNTYPE_M,
628                                        HNS3_TXD_TUNTYPE_S,
629                                        HNS3_TUN_MAC_IN_UDP);
630                         break;
631                 case IPPROTO_GRE:
632                         hnae_set_field(*ol_type_vlan_len_msec,
633                                        HNS3_TXD_TUNTYPE_M,
634                                        HNS3_TXD_TUNTYPE_S,
635                                        HNS3_TUN_NVGRE);
636                         break;
637                 default:
638                         /* drop the tunnel packet if hardware doesn't support the
639                          * tunnel type: hardware can't calculate the csum when doing TSO.
640                          */
641                         if (skb_is_gso(skb))
642                                 return -EDOM;
643
644                         /* the stack computes the IP header checksum already;
645                          * let software compute the l4 checksum when not doing TSO.
646                          */
647                         skb_checksum_help(skb);
648                         return 0;
649                 }
650
651                 l3.hdr = skb_inner_network_header(skb);
652                 l4_proto = il4_proto;
653         }
654
655         if (l3.v4->version == 4) {
656                 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
657                                HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
658
659                 /* the stack computes the IP header already, the only time we
660                  * need the hardware to recompute it is in the case of TSO.
661                  */
662                 if (skb_is_gso(skb))
663                         hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
664
665                 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
666         } else if (l3.v6->version == 6) {
667                 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
668                                HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
669                 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
670         }
671
672         switch (l4_proto) {
673         case IPPROTO_TCP:
674                 hnae_set_field(*type_cs_vlan_tso,
675                                HNS3_TXD_L4T_M,
676                                HNS3_TXD_L4T_S,
677                                HNS3_L4T_TCP);
678                 break;
679         case IPPROTO_UDP:
680                 hnae_set_field(*type_cs_vlan_tso,
681                                HNS3_TXD_L4T_M,
682                                HNS3_TXD_L4T_S,
683                                HNS3_L4T_UDP);
684                 break;
685         case IPPROTO_SCTP:
686                 hnae_set_field(*type_cs_vlan_tso,
687                                HNS3_TXD_L4T_M,
688                                HNS3_TXD_L4T_S,
689                                HNS3_L4T_SCTP);
690                 break;
691         default:
692                 /* drop the packet if hardware doesn't support its L4 type:
693                  * hardware can't calculate the csum when doing TSO.
694                  */
695                 if (skb_is_gso(skb))
696                         return -EDOM;
697
698                 /* the stack computes the IP header checksum already;
699                  * let software compute the l4 checksum when not doing TSO.
700                  */
701                 skb_checksum_help(skb);
702                 return 0;
703         }
704
705         return 0;
706 }
707
708 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
709 {
710         /* Config bd buffer end */
711         hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
712                        HNS3_TXD_BDTYPE_S, 0);
713         hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
714         hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
715         hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1);
716 }
717
718 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
719                           int size, dma_addr_t dma, int frag_end,
720                           enum hns_desc_type type)
721 {
722         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
723         struct hns3_desc *desc = &ring->desc[ring->next_to_use];
724         u32 ol_type_vlan_len_msec = 0;
725         u16 bdtp_fe_sc_vld_ra_ri = 0;
726         u32 type_cs_vlan_tso = 0;
727         struct sk_buff *skb;
728         u32 paylen = 0;
729         u16 mss = 0;
730         __be16 protocol;
731         u8 ol4_proto;
732         u8 il4_proto;
733         int ret;
734
735         /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
736         desc_cb->priv = priv;
737         desc_cb->length = size;
738         desc_cb->dma = dma;
739         desc_cb->type = type;
740
741         /* now, fill the descriptor */
742         desc->addr = cpu_to_le64(dma);
743         desc->tx.send_size = cpu_to_le16((u16)size);
744         hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
745         desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
746
747         if (type == DESC_TYPE_SKB) {
748                 skb = (struct sk_buff *)priv;
749                 paylen = skb->len;
750
751                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
752                         skb_reset_mac_len(skb);
753                         protocol = skb->protocol;
754
755                         /* vlan packet*/
756                         if (protocol == htons(ETH_P_8021Q)) {
757                                 protocol = vlan_get_protocol(skb);
758                                 skb->protocol = protocol;
759                         }
760                         hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
761                         hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
762                                             &type_cs_vlan_tso,
763                                             &ol_type_vlan_len_msec);
764                         ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
765                                                       &type_cs_vlan_tso,
766                                                       &ol_type_vlan_len_msec);
767                         if (ret)
768                                 return ret;
769
770                         ret = hns3_set_tso(skb, &paylen, &mss,
771                                            &type_cs_vlan_tso);
772                         if (ret)
773                                 return ret;
774                 }
775
776                 /* Set txbd */
777                 desc->tx.ol_type_vlan_len_msec =
778                         cpu_to_le32(ol_type_vlan_len_msec);
779                 desc->tx.type_cs_vlan_tso_len =
780                         cpu_to_le32(type_cs_vlan_tso);
781                 desc->tx.paylen = cpu_to_le32(paylen);
782                 desc->tx.mss = cpu_to_le16(mss);
783         }
784
785         /* move ring pointer to next.*/
786         ring_ptr_move_fw(ring, next_to_use);
787
788         return 0;
789 }
790
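/* TSO variant of fill_desc: a buffer larger than HNS3_MAX_BD_SIZE is split
 * across several BDs.  Only the last sub-BD of the last fragment carries the
 * frag_end flag, and only the first BD of an skb keeps DESC_TYPE_SKB.
 */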
791 static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
792                               int size, dma_addr_t dma, int frag_end,
793                               enum hns_desc_type type)
794 {
795         unsigned int frag_buf_num;
796         unsigned int k;
797         int sizeoflast;
798         int ret;
799
800         frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
801         sizeoflast = size % HNS3_MAX_BD_SIZE;
802         sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
803
804         /* When the frag size is bigger than hardware, split this frag */
805         for (k = 0; k < frag_buf_num; k++) {
806                 ret = hns3_fill_desc(ring, priv,
807                                      (k == frag_buf_num - 1) ?
808                                 sizeoflast : HNS3_MAX_BD_SIZE,
809                                 dma + HNS3_MAX_BD_SIZE * k,
810                                 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
811                                 (type == DESC_TYPE_SKB && !k) ?
812                                         DESC_TYPE_SKB : DESC_TYPE_PAGE);
813                 if (ret)
814                         return ret;
815         }
816
817         return 0;
818 }
819
820 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
821                                    struct hns3_enet_ring *ring)
822 {
823         struct sk_buff *skb = *out_skb;
824         struct skb_frag_struct *frag;
825         int bdnum_for_frag;
826         int frag_num;
827         int buf_num;
828         int size;
829         int i;
830
831         size = skb_headlen(skb);
832         buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
833
834         frag_num = skb_shinfo(skb)->nr_frags;
835         for (i = 0; i < frag_num; i++) {
836                 frag = &skb_shinfo(skb)->frags[i];
837                 size = skb_frag_size(frag);
838                 bdnum_for_frag =
839                         (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
840                 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
841                         return -ENOMEM;
842
843                 buf_num += bdnum_for_frag;
844         }
845
846         if (buf_num > ring_space(ring))
847                 return -EBUSY;
848
849         *bnum = buf_num;
850         return 0;
851 }
852
853 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
854                                   struct hns3_enet_ring *ring)
855 {
856         struct sk_buff *skb = *out_skb;
857         int buf_num;
858
859         /* No. of segments (plus a header) */
860         buf_num = skb_shinfo(skb)->nr_frags + 1;
861
862         if (buf_num > ring_space(ring))
863                 return -EBUSY;
864
865         *bnum = buf_num;
866
867         return 0;
868 }
869
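/* Error-path unwind for the xmit routine: walk the ring backwards from
 * next_to_use to next_to_use_orig and undo the DMA mappings that were set up
 * for the already-filled descriptors (dma_unmap_single() for the linear head,
 * dma_unmap_page() for page fragments).
 */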
870 static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
871 {
872         struct device *dev = ring_to_dev(ring);
873         unsigned int i;
874
875         for (i = 0; i < ring->desc_num; i++) {
876                 /* check if this is where we started */
877                 if (ring->next_to_use == next_to_use_orig)
878                         break;
879
880                 /* unmap the descriptor dma address */
881                 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
882                         dma_unmap_single(dev,
883                                          ring->desc_cb[ring->next_to_use].dma,
884                                         ring->desc_cb[ring->next_to_use].length,
885                                         DMA_TO_DEVICE);
886                 else
887                         dma_unmap_page(dev,
888                                        ring->desc_cb[ring->next_to_use].dma,
889                                        ring->desc_cb[ring->next_to_use].length,
890                                        DMA_TO_DEVICE);
891
892                 /* rollback one */
893                 ring_ptr_move_bw(ring, next_to_use);
894         }
895 }
896
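/* Main transmit path: check that the ring has room (ops.maybe_stop_tx),
 * DMA-map the linear head and every page fragment, fill one or more TX BDs
 * per mapping through ops.fill_desc, then ring the doorbell with
 * hnae_queue_xmit().  Mapping or descriptor errors are unwound with
 * hns_nic_dma_unmap().
 */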
897 static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb,
898                                      struct net_device *netdev)
899 {
900         struct hns3_nic_priv *priv = netdev_priv(netdev);
901         struct hns3_nic_ring_data *ring_data =
902                 &tx_ring_data(priv, skb->queue_mapping);
903         struct hns3_enet_ring *ring = ring_data->ring;
904         struct device *dev = priv->dev;
905         struct netdev_queue *dev_queue;
906         struct skb_frag_struct *frag;
907         int next_to_use_head;
908         int next_to_use_frag;
909         dma_addr_t dma;
910         int buf_num;
911         int seg_num;
912         int size;
913         int ret;
914         int i;
915
916         /* Prefetch the data used later */
917         prefetch(skb->data);
918
919         switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
920         case -EBUSY:
921                 u64_stats_update_begin(&ring->syncp);
922                 ring->stats.tx_busy++;
923                 u64_stats_update_end(&ring->syncp);
924
925                 goto out_net_tx_busy;
926         case -ENOMEM:
927                 u64_stats_update_begin(&ring->syncp);
928                 ring->stats.sw_err_cnt++;
929                 u64_stats_update_end(&ring->syncp);
930                 netdev_err(netdev, "no memory to xmit!\n");
931
932                 goto out_err_tx_ok;
933         default:
934                 break;
935         }
936
937         /* No. of segments (plus a header) */
938         seg_num = skb_shinfo(skb)->nr_frags + 1;
939         /* Fill the first part */
940         size = skb_headlen(skb);
941
942         next_to_use_head = ring->next_to_use;
943
944         dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
945         if (dma_mapping_error(dev, dma)) {
946                 netdev_err(netdev, "TX head DMA map failed\n");
947                 ring->stats.sw_err_cnt++;
948                 goto out_err_tx_ok;
949         }
950
951         ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
952                            DESC_TYPE_SKB);
953         if (ret)
954                 goto head_dma_map_err;
955
956         next_to_use_frag = ring->next_to_use;
957         /* Fill the fragments */
958         for (i = 1; i < seg_num; i++) {
959                 frag = &skb_shinfo(skb)->frags[i - 1];
960                 size = skb_frag_size(frag);
961                 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
962                 if (dma_mapping_error(dev, dma)) {
963                         netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
964                         ring->stats.sw_err_cnt++;
965                         goto frag_dma_map_err;
966                 }
967                 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
968                                     seg_num - 1 == i ? 1 : 0,
969                                     DESC_TYPE_PAGE);
970
971                 if (ret)
972                         goto frag_dma_map_err;
973         }
974
975         /* All BDs are filled; account the bytes and ring the doorbell */
976         dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
977         netdev_tx_sent_queue(dev_queue, skb->len);
978
979         wmb(); /* Commit all data before submit */
980
981         hnae_queue_xmit(ring->tqp, buf_num);
982
983         return NETDEV_TX_OK;
984
985 frag_dma_map_err:
986         hns_nic_dma_unmap(ring, next_to_use_frag);
987
988 head_dma_map_err:
989         hns_nic_dma_unmap(ring, next_to_use_head);
990
991 out_err_tx_ok:
992         dev_kfree_skb_any(skb);
993         return NETDEV_TX_OK;
994
995 out_net_tx_busy:
996         netif_stop_subqueue(netdev, ring_data->queue_index);
997         smp_mb(); /* Commit all data before submit */
998
999         return NETDEV_TX_BUSY;
1000 }
1001
1002 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1003 {
1004         struct hns3_nic_priv *priv = netdev_priv(netdev);
1005         struct hnae3_handle *h = priv->ae_handle;
1006         struct sockaddr *mac_addr = p;
1007         int ret;
1008
1009         if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1010                 return -EADDRNOTAVAIL;
1011
1012         ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1013         if (ret) {
1014                 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1015                 return ret;
1016         }
1017
1018         ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1019
1020         return 0;
1021 }
1022
1023 static int hns3_nic_set_features(struct net_device *netdev,
1024                                  netdev_features_t features)
1025 {
1026         struct hns3_nic_priv *priv = netdev_priv(netdev);
1027
1028         if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1029                 priv->ops.fill_desc = hns3_fill_desc_tso;
1030                 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1031         } else {
1032                 priv->ops.fill_desc = hns3_fill_desc;
1033                 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1034         }
1035
1036         netdev->features = features;
1037         return 0;
1038 }
1039
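/* Aggregate the per-ring software counters into rtnl_link_stats64.  TX rings
 * live in ring_data[0..queue_num - 1] and RX rings in
 * ring_data[queue_num..2 * queue_num - 1]; the u64_stats fetch/retry loop
 * gives a consistent snapshot even on 32-bit systems.
 */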
1040 static void
1041 hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1042 {
1043         struct hns3_nic_priv *priv = netdev_priv(netdev);
1044         int queue_num = priv->ae_handle->kinfo.num_tqps;
1045         struct hns3_enet_ring *ring;
1046         unsigned int start;
1047         unsigned int idx;
1048         u64 tx_bytes = 0;
1049         u64 rx_bytes = 0;
1050         u64 tx_pkts = 0;
1051         u64 rx_pkts = 0;
1052
1053         for (idx = 0; idx < queue_num; idx++) {
1054                 /* fetch the tx stats */
1055                 ring = priv->ring_data[idx].ring;
1056                 do {
1057                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1058                         tx_bytes += ring->stats.tx_bytes;
1059                         tx_pkts += ring->stats.tx_pkts;
1060                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1061
1062                 /* fetch the rx stats */
1063                 ring = priv->ring_data[idx + queue_num].ring;
1064                 do {
1065                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1066                         rx_bytes += ring->stats.rx_bytes;
1067                         rx_pkts += ring->stats.rx_pkts;
1068                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1069         }
1070
1071         stats->tx_bytes = tx_bytes;
1072         stats->tx_packets = tx_pkts;
1073         stats->rx_bytes = rx_bytes;
1074         stats->rx_packets = rx_pkts;
1075
1076         stats->rx_errors = netdev->stats.rx_errors;
1077         stats->multicast = netdev->stats.multicast;
1078         stats->rx_length_errors = netdev->stats.rx_length_errors;
1079         stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1080         stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1081
1082         stats->tx_errors = netdev->stats.tx_errors;
1083         stats->rx_dropped = netdev->stats.rx_dropped;
1084         stats->tx_dropped = netdev->stats.tx_dropped;
1085         stats->collisions = netdev->stats.collisions;
1086         stats->rx_over_errors = netdev->stats.rx_over_errors;
1087         stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1088         stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1089         stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1090         stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1091         stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1092         stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1093         stats->tx_window_errors = netdev->stats.tx_window_errors;
1094         stats->rx_compressed = netdev->stats.rx_compressed;
1095         stats->tx_compressed = netdev->stats.tx_compressed;
1096 }
1097
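/* Reference-count offloaded UDP tunnel ports per tunnel type: the first add
 * of a port programs it into hardware through the hnae3 add_tunnel_udp op,
 * later adds of the same port only bump the use count.
 */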
1098 static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1099                                  enum hns3_udp_tnl_type type)
1100 {
1101         struct hns3_nic_priv *priv = netdev_priv(netdev);
1102         struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1103         struct hnae3_handle *h = priv->ae_handle;
1104
1105         if (udp_tnl->used && udp_tnl->dst_port == port) {
1106                 udp_tnl->used++;
1107                 return;
1108         }
1109
1110         if (udp_tnl->used) {
1111                 netdev_warn(netdev,
1112                             "UDP tunnel [%d], port [%d] offload\n", type, port);
1113                 return;
1114         }
1115
1116         udp_tnl->dst_port = port;
1117         udp_tnl->used = 1;
1118         /* TBD send command to hardware to add port */
1119         if (h->ae_algo->ops->add_tunnel_udp)
1120                 h->ae_algo->ops->add_tunnel_udp(h, port);
1121 }
1122
1123 static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1124                                  enum hns3_udp_tnl_type type)
1125 {
1126         struct hns3_nic_priv *priv = netdev_priv(netdev);
1127         struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1128         struct hnae3_handle *h = priv->ae_handle;
1129
1130         if (!udp_tnl->used || udp_tnl->dst_port != port) {
1131                 netdev_warn(netdev,
1132                             "Invalid UDP tunnel port %d\n", port);
1133                 return;
1134         }
1135
1136         udp_tnl->used--;
1137         if (udp_tnl->used)
1138                 return;
1139
1140         udp_tnl->dst_port = 0;
1141         /* TBD send command to hardware to del port  */
1142         if (h->ae_algo->ops->del_tunnel_udp)
1143                 h->ae_algo->ops->del_tunnel_udp(h, port);
1144 }
1145
1146 /* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
1147  * @netdev: This physical port's netdev
1148  * @ti: Tunnel information
1149  */
1150 static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1151                                     struct udp_tunnel_info *ti)
1152 {
1153         u16 port_n = ntohs(ti->port);
1154
1155         switch (ti->type) {
1156         case UDP_TUNNEL_TYPE_VXLAN:
1157                 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1158                 break;
1159         case UDP_TUNNEL_TYPE_GENEVE:
1160                 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1161                 break;
1162         default:
1163                 netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1164                 break;
1165         }
1166 }
1167
1168 static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1169                                     struct udp_tunnel_info *ti)
1170 {
1171         u16 port_n = ntohs(ti->port);
1172
1173         switch (ti->type) {
1174         case UDP_TUNNEL_TYPE_VXLAN:
1175                 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1176                 break;
1177         case UDP_TUNNEL_TYPE_GENEVE:
1178                 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1179                 break;
1180         default:
1181                 break;
1182         }
1183 }
1184
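/* Map an mqprio TC count onto the hnae3 TC/queue layout: tc == 0 clears the
 * netdev TC configuration, otherwise each enabled TC advertises its TQP
 * count and offset to the stack via netdev_set_tc_queue().
 */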
1185 static int hns3_setup_tc(struct net_device *netdev, u8 tc)
1186 {
1187         struct hns3_nic_priv *priv = netdev_priv(netdev);
1188         struct hnae3_handle *h = priv->ae_handle;
1189         struct hnae3_knic_private_info *kinfo = &h->kinfo;
1190         unsigned int i;
1191         int ret;
1192
1193         if (tc > HNAE3_MAX_TC)
1194                 return -EINVAL;
1195
1196         if (kinfo->num_tc == tc)
1197                 return 0;
1198
1199         if (!netdev)
1200                 return -EINVAL;
1201
1202         if (!tc) {
1203                 netdev_reset_tc(netdev);
1204                 return 0;
1205         }
1206
1207         /* Set num_tc for netdev */
1208         ret = netdev_set_num_tc(netdev, tc);
1209         if (ret)
1210                 return ret;
1211
1212         /* Set per TC queues for the VSI */
1213         for (i = 0; i < HNAE3_MAX_TC; i++) {
1214                 if (kinfo->tc_info[i].enable)
1215                         netdev_set_tc_queue(netdev,
1216                                             kinfo->tc_info[i].tc,
1217                                             kinfo->tc_info[i].tqp_count,
1218                                             kinfo->tc_info[i].tqp_offset);
1219         }
1220
1221         return 0;
1222 }
1223
1224 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1225                              void *type_data)
1226 {
1227         struct tc_mqprio_qopt *mqprio = type_data;
1228
1229         if (type != TC_SETUP_MQPRIO)
1230                 return -EOPNOTSUPP;
1231
1232         return hns3_setup_tc(dev, mqprio->num_tc);
1233 }
1234
1235 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1236                                 __be16 proto, u16 vid)
1237 {
1238         struct hns3_nic_priv *priv = netdev_priv(netdev);
1239         struct hnae3_handle *h = priv->ae_handle;
1240         int ret = -EIO;
1241
1242         if (h->ae_algo->ops->set_vlan_filter)
1243                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1244
1245         return ret;
1246 }
1247
1248 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1249                                  __be16 proto, u16 vid)
1250 {
1251         struct hns3_nic_priv *priv = netdev_priv(netdev);
1252         struct hnae3_handle *h = priv->ae_handle;
1253         int ret = -EIO;
1254
1255         if (h->ae_algo->ops->set_vlan_filter)
1256                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1257
1258         return ret;
1259 }
1260
1261 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1262                                 u8 qos, __be16 vlan_proto)
1263 {
1264         struct hns3_nic_priv *priv = netdev_priv(netdev);
1265         struct hnae3_handle *h = priv->ae_handle;
1266         int ret = -EIO;
1267
1268         if (h->ae_algo->ops->set_vf_vlan_filter)
1269                 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1270                                                    qos, vlan_proto);
1271
1272         return ret;
1273 }
1274
1275 static const struct net_device_ops hns3_nic_netdev_ops = {
1276         .ndo_open               = hns3_nic_net_open,
1277         .ndo_stop               = hns3_nic_net_stop,
1278         .ndo_start_xmit         = hns3_nic_net_xmit,
1279         .ndo_set_mac_address    = hns3_nic_net_set_mac_address,
1280         .ndo_set_features       = hns3_nic_set_features,
1281         .ndo_get_stats64        = hns3_nic_get_stats64,
1282         .ndo_setup_tc           = hns3_nic_setup_tc,
1283         .ndo_set_rx_mode        = hns3_nic_set_rx_mode,
1284         .ndo_udp_tunnel_add     = hns3_nic_udp_tunnel_add,
1285         .ndo_udp_tunnel_del     = hns3_nic_udp_tunnel_del,
1286         .ndo_vlan_rx_add_vid    = hns3_vlan_rx_add_vid,
1287         .ndo_vlan_rx_kill_vid   = hns3_vlan_rx_kill_vid,
1288         .ndo_set_vf_vlan        = hns3_ndo_set_vf_vlan,
1289 };
1290
1291 /* hns3_probe - Device initialization routine
1292  * @pdev: PCI device information struct
1293  * @ent: entry in hns3_pci_tbl
1294  *
1295  * hns3_probe initializes a PF identified by a pci_dev structure.
1296  * The OS initialization, configuring of the PF private structure,
1297  * and a hardware reset occur.
1298  *
1299  * Returns 0 on success, negative on failure
1300  */
1301 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1302 {
1303         struct hnae3_ae_dev *ae_dev;
1304         int ret;
1305
1306         ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1307                               GFP_KERNEL);
1308         if (!ae_dev) {
1309                 ret = -ENOMEM;
1310                 return ret;
1311         }
1312
1313         ae_dev->pdev = pdev;
1314         ae_dev->dev_type = HNAE3_DEV_KNIC;
1315         pci_set_drvdata(pdev, ae_dev);
1316
1317         return hnae3_register_ae_dev(ae_dev);
1318 }
1319
1320 /* hns3_remove - Device removal routine
1321  * @pdev: PCI device information struct
1322  */
1323 static void hns3_remove(struct pci_dev *pdev)
1324 {
1325         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1326
1327         hnae3_unregister_ae_dev(ae_dev);
1328
1329         devm_kfree(&pdev->dev, ae_dev);
1330
1331         pci_set_drvdata(pdev, NULL);
1332 }
1333
1334 static struct pci_driver hns3_driver = {
1335         .name     = hns3_driver_name,
1336         .id_table = hns3_pci_tbl,
1337         .probe    = hns3_probe,
1338         .remove   = hns3_remove,
1339 };
1340
1341 /* set the default features for the hns3 netdev */
1342 static void hns3_set_default_feature(struct net_device *netdev)
1343 {
1344         netdev->priv_flags |= IFF_UNICAST_FLT;
1345
1346         netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1347                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1348                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1349                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1350                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1351
1352         netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1353
1354         netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1355
1356         netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1357                 NETIF_F_HW_VLAN_CTAG_FILTER |
1358                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1359                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1360                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1361                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1362
1363         netdev->vlan_features |=
1364                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1365                 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1366                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1367                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1368                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1369
1370         netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1371                 NETIF_F_HW_VLAN_CTAG_FILTER |
1372                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1373                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1374                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1375                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1376 }
1377
1378 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1379                              struct hns3_desc_cb *cb)
1380 {
1381         unsigned int order = hnae_page_order(ring);
1382         struct page *p;
1383
1384         p = dev_alloc_pages(order);
1385         if (!p)
1386                 return -ENOMEM;
1387
1388         cb->priv = p;
1389         cb->page_offset = 0;
1390         cb->reuse_flag = 0;
1391         cb->buf  = page_address(p);
1392         cb->length = hnae_page_size(ring);
1393         cb->type = DESC_TYPE_PAGE;
1394
1395         memset(cb->buf, 0, cb->length);
1396
1397         return 0;
1398 }
1399
1400 static void hns3_free_buffer(struct hns3_enet_ring *ring,
1401                              struct hns3_desc_cb *cb)
1402 {
1403         if (cb->type == DESC_TYPE_SKB)
1404                 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1405         else if (!HNAE3_IS_TX_RING(ring))
1406                 put_page((struct page *)cb->priv);
1407         memset(cb, 0, sizeof(*cb));
1408 }
1409
1410 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1411 {
1412         cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1413                                cb->length, ring_to_dma_dir(ring));
1414
1415         if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1416                 return -EIO;
1417
1418         return 0;
1419 }
1420
1421 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1422                               struct hns3_desc_cb *cb)
1423 {
1424         if (cb->type == DESC_TYPE_SKB)
1425                 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1426                                  ring_to_dma_dir(ring));
1427         else
1428                 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1429                                ring_to_dma_dir(ring));
1430 }
1431
1432 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1433 {
1434         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1435         ring->desc[i].addr = 0;
1436 }
1437
1438 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1439 {
1440         struct hns3_desc_cb *cb = &ring->desc_cb[i];
1441
1442         if (!ring->desc_cb[i].dma)
1443                 return;
1444
1445         hns3_buffer_detach(ring, i);
1446         hns3_free_buffer(ring, cb);
1447 }
1448
1449 static void hns3_free_buffers(struct hns3_enet_ring *ring)
1450 {
1451         int i;
1452
1453         for (i = 0; i < ring->desc_num; i++)
1454                 hns3_free_buffer_detach(ring, i);
1455 }
1456
1457 /* free desc along with its attached buffer */
1458 static void hns3_free_desc(struct hns3_enet_ring *ring)
1459 {
1460         hns3_free_buffers(ring);
1461
1462         dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1463                          ring->desc_num * sizeof(ring->desc[0]),
1464                          DMA_BIDIRECTIONAL);
1465         ring->desc_dma_addr = 0;
1466         kfree(ring->desc);
1467         ring->desc = NULL;
1468 }
1469
1470 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1471 {
1472         int size = ring->desc_num * sizeof(ring->desc[0]);
1473
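             /* The descriptor array is written by the CPU (buffer addresses and
              * TX metadata) and written back by the hardware (RX BD info), which
              * is why it is mapped DMA_BIDIRECTIONAL below.
              */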
1474         ring->desc = kzalloc(size, GFP_KERNEL);
1475         if (!ring->desc)
1476                 return -ENOMEM;
1477
1478         ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1479                                              size, DMA_BIDIRECTIONAL);
1480         if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1481                 ring->desc_dma_addr = 0;
1482                 kfree(ring->desc);
1483                 ring->desc = NULL;
1484                 return -ENOMEM;
1485         }
1486
1487         return 0;
1488 }
1489
1490 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1491                                    struct hns3_desc_cb *cb)
1492 {
1493         int ret;
1494
1495         ret = hns3_alloc_buffer(ring, cb);
1496         if (ret)
1497                 goto out;
1498
1499         ret = hns3_map_buffer(ring, cb);
1500         if (ret)
1501                 goto out_with_buf;
1502
1503         return 0;
1504
1505 out_with_buf:
1506         hns3_free_buffer(ring, cb); /* only free the buffer just allocated */
1507 out:
1508         return ret;
1509 }
1510
1511 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1512 {
1513         int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1514
1515         if (ret)
1516                 return ret;
1517
1518         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1519
1520         return 0;
1521 }
1522
1523 /* Allocate memory for raw pkg, and map with dma */
1524 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1525 {
1526         int i, j, ret;
1527
1528         for (i = 0; i < ring->desc_num; i++) {
1529                 ret = hns3_alloc_buffer_attach(ring, i);
1530                 if (ret)
1531                         goto out_buffer_fail;
1532         }
1533
1534         return 0;
1535
1536 out_buffer_fail:
1537         for (j = i - 1; j >= 0; j--)
1538                 hns3_free_buffer_detach(ring, j);
1539         return ret;
1540 }
1541
1542 /* detach an in-use buffer and replace it with a reserved one */
1543 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1544                                 struct hns3_desc_cb *res_cb)
1545 {
1546         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1547         ring->desc_cb[i] = *res_cb;
1548         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1549 }
1550
1551 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1552 {
1553         ring->desc_cb[i].reuse_flag = 0;
1554         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1555                 + ring->desc_cb[i].page_offset);
1556 }
1557
1558 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1559                                       int *pkts)
1560 {
1561         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1562
1563         (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1564         (*bytes) += desc_cb->length;
1565         /* desc_cb will be cleaned by hns3_free_buffer_detach() below */
1566         hns3_free_buffer_detach(ring, ring->next_to_clean);
1567
1568         ring_ptr_move_fw(ring, next_to_clean);
1569 }
1570
1571 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1572 {
1573         int u = ring->next_to_use;
1574         int c = ring->next_to_clean;
1575
1576         if (unlikely(h >= ring->desc_num))
1577                 return 0;
1578
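             /* The ring is circular: a head value is valid only if it lies in
              * the half-open interval (next_to_clean, next_to_use], taking
              * wrap-around into account. For example, with desc_num = 8,
              * c = 6 and u = 2, the values 7, 0, 1 and 2 are valid while
              * 3..6 are not.
              */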
1579         return u > c ? (h > c && h <= u) : (h > c || h <= u);
1580 }
1581
1582 int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1583 {
1584         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1585         struct netdev_queue *dev_queue;
1586         int bytes, pkts;
1587         int head;
1588
1589         head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1590         rmb(); /* Make sure head is read before touching any descriptor data */
1591
1592         if (is_ring_empty(ring) || head == ring->next_to_clean)
1593                 return 0; /* no data to poll */
1594
1595         if (!is_valid_clean_head(ring, head)) {
1596                 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1597                            ring->next_to_use, ring->next_to_clean);
1598
1599                 u64_stats_update_begin(&ring->syncp);
1600                 ring->stats.io_err_cnt++;
1601                 u64_stats_update_end(&ring->syncp);
1602                 return -EIO;
1603         }
1604
1605         bytes = 0;
1606         pkts = 0;
1607         while (head != ring->next_to_clean && budget) {
1608                 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1609                 /* Issue prefetch for next Tx descriptor */
1610                 prefetch(&ring->desc_cb[ring->next_to_clean]);
1611                 budget--;
1612         }
1613
1614         ring->tqp_vector->tx_group.total_bytes += bytes;
1615         ring->tqp_vector->tx_group.total_packets += pkts;
1616
1617         u64_stats_update_begin(&ring->syncp);
1618         ring->stats.tx_bytes += bytes;
1619         ring->stats.tx_pkts += pkts;
1620         u64_stats_update_end(&ring->syncp);
1621
1622         dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1623         netdev_tx_completed_queue(dev_queue, pkts, bytes);
1624
1625         if (unlikely(pkts && netif_carrier_ok(netdev) &&
1626                      (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1627                 /* Make sure that anybody stopping the queue after this
1628                  * sees the new next_to_clean.
1629                  */
1630                 smp_mb();
1631                 if (netif_tx_queue_stopped(dev_queue)) {
1632                         netif_tx_wake_queue(dev_queue);
1633                         ring->stats.restart_queue++;
1634                 }
1635         }
1636
1637         return !!budget;
1638 }
1639
1640 static int hns3_desc_unused(struct hns3_enet_ring *ring)
1641 {
1642         int ntc = ring->next_to_clean;
1643         int ntu = ring->next_to_use;
1644
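             /* Number of descriptor slots not currently handed to hardware,
              * i.e. available to be (re)filled, computed modulo the ring size.
              * E.g. with desc_num = 512, ntc = 10 and ntu = 500 this yields
              * 512 + 10 - 500 = 22.
              */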
1645         return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1646 }
1647
1648 static void
1649 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
1650 {
1651         struct hns3_desc_cb *desc_cb;
1652         struct hns3_desc_cb res_cbs;
1653         int i, ret;
1654
1655         for (i = 0; i < cleaned_count; i++) {
1656                 desc_cb = &ring->desc_cb[ring->next_to_use];
1657                 if (desc_cb->reuse_flag) {
1658                         u64_stats_update_begin(&ring->syncp);
1659                         ring->stats.reuse_pg_cnt++;
1660                         u64_stats_update_end(&ring->syncp);
1661
1662                         hns3_reuse_buffer(ring, ring->next_to_use);
1663                 } else {
1664                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
1665                         if (ret) {
1666                                 u64_stats_update_begin(&ring->syncp);
1667                                 ring->stats.sw_err_cnt++;
1668                                 u64_stats_update_end(&ring->syncp);
1669
1670                                 netdev_err(ring->tqp->handle->kinfo.netdev,
1671                                            "hnae reserve buffer map failed.\n");
1672                                 break;
1673                         }
1674                         hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1675                 }
1676
1677                 ring_ptr_move_fw(ring, next_to_use);
1678         }
1679
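             /* Writing the number of buffers refilled above to the RX head
              * register acts as the doorbell that hands those descriptors back
              * to the hardware.
              */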
1680         wmb(); /* Make sure all descriptor writes complete before the doorbell */
1681         writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1682 }
1683
1684 /* hns3_nic_get_headlen - determine size of header for LRO/GRO
1685  * @data: pointer to the start of the headers
1686  * @flag: RX descriptor l234info field describing the packet headers
1687  * @max_size: total length of section to find headers in
1688  * This function is meant to determine the length of headers that will
1689  * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
1690  * motivation of doing this is to only perform one pull for IPv4 TCP
1691  * packets so that we can do basic things like calculating the gso_size
1692  * based on the average data per packet.
1693  */
1694 static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1695                                          unsigned int max_size)
1696 {
1697         unsigned char *network;
1698         u8 hlen;
1699
1700         /* This should never happen, but better safe than sorry */
1701         if (max_size < ETH_HLEN)
1702                 return max_size;
1703
1704         /* Initialize network frame pointer */
1705         network = data;
1706
1707         /* Set first protocol and move network header forward */
1708         network += ETH_HLEN;
1709
1710         /* Handle any vlan tag if present */
1711         if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1712                 == HNS3_RX_FLAG_VLAN_PRESENT) {
1713                 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1714                         return max_size;
1715
1716                 network += VLAN_HLEN;
1717         }
1718
1719         /* Handle L3 protocols */
1720         if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1721                 == HNS3_RX_FLAG_L3ID_IPV4) {
1722                 if ((typeof(max_size))(network - data) >
1723                     (max_size - sizeof(struct iphdr)))
1724                         return max_size;
1725
1726                 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1727                 hlen = (network[0] & 0x0F) << 2;
1728
1729                 /* Verify hlen meets minimum size requirements */
1730                 if (hlen < sizeof(struct iphdr))
1731                         return network - data;
1732
1733                 /* Record next protocol if header is present */
1734         } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1735                 == HNS3_RX_FLAG_L3ID_IPV6) {
1736                 if ((typeof(max_size))(network - data) >
1737                     (max_size - sizeof(struct ipv6hdr)))
1738                         return max_size;
1739
1740                 /* Record next protocol */
1741                 hlen = sizeof(struct ipv6hdr);
1742         } else {
1743                 return network - data;
1744         }
1745
1746         /* Relocate pointer to start of L4 header */
1747         network += hlen;
1748
1749         /* Finally sort out TCP/UDP */
1750         if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1751                 == HNS3_RX_FLAG_L4ID_TCP) {
1752                 if ((typeof(max_size))(network - data) >
1753                     (max_size - sizeof(struct tcphdr)))
1754                         return max_size;
1755
1756                 /* Access doff as a u8 to avoid unaligned access on ia64 */
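                     /* doff sits in the upper 4 bits of byte 12 of the TCP
                      * header and counts 32-bit words, so masking and shifting
                      * right by 2 gives the header length in bytes
                      * (e.g. doff = 5 -> 20).
                      */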
1757                 hlen = (network[12] & 0xF0) >> 2;
1758
1759                 /* Verify hlen meets minimum size requirements */
1760                 if (hlen < sizeof(struct tcphdr))
1761                         return network - data;
1762
1763                 network += hlen;
1764         } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1765                 == HNS3_RX_FLAG_L4ID_UDP) {
1766                 if ((typeof(max_size))(network - data) >
1767                     (max_size - sizeof(struct udphdr)))
1768                         return max_size;
1769
1770                 network += sizeof(struct udphdr);
1771         }
1772
1773         /* If everything has gone correctly network should be the
1774          * data section of the packet and will be the end of the header.
1775          * If not then it probably represents the end of the last recognized
1776          * header.
1777          */
1778         if ((typeof(max_size))(network - data) < max_size)
1779                 return network - data;
1780         else
1781                 return max_size;
1782 }
1783
1784 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
1785                                 struct hns3_enet_ring *ring, int pull_len,
1786                                 struct hns3_desc_cb *desc_cb)
1787 {
1788         struct hns3_desc *desc;
1789         int truesize, size;
1790         int last_offset;
1791         bool twobufs;
1792
1793         twobufs = ((PAGE_SIZE < 8192) &&
1794                 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
1795
1796         desc = &ring->desc[ring->next_to_clean];
1797         size = le16_to_cpu(desc->rx.size);
1798
1799         if (twobufs) {
1800                 truesize = hnae_buf_size(ring);
1801         } else {
1802                 truesize = ALIGN(size, L1_CACHE_BYTES);
1803                 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
1804         }
1805
1806         skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
1807                         size - pull_len, truesize - pull_len);
1808
1809         /* Avoid reusing remote (non-local NUMA) pages; reuse_flag defaults to 0 */
1810         if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
1811                 return;
1812
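             /* In the two-buffer scheme a (4K) page is split into two 2K
              * buffers and truesize equals the buffer size, so XOR-ing
              * page_offset with truesize below simply flips between the two
              * halves of the page (0 <-> 2048).
              */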
1813         if (twobufs) {
1814                 /* If we are the only owner of the page we can reuse it */
1815                 if (likely(page_count(desc_cb->priv) == 1)) {
1816                         /* Flip page offset to other buffer */
1817                         desc_cb->page_offset ^= truesize;
1818
1819                         desc_cb->reuse_flag = 1;
1820                         /* bump ref count on the page before it is given */
1821                         get_page(desc_cb->priv);
1822                 }
1823                 return;
1824         }
1825
1826         /* Move offset up to the next cache line */
1827         desc_cb->page_offset += truesize;
1828
1829         if (desc_cb->page_offset <= last_offset) {
1830                 desc_cb->reuse_flag = 1;
1831                 /* Bump ref count on the page before it is given */
1832                 get_page(desc_cb->priv);
1833         }
1834 }
1835
1836 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
1837                              struct hns3_desc *desc)
1838 {
1839         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1840         int l3_type, l4_type;
1841         u32 bd_base_info;
1842         int ol4_type;
1843         u32 l234info;
1844
1845         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1846         l234info = le32_to_cpu(desc->rx.l234_info);
1847
1848         skb->ip_summed = CHECKSUM_NONE;
1849
1850         skb_checksum_none_assert(skb);
1851
1852         if (!(netdev->features & NETIF_F_RXCSUM))
1853                 return;
1854
1855         /* check if hardware has done checksum */
1856         if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
1857                 return;
1858
1859         if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
1860                      hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
1861                      hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
1862                      hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
1863                 netdev_err(netdev, "L3/L4 error pkt\n");
1864                 u64_stats_update_begin(&ring->syncp);
1865                 ring->stats.l3l4_csum_err++;
1866                 u64_stats_update_end(&ring->syncp);
1867
1868                 return;
1869         }
1870
1871         l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
1872                                  HNS3_RXD_L3ID_S);
1873         l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
1874                                  HNS3_RXD_L4ID_S);
1875
1876         ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1877         switch (ol4_type) {
1878         case HNS3_OL4_TYPE_MAC_IN_UDP:
1879         case HNS3_OL4_TYPE_NVGRE:
1880                 skb->csum_level = 1;
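                     /* fall through: tunnelled packets also use the inner
                      * L3/L4 type check below
                      */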
1881         case HNS3_OL4_TYPE_NO_TUN:
1882                 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
1883                 if (l3_type == HNS3_L3_TYPE_IPV4 ||
1884                     (l3_type == HNS3_L3_TYPE_IPV6 &&
1885                      (l4_type == HNS3_L4_TYPE_UDP ||
1886                       l4_type == HNS3_L4_TYPE_TCP ||
1887                       l4_type == HNS3_L4_TYPE_SCTP)))
1888                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1889                 break;
1890         }
1891 }
1892
1893 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
1894                              struct sk_buff **out_skb, int *out_bnum)
1895 {
1896         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1897         struct hns3_desc_cb *desc_cb;
1898         struct hns3_desc *desc;
1899         struct sk_buff *skb;
1900         unsigned char *va;
1901         u32 bd_base_info;
1902         int pull_len;
1903         u32 l234info;
1904         int length;
1905         int bnum;
1906
1907         desc = &ring->desc[ring->next_to_clean];
1908         desc_cb = &ring->desc_cb[ring->next_to_clean];
1909
1910         prefetch(desc);
1911
1912         length = le16_to_cpu(desc->rx.pkt_len);
1913         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1914         l234info = le32_to_cpu(desc->rx.l234_info);
1915
1916         /* Check valid BD */
1917         if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
1918                 return -EFAULT;
1919
1920         va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
1921
1922         /* Prefetch the first cache line of the first page.
1923          * The idea is to cache the first few bytes of the packet header.
1924          * With a 64B L1 cache line we need to prefetch twice to cover 128B,
1925          * but some CPUs have 128B L1 cache lines, in which case a single
1926          * prefetch is enough to bring in the relevant part of the header
1927          * (see the L1_CACHE_BYTES check below).
1928          */
1929         prefetch(va);
1930 #if L1_CACHE_BYTES < 128
1931         prefetch(va + L1_CACHE_BYTES);
1932 #endif
1933
1934         skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
1935                                         HNS3_RX_HEAD_SIZE);
1936         if (unlikely(!skb)) {
1937                 netdev_err(netdev, "alloc rx skb fail\n");
1938
1939                 u64_stats_update_begin(&ring->syncp);
1940                 ring->stats.sw_err_cnt++;
1941                 u64_stats_update_end(&ring->syncp);
1942
1943                 return -ENOMEM;
1944         }
1945
1946         prefetchw(skb->data);
1947
1948         bnum = 1;
1949         if (length <= HNS3_RX_HEAD_SIZE) {
1950                 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
1951
1952                 /* We can reuse buffer as-is, just make sure it is local */
1953                 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
1954                         desc_cb->reuse_flag = 1;
1955                 else /* This page cannot be reused so discard it */
1956                         put_page(desc_cb->priv);
1957
1958                 ring_ptr_move_fw(ring, next_to_clean);
1959         } else {
1960                 u64_stats_update_begin(&ring->syncp);
1961                 ring->stats.seg_pkt_cnt++;
1962                 u64_stats_update_end(&ring->syncp);
1963
1964                 pull_len = hns3_nic_get_headlen(va, l234info,
1965                                                 HNS3_RX_HEAD_SIZE);
1966                 memcpy(__skb_put(skb, pull_len), va,
1967                        ALIGN(pull_len, sizeof(long)));
1968
1969                 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
1970                 ring_ptr_move_fw(ring, next_to_clean);
1971
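                     /* A packet larger than HNS3_RX_HEAD_SIZE spans several
                      * BDs: the header bytes were copied into the linear area
                      * above and the rest of each BD's page is attached as a
                      * frag until the frame-end (FE) bit is seen.
                      */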
1972                 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
1973                         desc = &ring->desc[ring->next_to_clean];
1974                         desc_cb = &ring->desc_cb[ring->next_to_clean];
1975                         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1976                         hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
1977                         ring_ptr_move_fw(ring, next_to_clean);
1978                         bnum++;
1979                 }
1980         }
1981
1982         *out_bnum = bnum;
1983
1984         if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
1985                 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
1986                            ((u64 *)desc)[0], ((u64 *)desc)[1]);
1987                 u64_stats_update_begin(&ring->syncp);
1988                 ring->stats.non_vld_descs++;
1989                 u64_stats_update_end(&ring->syncp);
1990
1991                 dev_kfree_skb_any(skb);
1992                 return -EINVAL;
1993         }
1994
1995         if (unlikely((!desc->rx.pkt_len) ||
1996                      hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
1997                 netdev_err(netdev, "truncated pkt\n");
1998                 u64_stats_update_begin(&ring->syncp);
1999                 ring->stats.err_pkt_len++;
2000                 u64_stats_update_end(&ring->syncp);
2001
2002                 dev_kfree_skb_any(skb);
2003                 return -EFAULT;
2004         }
2005
2006         if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2007                 netdev_err(netdev, "L2 error pkt\n");
2008                 u64_stats_update_begin(&ring->syncp);
2009                 ring->stats.l2_err++;
2010                 u64_stats_update_end(&ring->syncp);
2011
2012                 dev_kfree_skb_any(skb);
2013                 return -EFAULT;
2014         }
2015
2016         u64_stats_update_begin(&ring->syncp);
2017         ring->stats.rx_pkts++;
2018         ring->stats.rx_bytes += skb->len;
2019         u64_stats_update_end(&ring->syncp);
2020
2021         ring->tqp_vector->rx_group.total_bytes += skb->len;
2022
2023         hns3_rx_checksum(ring, skb, desc);
2024         return 0;
2025 }
2026
2027 static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget)
2028 {
2029 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2030         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2031         int recv_pkts, recv_bds, clean_count, err;
2032         int unused_count = hns3_desc_unused(ring);
2033         struct sk_buff *skb = NULL;
2034         int num, bnum = 0;
2035
2036         num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2037         rmb(); /* Make sure num is read before any other descriptor data */
2038
2039         recv_pkts = 0, recv_bds = 0, clean_count = 0;
2040         num -= unused_count;
2041
2042         while (recv_pkts < budget && recv_bds < num) {
2043                 /* Reuse or realloc buffers */
2044                 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2045                         hns3_nic_alloc_rx_buffers(ring,
2046                                                   clean_count + unused_count);
2047                         clean_count = 0;
2048                         unused_count = hns3_desc_unused(ring);
2049                 }
2050
2051                 /* Poll one pkt */
2052                 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2053                 if (unlikely(!skb)) /* This fault cannot be repaired */
2054                         goto out;
2055
2056                 recv_bds += bnum;
2057                 clean_count += bnum;
2058                 if (unlikely(err)) {  /* Skip this erroneous packet */
2059                         recv_pkts++;
2060                         continue;
2061                 }
2062
2063                 /* Hand the packet up to the network stack */
2064                 skb->protocol = eth_type_trans(skb, netdev);
2065                 (void)napi_gro_receive(&ring->tqp_vector->napi, skb);
2066
2067                 recv_pkts++;
2068         }
2069
2070 out:
2071         /* Refill any buffers that were cleaned or left unused */
2072         if (clean_count + unused_count > 0)
2073                 hns3_nic_alloc_rx_buffers(ring,
2074                                           clean_count + unused_count);
2075
2076         return recv_pkts;
2077 }
2078
2079 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2080 {
2081 #define HNS3_RX_ULTRA_PACKET_RATE 40000
2082         enum hns3_flow_level_range new_flow_level;
2083         struct hns3_enet_tqp_vector *tqp_vector;
2084         int packets_per_secs;
2085         int bytes_per_usecs;
2086         u16 new_int_gl;
2087         int usecs;
2088
2089         if (!ring_group->int_gl)
2090                 return false;
2091
2092         if (ring_group->total_packets == 0) {
2093                 ring_group->int_gl = HNS3_INT_GL_50K;
2094                 ring_group->flow_level = HNS3_FLOW_LOW;
2095                 return true;
2096         }
2097
2098         /* Simple throttlerate management
2099          * 0-10MB/s   lower     (50000 ints/s)
2100          * 10-20MB/s   middle    (20000 ints/s)
2101          * 20-1249MB/s high      (18000 ints/s)
2102          * > 40000pps  ultra     (8000 ints/s)
2103          */
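             /* int_gl appears to be programmed in units of 2 us, hence the
              * shift below to obtain the interrupt gap in microseconds. Since
              * 1 byte/us is roughly 1 MB/s, the thresholds of 10 and 20 used
              * below correspond to the 10 MB/s and 20 MB/s bands in the table
              * above.
              */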
2104         new_flow_level = ring_group->flow_level;
2105         new_int_gl = ring_group->int_gl;
2106         tqp_vector = ring_group->ring->tqp_vector;
2107         usecs = (ring_group->int_gl << 1);
2108         bytes_per_usecs = ring_group->total_bytes / usecs;
2109         /* scale to packets per second (1 s = 1000000 us) */
2110         packets_per_secs = ring_group->total_packets * 1000000 / usecs;
2111
2112         switch (new_flow_level) {
2113         case HNS3_FLOW_LOW:
2114                 if (bytes_per_usecs > 10)
2115                         new_flow_level = HNS3_FLOW_MID;
2116                 break;
2117         case HNS3_FLOW_MID:
2118                 if (bytes_per_usecs > 20)
2119                         new_flow_level = HNS3_FLOW_HIGH;
2120                 else if (bytes_per_usecs <= 10)
2121                         new_flow_level = HNS3_FLOW_LOW;
2122                 break;
2123         case HNS3_FLOW_HIGH:
2124         case HNS3_FLOW_ULTRA:
2125         default:
2126                 if (bytes_per_usecs <= 20)
2127                         new_flow_level = HNS3_FLOW_MID;
2128                 break;
2129         }
2130
2131         if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2132             (&tqp_vector->rx_group == ring_group))
2133                 new_flow_level = HNS3_FLOW_ULTRA;
2134
2135         switch (new_flow_level) {
2136         case HNS3_FLOW_LOW:
2137                 new_int_gl = HNS3_INT_GL_50K;
2138                 break;
2139         case HNS3_FLOW_MID:
2140                 new_int_gl = HNS3_INT_GL_20K;
2141                 break;
2142         case HNS3_FLOW_HIGH:
2143                 new_int_gl = HNS3_INT_GL_18K;
2144                 break;
2145         case HNS3_FLOW_ULTRA:
2146                 new_int_gl = HNS3_INT_GL_8K;
2147                 break;
2148         default:
2149                 break;
2150         }
2151
2152         ring_group->total_bytes = 0;
2153         ring_group->total_packets = 0;
2154         ring_group->flow_level = new_flow_level;
2155         if (new_int_gl != ring_group->int_gl) {
2156                 ring_group->int_gl = new_int_gl;
2157                 return true;
2158         }
2159         return false;
2160 }
2161
2162 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2163 {
2164         u16 rx_int_gl, tx_int_gl;
2165         bool rx, tx;
2166
2167         rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2168         tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2169         rx_int_gl = tqp_vector->rx_group.int_gl;
2170         tx_int_gl = tqp_vector->tx_group.int_gl;
2171         if (rx && tx) {
2172                 if (rx_int_gl > tx_int_gl) {
2173                         tqp_vector->tx_group.int_gl = rx_int_gl;
2174                         tqp_vector->tx_group.flow_level =
2175                                 tqp_vector->rx_group.flow_level;
2176                         hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2177                 } else {
2178                         tqp_vector->rx_group.int_gl = tx_int_gl;
2179                         tqp_vector->rx_group.flow_level =
2180                                 tqp_vector->tx_group.flow_level;
2181                         hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2182                 }
2183         }
2184 }
2185
2186 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2187 {
2188         struct hns3_enet_ring *ring;
2189         int rx_pkt_total = 0;
2190
2191         struct hns3_enet_tqp_vector *tqp_vector =
2192                 container_of(napi, struct hns3_enet_tqp_vector, napi);
2193         bool clean_complete = true;
2194         int rx_budget;
2195
2196         /* Since the actual Tx work is minimal, we can give the Tx a larger
2197          * budget and be more aggressive about cleaning up the Tx descriptors.
2198          */
2199         hns3_for_each_ring(ring, tqp_vector->tx_group) {
2200                 if (!hns3_clean_tx_ring(ring, budget))
2201                         clean_complete = false;
2202         }
2203
2204         /* make sure rx ring budget not smaller than 1 */
2205         rx_budget = max(budget / tqp_vector->num_tqps, 1);
2206
2207         hns3_for_each_ring(ring, tqp_vector->rx_group) {
2208                 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget);
2209
2210                 if (rx_cleaned >= rx_budget)
2211                         clean_complete = false;
2212
2213                 rx_pkt_total += rx_cleaned;
2214         }
2215
2216         tqp_vector->rx_group.total_packets += rx_pkt_total;
2217
2218         if (!clean_complete)
2219                 return budget;
2220
2221         napi_complete(napi);
2222         hns3_update_new_int_gl(tqp_vector);
2223         hns3_mask_vector_irq(tqp_vector, 1);
2224
2225         return rx_pkt_total;
2226 }
2227
2228 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2229                                       struct hnae3_ring_chain_node *head)
2230 {
2231         struct pci_dev *pdev = tqp_vector->handle->pdev;
2232         struct hnae3_ring_chain_node *cur_chain = head;
2233         struct hnae3_ring_chain_node *chain;
2234         struct hns3_enet_ring *tx_ring;
2235         struct hns3_enet_ring *rx_ring;
2236
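             /* Build a linked list describing every ring served by this
              * vector: the caller-provided head node is filled in place with
              * the first ring, further nodes are devm-allocated, and each node
              * records the TQP index plus a TX/RX type bit for the AE layer to
              * consume when mapping rings to the vector.
              */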
2237         tx_ring = tqp_vector->tx_group.ring;
2238         if (tx_ring) {
2239                 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2240                 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2241                              HNAE3_RING_TYPE_TX);
2242
2243                 cur_chain->next = NULL;
2244
2245                 while (tx_ring->next) {
2246                         tx_ring = tx_ring->next;
2247
2248                         chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2249                                              GFP_KERNEL);
2250                         if (!chain)
2251                                 return -ENOMEM;
2252
2253                         cur_chain->next = chain;
2254                         chain->tqp_index = tx_ring->tqp->tqp_index;
2255                         hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2256                                      HNAE3_RING_TYPE_TX);
2257
2258                         cur_chain = chain;
2259                 }
2260         }
2261
2262         rx_ring = tqp_vector->rx_group.ring;
2263         if (!tx_ring && rx_ring) {
2264                 cur_chain->next = NULL;
2265                 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2266                 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2267                              HNAE3_RING_TYPE_RX);
2268
2269                 rx_ring = rx_ring->next;
2270         }
2271
2272         while (rx_ring) {
2273                 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2274                 if (!chain)
2275                         return -ENOMEM;
2276
2277                 cur_chain->next = chain;
2278                 chain->tqp_index = rx_ring->tqp->tqp_index;
2279                 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2280                              HNAE3_RING_TYPE_RX);
2281                 cur_chain = chain;
2282
2283                 rx_ring = rx_ring->next;
2284         }
2285
2286         return 0;
2287 }
2288
2289 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2290                                         struct hnae3_ring_chain_node *head)
2291 {
2292         struct pci_dev *pdev = tqp_vector->handle->pdev;
2293         struct hnae3_ring_chain_node *chain_tmp, *chain;
2294
2295         chain = head->next;
2296
2297         while (chain) {
2298                 chain_tmp = chain->next;
2299                 devm_kfree(&pdev->dev, chain);
2300                 chain = chain_tmp;
2301         }
2302 }
2303
2304 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2305                                    struct hns3_enet_ring *ring)
2306 {
2307         ring->next = group->ring;
2308         group->ring = ring;
2309
2310         group->count++;
2311 }
2312
2313 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2314 {
2315         struct hnae3_ring_chain_node vector_ring_chain;
2316         struct hnae3_handle *h = priv->ae_handle;
2317         struct hns3_enet_tqp_vector *tqp_vector;
2318         struct hnae3_vector_info *vector;
2319         struct pci_dev *pdev = h->pdev;
2320         u16 tqp_num = h->kinfo.num_tqps;
2321         u16 vector_num;
2322         int ret = 0;
2323         u16 i;
2324
2325         /* RSS size, cpu online and vector_num should be the same */
2326         /* Should consider 2p/4p later */
2327         vector_num = min_t(u16, num_online_cpus(), tqp_num);
2328         vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2329                               GFP_KERNEL);
2330         if (!vector)
2331                 return -ENOMEM;
2332
2333         vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2334
2335         priv->vector_num = vector_num;
2336         priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2337                 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2338                              GFP_KERNEL);
2339         if (!priv->tqp_vector)
2340                 return -ENOMEM;
2341
2342         for (i = 0; i < tqp_num; i++) {
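             /* Distribute the rings over the vectors round-robin: TQP i is
              * served by vector i % vector_num. ring_data[0..tqp_num-1] holds
              * the TX rings and ring_data[tqp_num..2*tqp_num-1] the RX rings,
              * matching the layout set up in hns3_ring_get_cfg().
              */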
2343                 u16 vector_i = i % vector_num;
2344
2345                 tqp_vector = &priv->tqp_vector[vector_i];
2346
2347                 hns3_add_ring_to_group(&tqp_vector->tx_group,
2348                                        priv->ring_data[i].ring);
2349
2350                 hns3_add_ring_to_group(&tqp_vector->rx_group,
2351                                        priv->ring_data[i + tqp_num].ring);
2352
2353                 tqp_vector->idx = vector_i;
2354                 tqp_vector->mask_addr = vector[vector_i].io_addr;
2355                 tqp_vector->vector_irq = vector[vector_i].vector;
2356                 tqp_vector->num_tqps++;
2357
2358                 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2359                 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2360         }
2361
2362         for (i = 0; i < vector_num; i++) {
2363                 tqp_vector = &priv->tqp_vector[i];
2364
2365                 tqp_vector->rx_group.total_bytes = 0;
2366                 tqp_vector->rx_group.total_packets = 0;
2367                 tqp_vector->tx_group.total_bytes = 0;
2368                 tqp_vector->tx_group.total_packets = 0;
2369                 hns3_vector_gl_rl_init(tqp_vector);
2370                 tqp_vector->handle = h;
2371
2372                 ret = hns3_get_vector_ring_chain(tqp_vector,
2373                                                  &vector_ring_chain);
2374                 if (ret)
2375                         goto out;
2376
2377                 ret = h->ae_algo->ops->map_ring_to_vector(h,
2378                         tqp_vector->vector_irq, &vector_ring_chain);
2379                 if (ret)
2380                         goto out;
2381
2382                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2383
2384                 netif_napi_add(priv->netdev, &tqp_vector->napi,
2385                                hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2386         }
2387
2388 out:
2389         devm_kfree(&pdev->dev, vector);
2390         return ret;
2391 }
2392
2393 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2394 {
2395         struct hnae3_ring_chain_node vector_ring_chain;
2396         struct hnae3_handle *h = priv->ae_handle;
2397         struct hns3_enet_tqp_vector *tqp_vector;
2398         struct pci_dev *pdev = h->pdev;
2399         int i, ret;
2400
2401         for (i = 0; i < priv->vector_num; i++) {
2402                 tqp_vector = &priv->tqp_vector[i];
2403
2404                 ret = hns3_get_vector_ring_chain(tqp_vector,
2405                                                  &vector_ring_chain);
2406                 if (ret)
2407                         return ret;
2408
2409                 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2410                         tqp_vector->vector_irq, &vector_ring_chain);
2411                 if (ret)
2412                         return ret;
2413
2414                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2415
2416                 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2417                         (void)irq_set_affinity_hint(
2418                                 priv->tqp_vector[i].vector_irq,
2419                                 NULL);
2420                         devm_free_irq(&pdev->dev,
2421                                       priv->tqp_vector[i].vector_irq,
2422                                       &priv->tqp_vector[i]);
2423                 }
2424
2425                 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2426
2427                 netif_napi_del(&priv->tqp_vector[i].napi);
2428         }
2429
2430         devm_kfree(&pdev->dev, priv->tqp_vector);
2431
2432         return 0;
2433 }
2434
2435 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2436                              int ring_type)
2437 {
2438         struct hns3_nic_ring_data *ring_data = priv->ring_data;
2439         int queue_num = priv->ae_handle->kinfo.num_tqps;
2440         struct pci_dev *pdev = priv->ae_handle->pdev;
2441         struct hns3_enet_ring *ring;
2442
2443         ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2444         if (!ring)
2445                 return -ENOMEM;
2446
2447         if (ring_type == HNAE3_RING_TYPE_TX) {
2448                 ring_data[q->tqp_index].ring = ring;
2449                 ring_data[q->tqp_index].queue_index = q->tqp_index;
2450                 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2451         } else {
2452                 ring_data[q->tqp_index + queue_num].ring = ring;
2453                 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2454                 ring->io_base = q->io_base;
2455         }
2456
2457         hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2458
2459         ring->tqp = q;
2460         ring->desc = NULL;
2461         ring->desc_cb = NULL;
2462         ring->dev = priv->dev;
2463         ring->desc_dma_addr = 0;
2464         ring->buf_size = q->buf_size;
2465         ring->desc_num = q->desc_num;
2466         ring->next_to_use = 0;
2467         ring->next_to_clean = 0;
2468
2469         return 0;
2470 }
2471
2472 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2473                               struct hns3_nic_priv *priv)
2474 {
2475         int ret;
2476
2477         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2478         if (ret)
2479                 return ret;
2480
2481         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2482         if (ret)
2483                 return ret;
2484
2485         return 0;
2486 }
2487
2488 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2489 {
2490         struct hnae3_handle *h = priv->ae_handle;
2491         struct pci_dev *pdev = h->pdev;
2492         int i, ret;
2493
2494         priv->ring_data =  devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2495                                         sizeof(*priv->ring_data) * 2,
2496                                         GFP_KERNEL);
2497         if (!priv->ring_data)
2498                 return -ENOMEM;
2499
2500         for (i = 0; i < h->kinfo.num_tqps; i++) {
2501                 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2502                 if (ret)
2503                         goto err;
2504         }
2505
2506         return 0;
2507 err:
2508         devm_kfree(&pdev->dev, priv->ring_data);
2509         return ret;
2510 }
2511
2512 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2513 {
2514         int ret;
2515
2516         if (ring->desc_num <= 0 || ring->buf_size <= 0)
2517                 return -EINVAL;
2518
2519         ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2520                                 GFP_KERNEL);
2521         if (!ring->desc_cb) {
2522                 ret = -ENOMEM;
2523                 goto out;
2524         }
2525
2526         ret = hns3_alloc_desc(ring);
2527         if (ret)
2528                 goto out_with_desc_cb;
2529
2530         if (!HNAE3_IS_TX_RING(ring)) {
2531                 ret = hns3_alloc_ring_buffers(ring);
2532                 if (ret)
2533                         goto out_with_desc;
2534         }
2535
2536         return 0;
2537
2538 out_with_desc:
2539         hns3_free_desc(ring);
2540 out_with_desc_cb:
2541         kfree(ring->desc_cb);
2542         ring->desc_cb = NULL;
2543 out:
2544         return ret;
2545 }
2546
2547 static void hns3_fini_ring(struct hns3_enet_ring *ring)
2548 {
2549         hns3_free_desc(ring);
2550         kfree(ring->desc_cb);
2551         ring->desc_cb = NULL;
2552         ring->next_to_clean = 0;
2553         ring->next_to_use = 0;
2554 }
2555
2556 int hns3_buf_size2type(u32 buf_size)
2557 {
2558         int bd_size_type;
2559
2560         switch (buf_size) {
2561         case 512:
2562                 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2563                 break;
2564         case 1024:
2565                 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2566                 break;
2567         case 2048:
2568                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2569                 break;
2570         case 4096:
2571                 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2572                 break;
2573         default:
2574                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2575         }
2576
2577         return bd_size_type;
2578 }
2579
2580 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2581 {
2582         dma_addr_t dma = ring->desc_dma_addr;
2583         struct hnae3_queue *q = ring->tqp;
2584
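             /* The base address registers take the DMA address as a low/high
              * 32-bit pair; the (dma >> 31) >> 1 idiom extracts the upper
              * 32 bits without relying on a full-width shift, which would be
              * undefined behaviour if dma_addr_t were only 32 bits wide.
              */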
2585         if (!HNAE3_IS_TX_RING(ring)) {
2586                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2587                                (u32)dma);
2588                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2589                                (u32)((dma >> 31) >> 1));
2590
2591                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2592                                hns3_buf_size2type(ring->buf_size));
2593                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2594                                ring->desc_num / 8 - 1);
2595
2596         } else {
2597                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2598                                (u32)dma);
2599                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2600                                (u32)((dma >> 31) >> 1));
2601
2602                 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2603                                hns3_buf_size2type(ring->buf_size));
2604                 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2605                                ring->desc_num / 8 - 1);
2606         }
2607 }
2608
2609 static int hns3_init_all_ring(struct hns3_nic_priv *priv)
2610 {
2611         struct hnae3_handle *h = priv->ae_handle;
2612         int ring_num = h->kinfo.num_tqps * 2;
2613         int i, j;
2614         int ret;
2615
2616         for (i = 0; i < ring_num; i++) {
2617                 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2618                 if (ret) {
2619                         dev_err(priv->dev,
2620                                 "Alloc ring memory fail! ret=%d\n", ret);
2621                         goto out_when_alloc_ring_memory;
2622                 }
2623
2624                 hns3_init_ring_hw(priv->ring_data[i].ring);
2625
2626                 u64_stats_init(&priv->ring_data[i].ring->syncp);
2627         }
2628
2629         return 0;
2630
2631 out_when_alloc_ring_memory:
2632         for (j = i - 1; j >= 0; j--)
2633                 hns3_fini_ring(priv->ring_data[j].ring);
2634
2635         return -ENOMEM;
2636 }
2637
2638 static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2639 {
2640         struct hnae3_handle *h = priv->ae_handle;
2641         int i;
2642
2643         for (i = 0; i < h->kinfo.num_tqps; i++) {
2644                 if (h->ae_algo->ops->reset_queue)
2645                         h->ae_algo->ops->reset_queue(h, i);
2646
2647                 hns3_fini_ring(priv->ring_data[i].ring);
2648                 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2649         }
2650
2651         return 0;
2652 }
2653
2654 /* Set mac addr if it is configured, or leave it to the AE driver */
2655 static void hns3_init_mac_addr(struct net_device *netdev)
2656 {
2657         struct hns3_nic_priv *priv = netdev_priv(netdev);
2658         struct hnae3_handle *h = priv->ae_handle;
2659         u8 mac_addr_temp[ETH_ALEN];
2660
2661         if (h->ae_algo->ops->get_mac_addr) {
2662                 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2663                 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2664         }
2665
2666         /* Check if the MAC address is valid, if not get a random one */
2667         if (!is_valid_ether_addr(netdev->dev_addr)) {
2668                 eth_hw_addr_random(netdev);
2669                 dev_warn(priv->dev, "using random MAC address %pM\n",
2670                          netdev->dev_addr);
2671                 /* Also copy this new MAC address into hdev */
2672                 if (h->ae_algo->ops->set_mac_addr)
2673                         h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
2674         }
2675 }
2676
2677 static void hns3_nic_set_priv_ops(struct net_device *netdev)
2678 {
2679         struct hns3_nic_priv *priv = netdev_priv(netdev);
2680
2681         if ((netdev->features & NETIF_F_TSO) ||
2682             (netdev->features & NETIF_F_TSO6)) {
2683                 priv->ops.fill_desc = hns3_fill_desc_tso;
2684                 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
2685         } else {
2686                 priv->ops.fill_desc = hns3_fill_desc;
2687                 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
2688         }
2689 }
2690
2691 static int hns3_client_init(struct hnae3_handle *handle)
2692 {
2693         struct pci_dev *pdev = handle->pdev;
2694         struct hns3_nic_priv *priv;
2695         struct net_device *netdev;
2696         int ret;
2697
2698         netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
2699                                    handle->kinfo.num_tqps);
2700         if (!netdev)
2701                 return -ENOMEM;
2702
2703         priv = netdev_priv(netdev);
2704         priv->dev = &pdev->dev;
2705         priv->netdev = netdev;
2706         priv->ae_handle = handle;
2707
2708         handle->kinfo.netdev = netdev;
2709         handle->priv = (void *)priv;
2710
2711         hns3_init_mac_addr(netdev);
2712
2713         hns3_set_default_feature(netdev);
2714
2715         netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
2716         netdev->priv_flags |= IFF_UNICAST_FLT;
2717         netdev->netdev_ops = &hns3_nic_netdev_ops;
2718         SET_NETDEV_DEV(netdev, &pdev->dev);
2719         hns3_ethtool_set_ops(netdev);
2720         hns3_nic_set_priv_ops(netdev);
2721
2722         /* Carrier off reporting is important to ethtool even BEFORE open */
2723         netif_carrier_off(netdev);
2724
2725         ret = hns3_get_ring_config(priv);
2726         if (ret) {
2727                 ret = -ENOMEM;
2728                 goto out_get_ring_cfg;
2729         }
2730
2731         ret = hns3_nic_init_vector_data(priv);
2732         if (ret) {
2733                 ret = -ENOMEM;
2734                 goto out_init_vector_data;
2735         }
2736
2737         ret = hns3_init_all_ring(priv);
2738         if (ret) {
2739                 ret = -ENOMEM;
2740                 goto out_init_ring_data;
2741         }
2742
2743         ret = register_netdev(netdev);
2744         if (ret) {
2745                 dev_err(priv->dev, "probe register netdev fail!\n");
2746                 goto out_reg_netdev_fail;
2747         }
2748
2749         return ret;
2750
2751 out_reg_netdev_fail:
2752 out_init_ring_data:
2753         (void)hns3_nic_uninit_vector_data(priv);
2754         priv->ring_data = NULL;
2755 out_init_vector_data:
2756 out_get_ring_cfg:
2757         priv->ae_handle = NULL;
2758         free_netdev(netdev);
2759         return ret;
2760 }
2761
2762 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
2763 {
2764         struct net_device *netdev = handle->kinfo.netdev;
2765         struct hns3_nic_priv *priv = netdev_priv(netdev);
2766         int ret;
2767
2768         if (netdev->reg_state != NETREG_UNINITIALIZED)
2769                 unregister_netdev(netdev);
2770
2771         ret = hns3_nic_uninit_vector_data(priv);
2772         if (ret)
2773                 netdev_err(netdev, "uninit vector error\n");
2774
2775         ret = hns3_uninit_all_ring(priv);
2776         if (ret)
2777                 netdev_err(netdev, "uninit ring error\n");
2778
2779         priv->ring_data = NULL;
2780
2781         free_netdev(netdev);
2782 }
2783
2784 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
2785 {
2786         struct net_device *netdev = handle->kinfo.netdev;
2787
2788         if (!netdev)
2789                 return;
2790
2791         if (linkup) {
2792                 netif_carrier_on(netdev);
2793                 netif_tx_wake_all_queues(netdev);
2794                 netdev_info(netdev, "link up\n");
2795         } else {
2796                 netif_carrier_off(netdev);
2797                 netif_tx_stop_all_queues(netdev);
2798                 netdev_info(netdev, "link down\n");
2799         }
2800 }
2801
2802 const struct hnae3_client_ops client_ops = {
2803         .init_instance = hns3_client_init,
2804         .uninit_instance = hns3_client_uninit,
2805         .link_status_change = hns3_link_status_change,
2806 };
2807
2808 /* hns3_init_module - Driver registration routine
2809  * hns3_init_module is the first routine called when the driver is
2810  * loaded. All it does is register the HNAE3 client and the PCI driver.
2811  */
2812 static int __init hns3_init_module(void)
2813 {
2814         int ret;
2815
2816         pr_info("%s: %s\n", hns3_driver_name, hns3_driver_string);
2817         pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
2818
2819         client.type = HNAE3_CLIENT_KNIC;
2820         snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
2821                  hns3_driver_name);
2822
2823         client.ops = &client_ops;
2824
2825         ret = hnae3_register_client(&client);
2826         if (ret)
2827                 return ret;
2828
2829         ret = pci_register_driver(&hns3_driver);
2830         if (ret)
2831                 hnae3_unregister_client(&client);
2832
2833         return ret;
2834 }
2835 module_init(hns3_init_module);
2836
2837 /* hns3_exit_module - Driver exit cleanup routine
2838  * hns3_exit_module is called just before the driver is removed
2839  * from memory.
2840  */
2841 static void __exit hns3_exit_module(void)
2842 {
2843         pci_unregister_driver(&hns3_driver);
2844         hnae3_unregister_client(&client);
2845 }
2846 module_exit(hns3_exit_module);
2847
2848 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
2849 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2850 MODULE_LICENSE("GPL");
2851 MODULE_ALIAS("pci:hns-nic");