net: hns3: Unify the prefix of vf functions
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/etherdevice.h>
5 #include <net/rtnetlink.h>
6 #include "hclgevf_cmd.h"
7 #include "hclgevf_main.h"
8 #include "hclge_mbx.h"
9 #include "hnae3.h"
10
11 #define HCLGEVF_NAME    "hclgevf"
12
13 static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
14 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
15 static struct hnae3_ae_algo ae_algovf;
16
17 static const struct pci_device_id ae_algovf_pci_tbl[] = {
18         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
19         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
20         /* required last entry */
21         {0, }
22 };
23
24 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
25
26 static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
27         struct hnae3_handle *handle)
28 {
29         return container_of(handle, struct hclgevf_dev, nic);
30 }
31
32 static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
33 {
34         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
35         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
36         struct hclgevf_desc desc;
37         struct hclgevf_tqp *tqp;
38         int status;
39         int i;
40
41         for (i = 0; i < kinfo->num_tqps; i++) {
42                 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
43                 hclgevf_cmd_setup_basic_desc(&desc,
44                                              HCLGEVF_OPC_QUERY_RX_STATUS,
45                                              true);
46
47                 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
48                 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
49                 if (status) {
50                         dev_err(&hdev->pdev->dev,
51                                 "Query tqp stat fail, status = %d, queue = %d\n",
52                                 status, i);
53                         return status;
54                 }
55                 tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
56                         le32_to_cpu(desc.data[1]);
57
58                 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
59                                              true);
60
61                 desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
62                 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
63                 if (status) {
64                         dev_err(&hdev->pdev->dev,
65                                 "Query tqp stat fail, status = %d, queue = %d\n",
66                                 status, i);
67                         return status;
68                 }
69                 tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
70                         le32_to_cpu(desc.data[1]);
71         }
72
73         return 0;
74 }
75
76 static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
77 {
78         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
79         struct hclgevf_tqp *tqp;
80         u64 *buff = data;
81         int i;
82
83         for (i = 0; i < kinfo->num_tqps; i++) {
84                 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
85                 *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
86         }
87         for (i = 0; i < kinfo->num_tqps; i++) {
88                 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
89                 *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
90         }
91
92         return buff;
93 }
94
95 static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
96 {
97         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
98
99         return kinfo->num_tqps * 2;
100 }
101
102 static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
103 {
104         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
105         u8 *buff = data;
106         int i = 0;
107
108         for (i = 0; i < kinfo->num_tqps; i++) {
109                 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
110                                                        struct hclgevf_tqp, q);
111                 snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
112                          tqp->index);
113                 buff += ETH_GSTRING_LEN;
114         }
115
116         for (i = 0; i < kinfo->num_tqps; i++) {
117                 struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
118                                                        struct hclgevf_tqp, q);
119                 snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
120                          tqp->index);
121                 buff += ETH_GSTRING_LEN;
122         }
123
124         return buff;
125 }
126
127 static void hclgevf_update_stats(struct hnae3_handle *handle,
128                                  struct net_device_stats *net_stats)
129 {
130         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
131         int status;
132
133         status = hclgevf_tqps_update_stats(handle);
134         if (status)
135                 dev_err(&hdev->pdev->dev,
136                         "VF update of TQP stats fail, status = %d.\n",
137                         status);
138 }
139
140 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
141 {
142         if (strset == ETH_SS_TEST)
143                 return -EOPNOTSUPP;
144         else if (strset == ETH_SS_STATS)
145                 return hclgevf_tqps_get_sset_count(handle, strset);
146
147         return 0;
148 }
149
150 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
151                                 u8 *data)
152 {
153         u8 *p = data;
154
155         if (strset == ETH_SS_STATS)
156                 p = hclgevf_tqps_get_strings(handle, p);
157 }
158
159 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
160 {
161         hclgevf_tqps_get_stats(handle, data);
162 }
163
164 static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
165 {
166         u8 resp_msg;
167         int status;
168
169         status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_TCINFO, 0, NULL, 0,
170                                       true, &resp_msg, sizeof(u8));
171         if (status) {
172                 dev_err(&hdev->pdev->dev,
173                         "VF request to get TC info from PF failed %d",
174                         status);
175                 return status;
176         }
177
178         hdev->hw_tc_map = resp_msg;
179
180         return 0;
181 }
182
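/* The PF's reply to HCLGE_MBX_GET_QINFO below is an 8 byte message holding
 * four consecutive u16 fields: num_tqps (bytes 0-1), rss_size_max (bytes 2-3),
 * num_desc (bytes 4-5) and rx_buf_len (bytes 6-7); they are unpacked with
 * memcpy() since the response buffer is a plain byte array.
 */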
183 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
184 {
185 #define HCLGEVF_TQPS_RSS_INFO_LEN       8
186         u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
187         int status;
188
189         status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QINFO, 0, NULL, 0,
190                                       true, resp_msg,
191                                       HCLGEVF_TQPS_RSS_INFO_LEN);
192         if (status) {
193                 dev_err(&hdev->pdev->dev,
194                         "VF request to get tqp info from PF failed %d",
195                         status);
196                 return status;
197         }
198
199         memcpy(&hdev->num_tqps, &resp_msg[0], sizeof(u16));
200         memcpy(&hdev->rss_size_max, &resp_msg[2], sizeof(u16));
201         memcpy(&hdev->num_desc, &resp_msg[4], sizeof(u16));
202         memcpy(&hdev->rx_buf_len, &resp_msg[6], sizeof(u16));
203
204         return 0;
205 }
206
207 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
208 {
209         struct hclgevf_tqp *tqp;
210         int i;
211
212         /* if this is an ongoing reset then we need to re-allocate the TQPs,
213          * since we cannot assume we would get the same number back from the PF
214          */
215         if (hclgevf_dev_ongoing_reset(hdev))
216                 devm_kfree(&hdev->pdev->dev, hdev->htqp);
217
218         hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
219                                   sizeof(struct hclgevf_tqp), GFP_KERNEL);
220         if (!hdev->htqp)
221                 return -ENOMEM;
222
223         tqp = hdev->htqp;
224
225         for (i = 0; i < hdev->num_tqps; i++) {
226                 tqp->dev = &hdev->pdev->dev;
227                 tqp->index = i;
228
229                 tqp->q.ae_algo = &ae_algovf;
230                 tqp->q.buf_size = hdev->rx_buf_len;
231                 tqp->q.desc_num = hdev->num_desc;
232                 tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
233                         i * HCLGEVF_TQP_REG_SIZE;
234
235                 tqp++;
236         }
237
238         return 0;
239 }
240
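/* Derive the knic queue layout from what the PF granted: rss_size is the
 * smaller of rss_size_max and an equal share of TQPs per TC, and the final
 * TQP count is min(rss_size * num_tc, num_tqps). For example, with 16 TQPs,
 * a 4 TC map and rss_size_max of 16, rss_size becomes 4 and all 16 TQPs
 * are kept.
 */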
241 static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
242 {
243         struct hnae3_handle *nic = &hdev->nic;
244         struct hnae3_knic_private_info *kinfo;
245         u16 new_tqps = hdev->num_tqps;
246         int i;
247
248         kinfo = &nic->kinfo;
249         kinfo->num_tc = 0;
250         kinfo->num_desc = hdev->num_desc;
251         kinfo->rx_buf_len = hdev->rx_buf_len;
252         for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
253                 if (hdev->hw_tc_map & BIT(i))
254                         kinfo->num_tc++;
255
256         kinfo->rss_size = min_t(u16, hdev->rss_size_max,
257                                 new_tqps / kinfo->num_tc);
258         new_tqps = kinfo->rss_size * kinfo->num_tc;
259         kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
260
261         /* if this is an ongoing reset then we need to re-allocate the hnae
262          * queues as well, since the number of TQPs from the PF might have changed.
263          */
264         if (hclgevf_dev_ongoing_reset(hdev))
265                 devm_kfree(&hdev->pdev->dev, kinfo->tqp);
266
267         kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
268                                   sizeof(struct hnae3_queue *), GFP_KERNEL);
269         if (!kinfo->tqp)
270                 return -ENOMEM;
271
272         for (i = 0; i < kinfo->num_tqps; i++) {
273                 hdev->htqp[i].q.handle = &hdev->nic;
274                 hdev->htqp[i].q.tqp_index = i;
275                 kinfo->tqp[i] = &hdev->htqp[i].q;
276         }
277
278         return 0;
279 }
280
281 static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
282 {
283         int status;
284         u8 resp_msg;
285
286         status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_LINK_STATUS, 0, NULL,
287                                       0, false, &resp_msg, sizeof(u8));
288         if (status)
289                 dev_err(&hdev->pdev->dev,
290                         "VF failed to fetch link status (%d) from PF", status);
291 }
292
293 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
294 {
295         struct hnae3_handle *handle = &hdev->nic;
296         struct hnae3_client *client;
297
298         client = handle->client;
299
300         link_state =
301                 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
302
303         if (link_state != hdev->hw.mac.link) {
304                 client->ops->link_status_change(handle, !!link_state);
305                 hdev->hw.mac.link = link_state;
306         }
307 }
308
309 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
310 {
311         struct hnae3_handle *nic = &hdev->nic;
312         int ret;
313
314         nic->ae_algo = &ae_algovf;
315         nic->pdev = hdev->pdev;
316         nic->numa_node_mask = hdev->numa_node_mask;
317         nic->flags |= HNAE3_SUPPORT_VF;
318
319         if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) {
320                 dev_err(&hdev->pdev->dev, "unsupported device type %d\n",
321                         hdev->ae_dev->dev_type);
322                 return -EINVAL;
323         }
324
325         ret = hclgevf_knic_setup(hdev);
326         if (ret)
327                 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
328                         ret);
329         return ret;
330 }
331
332 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
333 {
334         if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
335                 dev_warn(&hdev->pdev->dev,
336                          "vector(vector_id %d) has been freed.\n", vector_id);
337                 return;
338         }
339
340         hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
341         hdev->num_msi_left += 1;
342         hdev->num_msi_used -= 1;
343 }
344
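/* Vector 0 is reserved for the misc/mailbox interrupt, so the allocation
 * below scans from HCLGEVF_MISC_VECTOR_NUM + 1 upwards for free slots; the
 * per-vector doorbell address is then derived from the slot index.
 */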
345 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
346                               struct hnae3_vector_info *vector_info)
347 {
348         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
349         struct hnae3_vector_info *vector = vector_info;
350         int alloc = 0;
351         int i, j;
352
353         vector_num = min(hdev->num_msi_left, vector_num);
354
355         for (j = 0; j < vector_num; j++) {
356                 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
357                         if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
358                                 vector->vector = pci_irq_vector(hdev->pdev, i);
359                                 vector->io_addr = hdev->hw.io_base +
360                                         HCLGEVF_VECTOR_REG_BASE +
361                                         (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
362                                 hdev->vector_status[i] = 0;
363                                 hdev->vector_irq[i] = vector->vector;
364
365                                 vector++;
366                                 alloc++;
367
368                                 break;
369                         }
370                 }
371         }
372         hdev->num_msi_left -= alloc;
373         hdev->num_msi_used += alloc;
374
375         return alloc;
376 }
377
378 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
379 {
380         int i;
381
382         for (i = 0; i < hdev->num_msi; i++)
383                 if (vector == hdev->vector_irq[i])
384                         return i;
385
386         return -EINVAL;
387 }
388
389 static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
390 {
391         return HCLGEVF_RSS_KEY_SIZE;
392 }
393
394 static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
395 {
396         return HCLGEVF_RSS_IND_TBL_SIZE;
397 }
398
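/* The shadow RSS indirection table is pushed to hardware in
 * HCLGEVF_RSS_CFG_TBL_NUM command descriptors, each carrying
 * HCLGEVF_RSS_CFG_TBL_SIZE entries starting at start_table_index.
 */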
399 static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
400 {
401         const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
402         struct hclgevf_rss_indirection_table_cmd *req;
403         struct hclgevf_desc desc;
404         int status;
405         int i, j;
406
407         req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
408
409         for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
410                 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
411                                              false);
412                 req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
413                 req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
414                 for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
415                         req->rss_result[j] =
416                                 indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
417
418                 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
419                 if (status) {
420                         dev_err(&hdev->pdev->dev,
421                                 "VF failed(=%d) to set RSS indirection table\n",
422                                 status);
423                         return status;
424                 }
425         }
426
427         return 0;
428 }
429
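/* The per-TC RSS mode encodes tc_size as the log2 of rss_size rounded up
 * to a power of two, with each valid TC offset at i * rss_size. As a worked
 * example, rss_size = 24 gives roundup_pow_of_two(24) = 32 and a tc_size
 * field of ilog2(32) = 5.
 */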
430 static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
431 {
432         struct hclgevf_rss_tc_mode_cmd *req;
433         u16 tc_offset[HCLGEVF_MAX_TC_NUM];
434         u16 tc_valid[HCLGEVF_MAX_TC_NUM];
435         u16 tc_size[HCLGEVF_MAX_TC_NUM];
436         struct hclgevf_desc desc;
437         u16 roundup_size;
438         int status;
439         int i;
440
441         req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
442
443         roundup_size = roundup_pow_of_two(rss_size);
444         roundup_size = ilog2(roundup_size);
445
446         for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
447                 tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
448                 tc_size[i] = roundup_size;
449                 tc_offset[i] = rss_size * i;
450         }
451
452         hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
453         for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
454                 hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
455                               (tc_valid[i] & 0x1));
456                 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
457                                 HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
458                 hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
459                                 HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
460         }
461         status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
462         if (status)
463                 dev_err(&hdev->pdev->dev,
464                         "VF failed(=%d) to set rss tc mode\n", status);
465
466         return status;
467 }
468
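/* Reading back the RSS key takes three descriptor reads of
 * HCLGEVF_RSS_HASH_KEY_NUM bytes each, with the last chunk shortened to
 * whatever remains of HCLGEVF_RSS_KEY_SIZE (16 + 16 + 8, assuming the usual
 * 40 byte Toeplitz key). If no key buffer is passed, only the hash
 * algorithm word is fetched.
 */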
469 static int hclgevf_get_rss_hw_cfg(struct hnae3_handle *handle, u8 *hash,
470                                   u8 *key)
471 {
472         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
473         struct hclgevf_rss_config_cmd *req;
474         int lkup_times = key ? 3 : 1;
475         struct hclgevf_desc desc;
476         int key_offset;
477         int key_size;
478         int status;
479
480         req = (struct hclgevf_rss_config_cmd *)desc.data;
481         lkup_times = (lkup_times == 3) ? 3 : ((hash) ? 1 : 0);
482
483         for (key_offset = 0; key_offset < lkup_times; key_offset++) {
484                 hclgevf_cmd_setup_basic_desc(&desc,
485                                              HCLGEVF_OPC_RSS_GENERIC_CONFIG,
486                                              true);
487                 req->hash_config |= (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET);
488
489                 status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
490                 if (status) {
491                         dev_err(&hdev->pdev->dev,
492                                 "failed to get hardware RSS cfg, status = %d\n",
493                                 status);
494                         return status;
495                 }
496
497                 if (key_offset == 2)
498                         key_size = HCLGEVF_RSS_KEY_SIZE -
499                                    HCLGEVF_RSS_HASH_KEY_NUM * 2;
500                 else
501                         key_size = HCLGEVF_RSS_HASH_KEY_NUM;
502
503                 if (key)
504                         memcpy(key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM,
505                                req->hash_key,
506                                key_size);
507         }
508
509         if (hash) {
510                 if ((req->hash_config & 0xf) == HCLGEVF_RSS_HASH_ALGO_TOEPLITZ)
511                         *hash = ETH_RSS_HASH_TOP;
512                 else
513                         *hash = ETH_RSS_HASH_UNKNOWN;
514         }
515
516         return 0;
517 }
518
519 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
520                            u8 *hfunc)
521 {
522         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
523         struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
524         int i;
525
526         if (indir)
527                 for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
528                         indir[i] = rss_cfg->rss_indirection_tbl[i];
529
530         return hclgevf_get_rss_hw_cfg(handle, hfunc, key);
531 }
532
533 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
534                            const u8 *key, const u8 hfunc)
535 {
536         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
537         struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
538         int i;
539
540         /* update the shadow RSS table with user specified qids */
541         for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
542                 rss_cfg->rss_indirection_tbl[i] = indir[i];
543
544         /* update the hardware */
545         return hclgevf_set_rss_indir_table(hdev);
546 }
547
548 static int hclgevf_get_tc_size(struct hnae3_handle *handle)
549 {
550         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
551         struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
552
553         return rss_cfg->rss_size;
554 }
555
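/* Ring-to-vector (un)binding is conveyed to the PF over the mailbox:
 * msg[0] carries the map/unmap opcode, msg[1] the vector id and msg[2] the
 * node count, followed by one (ring type, tqp index, GL index) triple per
 * chain node. Chains longer than one message can hold are split across
 * multiple mailbox sends.
 */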
556 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
557                                        int vector_id,
558                                        struct hnae3_ring_chain_node *ring_chain)
559 {
560         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
561         struct hnae3_ring_chain_node *node;
562         struct hclge_mbx_vf_to_pf_cmd *req;
563         struct hclgevf_desc desc;
564         int i = 0;
565         int status;
566         u8 type;
567
568         req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
569
570         for (node = ring_chain; node; node = node->next) {
571                 int idx_offset = HCLGE_MBX_RING_MAP_BASIC_MSG_NUM +
572                                         HCLGE_MBX_RING_NODE_VARIABLE_NUM * i;
573
574                 if (i == 0) {
575                         hclgevf_cmd_setup_basic_desc(&desc,
576                                                      HCLGEVF_OPC_MBX_VF_TO_PF,
577                                                      false);
578                         type = en ?
579                                 HCLGE_MBX_MAP_RING_TO_VECTOR :
580                                 HCLGE_MBX_UNMAP_RING_TO_VECTOR;
581                         req->msg[0] = type;
582                         req->msg[1] = vector_id;
583                 }
584
585                 req->msg[idx_offset] =
586                                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
587                 req->msg[idx_offset + 1] = node->tqp_index;
588                 req->msg[idx_offset + 2] = hnae3_get_field(node->int_gl_idx,
589                                                            HNAE3_RING_GL_IDX_M,
590                                                            HNAE3_RING_GL_IDX_S);
591
592                 i++;
593                 if ((i == (HCLGE_MBX_VF_MSG_DATA_NUM -
594                      HCLGE_MBX_RING_MAP_BASIC_MSG_NUM) /
595                      HCLGE_MBX_RING_NODE_VARIABLE_NUM) ||
596                     !node->next) {
597                         req->msg[2] = i;
598
599                         status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
600                         if (status) {
601                                 dev_err(&hdev->pdev->dev,
602                                         "Map TQP fail, status is %d.\n",
603                                         status);
604                                 return status;
605                         }
606                         i = 0;
607                         hclgevf_cmd_setup_basic_desc(&desc,
608                                                      HCLGEVF_OPC_MBX_VF_TO_PF,
609                                                      false);
610                         req->msg[0] = type;
611                         req->msg[1] = vector_id;
612                 }
613         }
614
615         return 0;
616 }
617
618 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
619                                       struct hnae3_ring_chain_node *ring_chain)
620 {
621         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
622         int vector_id;
623
624         vector_id = hclgevf_get_vector_index(hdev, vector);
625         if (vector_id < 0) {
626                 dev_err(&handle->pdev->dev,
627                         "Get vector index fail. ret = %d\n", vector_id);
628                 return vector_id;
629         }
630
631         return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
632 }
633
634 static int hclgevf_unmap_ring_from_vector(
635                                 struct hnae3_handle *handle,
636                                 int vector,
637                                 struct hnae3_ring_chain_node *ring_chain)
638 {
639         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
640         int ret, vector_id;
641
642         vector_id = hclgevf_get_vector_index(hdev, vector);
643         if (vector_id < 0) {
644                 dev_err(&handle->pdev->dev,
645                         "Get vector index fail. ret = %d\n", vector_id);
646                 return vector_id;
647         }
648
649         ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
650         if (ret)
651                 dev_err(&handle->pdev->dev,
652                         "Unmap ring from vector fail. vector = %d, ret = %d\n",
653                         vector_id,
654                         ret);
655
656         return ret;
657 }
658
659 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
660 {
661         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
662         int vector_id;
663
664         vector_id = hclgevf_get_vector_index(hdev, vector);
665         if (vector_id < 0) {
666                 dev_err(&handle->pdev->dev,
667                         "hclgevf_put_vector get vector index fail. ret = %d\n",
668                         vector_id);
669                 return vector_id;
670         }
671
672         hclgevf_free_vector(hdev, vector_id);
673
674         return 0;
675 }
676
677 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
678                                         bool en_uc_pmc, bool en_mc_pmc)
679 {
680         struct hclge_mbx_vf_to_pf_cmd *req;
681         struct hclgevf_desc desc;
682         int status;
683
684         req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
685
686         hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
687         req->msg[0] = HCLGE_MBX_SET_PROMISC_MODE;
688         req->msg[1] = en_uc_pmc ? 1 : 0;
689         req->msg[2] = en_mc_pmc ? 1 : 0;
690
691         status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
692         if (status)
693                 dev_err(&hdev->pdev->dev,
694                         "Set promisc mode fail, status is %d.\n", status);
695
696         return status;
697 }
698
699 static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
700                                      bool en_uc_pmc, bool en_mc_pmc)
701 {
702         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
703
704         hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
705 }
706
707 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
708                               int stream_id, bool enable)
709 {
710         struct hclgevf_cfg_com_tqp_queue_cmd *req;
711         struct hclgevf_desc desc;
712         int status;
713
714         req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
715
716         hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
717                                      false);
718         req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
719         req->stream_id = cpu_to_le16(stream_id);
720         req->enable |= enable << HCLGEVF_TQP_ENABLE_B;
721
722         status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
723         if (status)
724                 dev_err(&hdev->pdev->dev,
725                         "TQP enable fail, status = %d.\n", status);
726
727         return status;
728 }
729
730 static int hclgevf_get_queue_id(struct hnae3_queue *queue)
731 {
732         struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
733
734         return tqp->index;
735 }
736
737 static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
738 {
739         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
740         struct hclgevf_tqp *tqp;
741         int i;
742
743         for (i = 0; i < kinfo->num_tqps; i++) {
744                 tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
745                 memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
746         }
747 }
748
749 static int hclgevf_cfg_func_mta_type(struct hclgevf_dev *hdev)
750 {
751         u8 resp_msg = HCLGEVF_MTA_TYPE_SEL_MAX;
752         int ret;
753
754         ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
755                                    HCLGE_MBX_MAC_VLAN_MTA_TYPE_READ,
756                                    NULL, 0, true, &resp_msg, sizeof(u8));
757
758         if (ret) {
759                 dev_err(&hdev->pdev->dev,
760                         "Read mta type fail, ret=%d.\n", ret);
761                 return ret;
762         }
763
764         if (resp_msg > HCLGEVF_MTA_TYPE_SEL_MAX) {
765                 dev_err(&hdev->pdev->dev,
766                         "Read mta type invalid, resp=%d.\n", resp_msg);
767                 return -EINVAL;
768         }
769
770         hdev->mta_mac_sel_type = resp_msg;
771
772         return 0;
773 }
774
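/* The MTA index is taken from the first two MAC address octets: the 16 bit
 * value is right-shifted by a type-dependent amount and masked to 12 bits,
 * selecting one of up to 4096 multicast table entries (assuming the usual
 * MTA table size).
 */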
775 static u16 hclgevf_get_mac_addr_to_mta_index(struct hclgevf_dev *hdev,
776                                              const u8 *addr)
777 {
778         u32 rsh = HCLGEVF_MTA_TYPE_SEL_MAX - hdev->mta_mac_sel_type;
779         u16 high_val = addr[1] | (addr[0] << 8);
780
781         return (high_val >> rsh) & 0xfff;
782 }
783
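/* The MTA status bitmap is streamed to the PF in chunks: each mailbox
 * message carries a 1 byte header (bits 0-6 = message index, bit 7 = end
 * flag) followed by HCLGEVF_MTA_STATUS_MSG_SIZE bytes of status bits,
 * i.e. 13 * 8 = 104 table bits per message.
 */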
784 static int hclgevf_do_update_mta_status(struct hclgevf_dev *hdev,
785                                         unsigned long *status)
786 {
787 #define HCLGEVF_MTA_STATUS_MSG_SIZE 13
788 #define HCLGEVF_MTA_STATUS_MSG_BITS \
789                         (HCLGEVF_MTA_STATUS_MSG_SIZE * BITS_PER_BYTE)
790 #define HCLGEVF_MTA_STATUS_MSG_END_BITS \
791                         (HCLGEVF_MTA_TBL_SIZE % HCLGEVF_MTA_STATUS_MSG_BITS)
792         u16 tbl_cnt;
793         u16 tbl_idx;
794         u8 msg_cnt;
795         u8 msg_idx;
796         int ret;
797
798         msg_cnt = DIV_ROUND_UP(HCLGEVF_MTA_TBL_SIZE,
799                                HCLGEVF_MTA_STATUS_MSG_BITS);
800         tbl_idx = 0;
801         msg_idx = 0;
802         while (msg_cnt--) {
803                 u8 msg[HCLGEVF_MTA_STATUS_MSG_SIZE + 1];
804                 u8 *p = &msg[1];
805                 u8 msg_ofs;
806                 u8 msg_bit;
807
808                 memset(msg, 0, sizeof(msg));
809
810                 /* set index field */
811                 msg[0] = 0x7F & msg_idx;
812
813                 /* set end flag field */
814                 if (msg_cnt == 0) {
815                         msg[0] |= 0x80;
816                         tbl_cnt = HCLGEVF_MTA_STATUS_MSG_END_BITS;
817                 } else {
818                         tbl_cnt = HCLGEVF_MTA_STATUS_MSG_BITS;
819                 }
820
821                 /* set status field */
822                 msg_ofs = 0;
823                 msg_bit = 0;
824                 while (tbl_cnt--) {
825                         if (test_bit(tbl_idx, status))
826                                 p[msg_ofs] |= BIT(msg_bit);
827
828                         tbl_idx++;
829
830                         msg_bit++;
831                         if (msg_bit == BITS_PER_BYTE) {
832                                 msg_bit = 0;
833                                 msg_ofs++;
834                         }
835                 }
836
837                 ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
838                                            HCLGE_MBX_MAC_VLAN_MTA_STATUS_UPDATE,
839                                            msg, sizeof(msg), false, NULL, 0);
840                 if (ret)
841                         break;
842
843                 msg_idx++;
844         }
845
846         return ret;
847 }
848
849 static int hclgevf_update_mta_status(struct hnae3_handle *handle)
850 {
851         unsigned long mta_status[BITS_TO_LONGS(HCLGEVF_MTA_TBL_SIZE)];
852         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
853         struct net_device *netdev = hdev->nic.kinfo.netdev;
854         struct netdev_hw_addr *ha;
855         u16 tbl_idx;
856
857         /* clear status */
858         memset(mta_status, 0, sizeof(mta_status));
859
860         /* update status from mc addr list */
861         netdev_for_each_mc_addr(ha, netdev) {
862                 tbl_idx = hclgevf_get_mac_addr_to_mta_index(hdev, ha->addr);
863                 set_bit(tbl_idx, mta_status);
864         }
865
866         return hclgevf_do_update_mta_status(hdev, mta_status);
867 }
868
869 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
870 {
871         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
872
873         ether_addr_copy(p, hdev->hw.mac.mac_addr);
874 }
875
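/* A unicast MAC change sends both addresses to the PF (new MAC in bytes
 * 0-5, old MAC in bytes 6-11) so the PF can replace the old entry with the
 * new one; on the first configuration there is nothing to replace, hence
 * the ADD rather than MODIFY subcode.
 */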
876 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
877                                 bool is_first)
878 {
879         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
880         u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
881         u8 *new_mac_addr = (u8 *)p;
882         u8 msg_data[ETH_ALEN * 2];
883         u16 subcode;
884         int status;
885
886         ether_addr_copy(msg_data, new_mac_addr);
887         ether_addr_copy(&msg_data[ETH_ALEN], old_mac_addr);
888
889         subcode = is_first ? HCLGE_MBX_MAC_VLAN_UC_ADD :
890                         HCLGE_MBX_MAC_VLAN_UC_MODIFY;
891
892         status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
893                                       subcode, msg_data, ETH_ALEN * 2,
894                                       true, NULL, 0);
895         if (!status)
896                 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
897
898         return status;
899 }
900
901 static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
902                                const unsigned char *addr)
903 {
904         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
905
906         return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
907                                     HCLGE_MBX_MAC_VLAN_UC_ADD,
908                                     addr, ETH_ALEN, false, NULL, 0);
909 }
910
911 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
912                               const unsigned char *addr)
913 {
914         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
915
916         return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_UNICAST,
917                                     HCLGE_MBX_MAC_VLAN_UC_REMOVE,
918                                     addr, ETH_ALEN, false, NULL, 0);
919 }
920
921 static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
922                                const unsigned char *addr)
923 {
924         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
925
926         return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
927                                     HCLGE_MBX_MAC_VLAN_MC_ADD,
928                                     addr, ETH_ALEN, false, NULL, 0);
929 }
930
931 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
932                               const unsigned char *addr)
933 {
934         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
935
936         return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MULTICAST,
937                                     HCLGE_MBX_MAC_VLAN_MC_REMOVE,
938                                     addr, ETH_ALEN, false, NULL, 0);
939 }
940
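/* The VLAN filter mailbox message is 5 bytes: byte 0 = is_kill, bytes 1-2 =
 * the VLAN id and bytes 3-4 = the protocol (copied as-is, a big-endian
 * __be16), as packed below.
 */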
941 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
942                                    __be16 proto, u16 vlan_id,
943                                    bool is_kill)
944 {
945 #define HCLGEVF_VLAN_MBX_MSG_LEN 5
946         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
947         u8 msg_data[HCLGEVF_VLAN_MBX_MSG_LEN];
948
949         if (vlan_id > 4095)
950                 return -EINVAL;
951
952         if (proto != htons(ETH_P_8021Q))
953                 return -EPROTONOSUPPORT;
954
955         msg_data[0] = is_kill;
956         memcpy(&msg_data[1], &vlan_id, sizeof(vlan_id));
957         memcpy(&msg_data[3], &proto, sizeof(proto));
958         return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
959                                     HCLGE_MBX_VLAN_FILTER, msg_data,
960                                     HCLGEVF_VLAN_MBX_MSG_LEN, false, NULL, 0);
961 }
962
963 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
964 {
965         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
966         u8 msg_data;
967
968         msg_data = enable ? 1 : 0;
969         return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
970                                     HCLGE_MBX_VLAN_RX_OFF_CFG, &msg_data,
971                                     1, false, NULL, 0);
972 }
973
974 static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
975 {
976         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
977         u8 msg_data[2];
978         int ret;
979
980         memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
981
982         /* disable the VF queue before sending the queue reset msg to the PF */
983         ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
984         if (ret)
985                 return;
986
987         hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
988                              2, true, NULL, 0);
989 }
990
991 static int hclgevf_notify_client(struct hclgevf_dev *hdev,
992                                  enum hnae3_reset_notify_type type)
993 {
994         struct hnae3_client *client = hdev->nic_client;
995         struct hnae3_handle *handle = &hdev->nic;
996
997         if (!client->ops->reset_notify)
998                 return -EOPNOTSUPP;
999
1000         return client->ops->reset_notify(handle, type);
1001 }
1002
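/* Polls the function-reset status register every HCLGEVF_RESET_WAIT_MS
 * (500 ms) up to HCLGEVF_RESET_WAIT_CNT (20) times, i.e. a worst case wait
 * of about 10 seconds before giving up on the hardware.
 */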
1003 static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
1004 {
1005 #define HCLGEVF_RESET_WAIT_MS   500
1006 #define HCLGEVF_RESET_WAIT_CNT  20
1007         u32 val, cnt = 0;
1008
1009         /* wait to check the hardware reset completion status */
1010         val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
1011         while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
1012                (cnt < HCLGEVF_RESET_WAIT_CNT)) {
1013                 msleep(HCLGEVF_RESET_WAIT_MS);
1014                 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
1015                 cnt++;
1016         }
1017
1018         /* hardware completion status should be available by this time */
1019         if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
1020                 dev_warn(&hdev->pdev->dev,
1021                          "couldn't get reset done status from h/w, timeout!\n");
1022                 return -EBUSY;
1023         }
1024
1025         /* we will wait a bit more to let the reset of the stack complete. This
1026          * might be needed in case the reset was asserted by the PF. Yes, this
1027          * also means we might end up waiting a bit longer even for a VF reset.
1028          */
1029         msleep(5000);
1030
1031         return 0;
1032 }
1033
1034 static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
1035 {
1036         int ret;
1037
1038         /* uninitialize the nic client */
1039         hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1040
1041         /* re-initialize the hclge device */
1042         ret = hclgevf_init_hdev(hdev);
1043         if (ret) {
1044                 dev_err(&hdev->pdev->dev,
1045                         "hclge device re-init failed, VF is disabled!\n");
1046                 return ret;
1047         }
1048
1049         /* bring up the nic client again */
1050         hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
1051
1052         return 0;
1053 }
1054
1055 static int hclgevf_reset(struct hclgevf_dev *hdev)
1056 {
1057         int ret;
1058
1059         rtnl_lock();
1060
1061         /* bring down the nic to stop any ongoing TX/RX */
1062         hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
1063
1064         /* check if VF could successfully fetch the hardware reset completion
1065          * status from the hardware
1066          */
1067         ret = hclgevf_reset_wait(hdev);
1068         if (ret) {
1069                 /* can't do much in this situation, will disable VF */
1070                 dev_err(&hdev->pdev->dev,
1071                         "VF failed(=%d) to fetch H/W reset completion status\n",
1072                         ret);
1073
1074                 dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
1075                 hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
1076
1077                 rtnl_unlock();
1078                 return ret;
1079         }
1080
1081         /* now, re-initialize the nic client and ae device */
1082         ret = hclgevf_reset_stack(hdev);
1083         if (ret)
1084                 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
1085
1086         /* bring up the nic to enable TX/RX again */
1087         hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
1088
1089         rtnl_unlock();
1090
1091         return ret;
1092 }
1093
1094 static int hclgevf_do_reset(struct hclgevf_dev *hdev)
1095 {
1096         int status;
1097         u8 respmsg;
1098
1099         status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
1100                                       0, false, &respmsg, sizeof(u8));
1101         if (status)
1102                 dev_err(&hdev->pdev->dev,
1103                         "VF reset request to PF failed(=%d)\n", status);
1104
1105         return status;
1106 }
1107
1108 static void hclgevf_reset_event(struct hnae3_handle *handle)
1109 {
1110         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1111
1112         dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
1113
1114         handle->reset_level = HNAE3_VF_RESET;
1115
1116         /* reset of this VF requested */
1117         set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
1118         hclgevf_reset_task_schedule(hdev);
1119
1120         handle->last_reset_time = jiffies;
1121 }
1122
1123 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
1124 {
1125         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1126
1127         return hdev->fw_version;
1128 }
1129
1130 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
1131 {
1132         struct hclgevf_misc_vector *vector = &hdev->misc_vector;
1133
1134         vector->vector_irq = pci_irq_vector(hdev->pdev,
1135                                             HCLGEVF_MISC_VECTOR_NUM);
1136         vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
1137         /* vector status always valid for Vector 0 */
1138         hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
1139         hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
1140
1141         hdev->num_msi_left -= 1;
1142         hdev->num_msi_used += 1;
1143 }
1144
1145 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
1146 {
1147         if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
1148             !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
1149                 set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
1150                 schedule_work(&hdev->rst_service_task);
1151         }
1152 }
1153
1154 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
1155 {
1156         if (!test_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state) &&
1157             !test_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) {
1158                 set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1159                 schedule_work(&hdev->mbx_service_task);
1160         }
1161 }
1162
1163 static void hclgevf_task_schedule(struct hclgevf_dev *hdev)
1164 {
1165         if (!test_bit(HCLGEVF_STATE_DOWN, &hdev->state)  &&
1166             !test_and_set_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state))
1167                 schedule_work(&hdev->service_task);
1168 }
1169
1170 static void hclgevf_deferred_task_schedule(struct hclgevf_dev *hdev)
1171 {
1172         /* if we have any pending mailbox event then schedule the mbx task */
1173         if (hdev->mbx_event_pending)
1174                 hclgevf_mbx_task_schedule(hdev);
1175
1176         if (test_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state))
1177                 hclgevf_reset_task_schedule(hdev);
1178 }
1179
1180 static void hclgevf_service_timer(struct timer_list *t)
1181 {
1182         struct hclgevf_dev *hdev = from_timer(hdev, t, service_timer);
1183
1184         mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
1185
1186         hclgevf_task_schedule(hdev);
1187 }
1188
1189 static void hclgevf_reset_service_task(struct work_struct *work)
1190 {
1191         struct hclgevf_dev *hdev =
1192                 container_of(work, struct hclgevf_dev, rst_service_task);
1193         int ret;
1194
1195         if (test_and_set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
1196                 return;
1197
1198         clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
1199
1200         if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
1201                                &hdev->reset_state)) {
1202                 /* PF has intimated that it is about to reset the hardware.
1203                  * We now have to poll & check if hardware has actually
1204                  * completed the reset sequence. On hardware reset completion,
1205                  * VF needs to reset the client and ae device.
1206                  */
1207                 hdev->reset_attempts = 0;
1208
1209                 ret = hclgevf_reset(hdev);
1210                 if (ret)
1211                         dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
1212         } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
1213                                       &hdev->reset_state)) {
1214                 /* we could be here when either of the below happens:
1215                  * 1. reset was initiated due to a watchdog timeout caused by
1216                  *    a. IMP was earlier reset and our TX got choked down,
1217                  *       which resulted in the watchdog reacting and inducing
1218                  *       a VF reset. This means our cmdq would be unreliable.
1219                  *    b. a problem in TX due to another lower layer (e.g. the
1220                  *       link layer not functioning properly etc.)
1221                  * 2. VF reset might have been initiated due to some config
1222                  *    change.
1223                  *
1224                  * NOTE: There's no clearer way to detect the above cases than
1225                  * to react to the PF's response to this reset request. The PF
1226                  * will ack cases 1b and 2, but we will not get any intimation
1227                  * about 1a from the PF as the cmdq would be in an unreliable
1228                  * state, i.e. mailbox communication between PF and VF is broken.
1229                  */
1230
1231                 /* if we are never getting into pending state it means either:
1232                  * 1. PF is not receiving our request which could be due to IMP
1233                  *    reset
1234                  * 2. PF is screwed
1235                  * We cannot do much for 2. but to check, first we can try
1236                  * resetting our PCIe + stack and see if it alleviates the problem.
1237                  */
1238                 if (hdev->reset_attempts > 3) {
1239                         /* prepare for full reset of stack + pcie interface */
1240                         hdev->nic.reset_level = HNAE3_VF_FULL_RESET;
1241
1242                         /* "defer" schedule the reset task again */
1243                         set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
1244                 } else {
1245                         hdev->reset_attempts++;
1246
1247                         /* request PF for resetting this VF via mailbox */
1248                         ret = hclgevf_do_reset(hdev);
1249                         if (ret)
1250                                 dev_warn(&hdev->pdev->dev,
1251                                          "VF rst fail, stack will call\n");
1252                 }
1253         }
1254
1255         clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
1256 }
1257
1258 static void hclgevf_mailbox_service_task(struct work_struct *work)
1259 {
1260         struct hclgevf_dev *hdev;
1261
1262         hdev = container_of(work, struct hclgevf_dev, mbx_service_task);
1263
1264         if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
1265                 return;
1266
1267         clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1268
1269         hclgevf_mbx_async_handler(hdev);
1270
1271         clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1272 }
1273
1274 static void hclgevf_service_task(struct work_struct *work)
1275 {
1276         struct hclgevf_dev *hdev;
1277
1278         hdev = container_of(work, struct hclgevf_dev, service_task);
1279
1280         /* request the link status from the PF. PF would be able to tell VF
1281          * about such updates in the future, so we might remove this later
1282          */
1283         hclgevf_request_link_info(hdev);
1284
1285         hclgevf_deferred_task_schedule(hdev);
1286
1287         clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1288 }
1289
1290 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
1291 {
1292         hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
1293 }
1294
1295 static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
1296 {
1297         u32 cmdq_src_reg;
1298
1299         /* fetch the events from their corresponding regs */
1300         cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
1301                                         HCLGEVF_VECTOR0_CMDQ_SRC_REG);
1302
1303         /* check for vector0 mailbox(=CMDQ RX) event source */
1304         if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
1305                 cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
1306                 *clearval = cmdq_src_reg;
1307                 return true;
1308         }
1309
1310         dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
1311
1312         return false;
1313 }
1314
1315 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
1316 {
1317         writel(en ? 1 : 0, vector->addr);
1318 }
1319
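/* The misc IRQ handler below masks vector 0, handles any mailbox (CMDQ RX)
 * event, acks the event source and unmasks the vector again; interrupts
 * from unknown sources are simply re-enabled without scheduling work.
 */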
1320 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
1321 {
1322         struct hclgevf_dev *hdev = data;
1323         u32 clearval;
1324
1325         hclgevf_enable_vector(&hdev->misc_vector, false);
1326         if (!hclgevf_check_event_cause(hdev, &clearval))
1327                 goto skip_sched;
1328
1329         hclgevf_mbx_handler(hdev);
1330
1331         hclgevf_clear_event_cause(hdev, clearval);
1332
1333 skip_sched:
1334         hclgevf_enable_vector(&hdev->misc_vector, true);
1335
1336         return IRQ_HANDLED;
1337 }
1338
1339 static int hclgevf_configure(struct hclgevf_dev *hdev)
1340 {
1341         int ret;
1342
1343         /* get queue configuration from PF */
1344         ret = hclgevf_get_queue_info(hdev);
1345         if (ret)
1346                 return ret;
1347         /* get tc configuration from PF */
1348         return hclgevf_get_tc_info(hdev);
1349 }
1350
1351 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
1352 {
1353         struct pci_dev *pdev = ae_dev->pdev;
1354         struct hclgevf_dev *hdev;
1355
1356         hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
1357         if (!hdev)
1358                 return -ENOMEM;
1359
1360         hdev->pdev = pdev;
1361         hdev->ae_dev = ae_dev;
1362         ae_dev->priv = hdev;
1363
1364         return 0;
1365 }
1366
1367 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
1368 {
1369         struct hnae3_handle *roce = &hdev->roce;
1370         struct hnae3_handle *nic = &hdev->nic;
1371
1372         roce->rinfo.num_vectors = hdev->num_roce_msix;
1373
1374         if (hdev->num_msi_left < roce->rinfo.num_vectors ||
1375             hdev->num_msi_left == 0)
1376                 return -EINVAL;
1377
1378         roce->rinfo.base_vector = hdev->roce_base_vector;
1379
1380         roce->rinfo.netdev = nic->kinfo.netdev;
1381         roce->rinfo.roce_io_base = hdev->hw.io_base;
1382
1383         roce->pdev = nic->pdev;
1384         roce->ae_algo = nic->ae_algo;
1385         roce->numa_node_mask = nic->numa_node_mask;
1386
1387         return 0;
1388 }
1389
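/* Default RSS setup: spread the indirection table round-robin over the
 * PF-granted rss_size_max queues, push it to hardware and then program the
 * per-TC mode for the same size.
 */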
1390 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
1391 {
1392         struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
1393         int i, ret;
1394
1395         rss_cfg->rss_size = hdev->rss_size_max;
1396
1397         /* Initialize RSS indirect table for each vport */
1398         for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
1399                 rss_cfg->rss_indirection_tbl[i] = i % hdev->rss_size_max;
1400
1401         ret = hclgevf_set_rss_indir_table(hdev);
1402         if (ret)
1403                 return ret;
1404
1405         return hclgevf_set_rss_tc_mode(hdev, hdev->rss_size_max);
1406 }
1407
1408 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
1409 {
1410         /* other vlan config(like, VLAN TX/RX offload) would also be added
1411          * here later
1412          */
1413         return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
1414                                        false);
1415 }
1416
1417 static int hclgevf_ae_start(struct hnae3_handle *handle)
1418 {
1419         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
1420         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1421         int i, queue_id;
1422
1423         for (i = 0; i < kinfo->num_tqps; i++) {
1424                 /* ring enable */
1425                 queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
1426                 if (queue_id < 0) {
1427                         dev_warn(&hdev->pdev->dev,
1428                                  "Got invalid queue id, ignoring it\n");
1429                         continue;
1430                 }
1431
1432                 hclgevf_tqp_enable(hdev, queue_id, 0, true);
1433         }
1434
1435         /* reset tqp stats */
1436         hclgevf_reset_tqp_stats(handle);
1437
1438         hclgevf_request_link_info(hdev);
1439
1440         clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1441         mod_timer(&hdev->service_timer, jiffies + HZ);
1442
1443         return 0;
1444 }
1445
1446 static void hclgevf_ae_stop(struct hnae3_handle *handle)
1447 {
1448         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
1449         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1450         int i, queue_id;
1451
1452         set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1453
1454         for (i = 0; i < kinfo->num_tqps; i++) {
1455                 /* Ring disable */
1456                 queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
1457                 if (queue_id < 0) {
1458                         dev_warn(&hdev->pdev->dev,
1459                                  "Got invalid queue id, ignoring it\n");
1460                         continue;
1461                 }
1462
1463                 hclgevf_tqp_enable(hdev, queue_id, 0, false);
1464         }
1465
1466         /* reset tqp stats */
1467         hclgevf_reset_tqp_stats(handle);
1468         del_timer_sync(&hdev->service_timer);
1469         cancel_work_sync(&hdev->service_task);
1470         clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1471         hclgevf_update_link_status(hdev, 0);
1472 }
1473
1474 static void hclgevf_state_init(struct hclgevf_dev *hdev)
1475 {
1476         /* if this is an ongoing reset then skip this initialization */
1477         if (hclgevf_dev_ongoing_reset(hdev))
1478                 return;
1479
1480         /* setup tasks for the MBX */
1481         INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
1482         clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
1483         clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
1484
1485         /* setup tasks for service timer */
1486         timer_setup(&hdev->service_timer, hclgevf_service_timer, 0);
1487
1488         INIT_WORK(&hdev->service_task, hclgevf_service_task);
1489         clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
1490
1491         INIT_WORK(&hdev->rst_service_task, hclgevf_reset_service_task);
1492
1493         mutex_init(&hdev->mbx_resp.mbx_mutex);
1494
1495         /* bring the device down */
1496         set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1497 }
1498
1499 static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
1500 {
1501         set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
1502
1503         if (hdev->service_timer.function)
1504                 del_timer_sync(&hdev->service_timer);
1505         if (hdev->service_task.func)
1506                 cancel_work_sync(&hdev->service_task);
1507         if (hdev->mbx_service_task.func)
1508                 cancel_work_sync(&hdev->mbx_service_task);
1509         if (hdev->rst_service_task.func)
1510                 cancel_work_sync(&hdev->rst_service_task);
1511
1512         mutex_destroy(&hdev->mbx_resp.mbx_mutex);
1513 }
1514
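/* Allocate the interrupt vectors. When RoCE is supported the device must
 * use MSI-X and needs at least roce_base_msix_offset + 1 vectors so the
 * RoCE vectors that follow the NIC ones get a slot; otherwise plain MSI
 * is an acceptable fallback.
 */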
1515 static int hclgevf_init_msi(struct hclgevf_dev *hdev)
1516 {
1517         struct pci_dev *pdev = hdev->pdev;
1518         int vectors;
1519         int i;
1520
1521         /* if a reset is ongoing then skip this initialization */
1522         if (hclgevf_dev_ongoing_reset(hdev))
1523                 return 0;
1524
1525         if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
1526                 vectors = pci_alloc_irq_vectors(pdev,
1527                                                 hdev->roce_base_msix_offset + 1,
1528                                                 hdev->num_msi,
1529                                                 PCI_IRQ_MSIX);
1530         else
1531                 vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
1532                                                 PCI_IRQ_MSI | PCI_IRQ_MSIX);
1533
1534         if (vectors < 0) {
1535                 dev_err(&pdev->dev,
1536                         "failed(%d) to allocate MSI/MSI-X vectors\n",
1537                         vectors);
1538                 return vectors;
1539         }
1540         if (vectors < hdev->num_msi)
1541                 dev_warn(&hdev->pdev->dev,
1542                          "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n",
1543                          hdev->num_msi, vectors);
1544
1545         hdev->num_msi = vectors;
1546         hdev->num_msi_left = vectors;
1547         hdev->base_msi_vector = pdev->irq;
1548         hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
1549
1550         hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
1551                                            sizeof(u16), GFP_KERNEL);
1552         if (!hdev->vector_status) {
1553                 pci_free_irq_vectors(pdev);
1554                 return -ENOMEM;
1555         }
1556
1557         for (i = 0; i < hdev->num_msi; i++)
1558                 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
1559
1560         hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
1561                                         sizeof(int), GFP_KERNEL);
1562         if (!hdev->vector_irq) {
1563                 pci_free_irq_vectors(pdev);
1564                 return -ENOMEM;
1565         }
1566
1567         return 0;
1568 }
1569
1570 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
1571 {
1572         struct pci_dev *pdev = hdev->pdev;
1573
1574         pci_free_irq_vectors(pdev);
1575 }
1576
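/* Request the "misc" interrupt (vector 0), which services events such as
 * mailbox messages from the PF, and enable it.
 */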
1577 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
1578 {
1579         int ret;
1580
1581         /* if a reset is ongoing then skip this initialization */
1582         if (hclgevf_dev_ongoing_reset(hdev))
1583                 return 0;
1584
1585         hclgevf_get_misc_vector(hdev);
1586
1587         ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
1588                           0, "hclgevf_cmd", hdev);
1589         if (ret) {
1590                 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
1591                         hdev->misc_vector.vector_irq);
1592                 return ret;
1593         }
1594
1595         hclgevf_clear_event_cause(hdev, 0);
1596
1597         /* enable the misc vector (vector 0) */
1598         hclgevf_enable_vector(&hdev->misc_vector, true);
1599
1600         return ret;
1601 }
1602
1603 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
1604 {
1605         /* disable the misc vector (vector 0) */
1606         hclgevf_enable_vector(&hdev->misc_vector, false);
1607         synchronize_irq(hdev->misc_vector.vector_irq);
1608         free_irq(hdev->misc_vector.vector_irq, hdev);
1609         hclgevf_free_vector(hdev, 0);
1610 }
1611
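/* Bind a KNIC, UNIC or RoCE client to this VF. The RoCE instance is only
 * initialized once both the NIC and RoCE clients are registered and the
 * hardware advertises RoCE support.
 */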
1612 static int hclgevf_init_client_instance(struct hnae3_client *client,
1613                                         struct hnae3_ae_dev *ae_dev)
1614 {
1615         struct hclgevf_dev *hdev = ae_dev->priv;
1616         int ret;
1617
1618         switch (client->type) {
1619         case HNAE3_CLIENT_KNIC:
1620                 hdev->nic_client = client;
1621                 hdev->nic.client = client;
1622
1623                 ret = client->ops->init_instance(&hdev->nic);
1624                 if (ret)
1625                         goto clear_nic;
1626
1627                 hnae3_set_client_init_flag(client, ae_dev, 1);
1628
1629                 if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
1630                         struct hnae3_client *rc = hdev->roce_client;
1631
1632                         ret = hclgevf_init_roce_base_info(hdev);
1633                         if (ret)
1634                                 goto clear_roce;
1635                         ret = rc->ops->init_instance(&hdev->roce);
1636                         if (ret)
1637                                 goto clear_roce;
1638
1639                         hnae3_set_client_init_flag(hdev->roce_client, ae_dev,
1640                                                    1);
1641                 }
1642                 break;
1643         case HNAE3_CLIENT_UNIC:
1644                 hdev->nic_client = client;
1645                 hdev->nic.client = client;
1646
1647                 ret = client->ops->init_instance(&hdev->nic);
1648                 if (ret)
1649                         goto clear_nic;
1650
1651                 hnae3_set_client_init_flag(client, ae_dev, 1);
1652                 break;
1653         case HNAE3_CLIENT_ROCE:
1654                 if (hnae3_dev_roce_supported(hdev)) {
1655                         hdev->roce_client = client;
1656                         hdev->roce.client = client;
1657                 }
1658
1659                 if (hdev->roce_client && hdev->nic_client) {
1660                         ret = hclgevf_init_roce_base_info(hdev);
1661                         if (ret)
1662                                 goto clear_roce;
1663
1664                         ret = client->ops->init_instance(&hdev->roce);
1665                         if (ret)
1666                                 goto clear_roce;
1667                 }
1668
1669                 hnae3_set_client_init_flag(client, ae_dev, 1);
1670         }
1671
1672         return 0;
1673
1674 clear_nic:
1675         hdev->nic_client = NULL;
1676         hdev->nic.client = NULL;
1677         return ret;
1678 clear_roce:
1679         hdev->roce_client = NULL;
1680         hdev->roce.client = NULL;
1681         return ret;
1682 }
1683
1684 static void hclgevf_uninit_client_instance(struct hnae3_client *client,
1685                                            struct hnae3_ae_dev *ae_dev)
1686 {
1687         struct hclgevf_dev *hdev = ae_dev->priv;
1688
1689         /* un-init the RoCE client, if it exists */
1690         if (hdev->roce_client) {
1691                 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
1692                 hdev->roce_client = NULL;
1693                 hdev->roce.client = NULL;
1694         }
1695
1696         /* un-init nic/unic, unless this was called by the RoCE client */
1697         if (client->ops->uninit_instance && hdev->nic_client &&
1698             client->type != HNAE3_CLIENT_ROCE) {
1699                 client->ops->uninit_instance(&hdev->nic, 0);
1700                 hdev->nic_client = NULL;
1701                 hdev->nic.client = NULL;
1702         }
1703 }
1704
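/* Enable the PCI device, set up 64-bit DMA, claim the regions and map
 * BAR 2, which holds the configuration register space.
 */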
1705 static int hclgevf_pci_init(struct hclgevf_dev *hdev)
1706 {
1707         struct pci_dev *pdev = hdev->pdev;
1708         struct hclgevf_hw *hw;
1709         int ret;
1710
1711         /* Check if we need to skip initialization of PCI. This happens
1712          * when the device is undergoing a VF reset. In every other case,
1713          * i.e. when the device is not going through *any* reset or is
1714          * undergoing a full reset, the PCI interface must be re-initialized.
1715          */
1716         if (hclgevf_dev_ongoing_reset(hdev))
1717                 return 0;
1718
1719         ret = pci_enable_device(pdev);
1720         if (ret) {
1721                 dev_err(&pdev->dev, "failed to enable PCI device\n");
1722                 return ret;
1723         }
1724
1725         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1726         if (ret) {
1727                 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting\n");
1728                 goto err_disable_device;
1729         }
1730
1731         ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
1732         if (ret) {
1733                 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
1734                 goto err_disable_device;
1735         }
1736
1737         pci_set_master(pdev);
1738         hw = &hdev->hw;
1739         hw->hdev = hdev;
1740         hw->io_base = pci_iomap(pdev, 2, 0);
1741         if (!hw->io_base) {
1742                 dev_err(&pdev->dev, "can't map configuration register space\n");
1743                 ret = -ENOMEM;
1744                 goto err_clr_master;
1745         }
1746
1747         return 0;
1748
1749 err_clr_master:
1750         pci_clear_master(pdev);
1751         pci_release_regions(pdev);
1752 err_disable_device:
1753         pci_disable_device(pdev);
1754
1755         return ret;
1756 }
1757
1758 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
1759 {
1760         struct pci_dev *pdev = hdev->pdev;
1761
1762         pci_iounmap(pdev, hdev->hw.io_base);
1763         pci_clear_master(pdev);
1764         pci_release_regions(pdev);
1765         pci_disable_device(pdev);
1766 }
1767
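/* Query the firmware for the interrupt vectors granted to this VF. With
 * RoCE support, the total is the RoCE vector count plus the offset at
 * which the RoCE vectors start.
 */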
1768 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
1769 {
1770         struct hclgevf_query_res_cmd *req;
1771         struct hclgevf_desc desc;
1772         int ret;
1773
1774         hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
1775         ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
1776         if (ret) {
1777                 dev_err(&hdev->pdev->dev,
1778                         "query vf resource failed, ret = %d.\n", ret);
1779                 return ret;
1780         }
1781
1782         req = (struct hclgevf_query_res_cmd *)desc.data;
1783
1784         if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
1785                 hdev->roce_base_msix_offset =
1786                 hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
1787                                 HCLGEVF_MSIX_OFT_ROCEE_M,
1788                                 HCLGEVF_MSIX_OFT_ROCEE_S);
1789                 hdev->num_roce_msix =
1790                 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
1791                                 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
1792
1793                 /* The VF has both NIC and RoCE vectors; NIC vectors precede
1794                  * the RoCE vectors at the offset read above (fixed to 64).
1795                  */
1796                 hdev->num_msi = hdev->num_roce_msix +
1797                                 hdev->roce_base_msix_offset;
1798         } else {
1799                 hdev->num_msi =
1800                 hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
1801                                 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
1802         }
1803
1804         return 0;
1805 }
1806
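/* Bring up the VF: PCI and the command queue come first so that the
 * resource query and every later step can reach the firmware; vectors,
 * driver state and the misc IRQ follow, and the TQP, MTA, RSS and VLAN
 * configuration come last.
 */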
1807 static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
1808 {
1809         struct pci_dev *pdev = hdev->pdev;
1810         int ret;
1811
1812         /* check if the device is undergoing a full reset (i.e. PCIe as well) */
1813         if (hclgevf_dev_ongoing_full_reset(hdev)) {
1814                 dev_warn(&pdev->dev, "device is undergoing a full reset\n");
1815                 hclgevf_uninit_hdev(hdev);
1816         }
1817
1818         ret = hclgevf_pci_init(hdev);
1819         if (ret) {
1820                 dev_err(&pdev->dev, "PCI initialization failed\n");
1821                 return ret;
1822         }
1823
1824         ret = hclgevf_cmd_init(hdev);
1825         if (ret)
1826                 goto err_cmd_init;
1827
1828         /* get the resources granted to this VF */
1829         ret = hclgevf_query_vf_resource(hdev);
1830         if (ret) {
1831                 dev_err(&hdev->pdev->dev,
1832                         "Query vf resource error, ret = %d.\n", ret);
1833                 goto err_query_vf;
1834         }
1835
1836         ret = hclgevf_init_msi(hdev);
1837         if (ret) {
1838                 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
1839                 goto err_query_vf;
1840         }
1841
1842         hclgevf_state_init(hdev);
1843
1844         ret = hclgevf_misc_irq_init(hdev);
1845         if (ret) {
1846                 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
1847                         ret);
1848                 goto err_misc_irq_init;
1849         }
1850
1851         ret = hclgevf_configure(hdev);
1852         if (ret) {
1853                 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
1854                 goto err_config;
1855         }
1856
1857         ret = hclgevf_alloc_tqps(hdev);
1858         if (ret) {
1859                 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
1860                 goto err_config;
1861         }
1862
1863         ret = hclgevf_set_handle_info(hdev);
1864         if (ret) {
1865                 dev_err(&pdev->dev, "failed(%d) to set handle info\n", ret);
1866                 goto err_config;
1867         }
1868
1869         /* Initialize MTA type for this VF */
1870         ret = hclgevf_cfg_func_mta_type(hdev);
1871         if (ret) {
1872                 dev_err(&hdev->pdev->dev,
1873                         "failed(%d) to initialize MTA type\n", ret);
1874                 goto err_config;
1875         }
1876
1877         /* Initialize RSS for this VF */
1878         ret = hclgevf_rss_init_hw(hdev);
1879         if (ret) {
1880                 dev_err(&hdev->pdev->dev,
1881                         "failed(%d) to initialize RSS\n", ret);
1882                 goto err_config;
1883         }
1884
1885         ret = hclgevf_init_vlan_config(hdev);
1886         if (ret) {
1887                 dev_err(&hdev->pdev->dev,
1888                         "failed(%d) to initialize VLAN config\n", ret);
1889                 goto err_config;
1890         }
1891
1892         pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
1893
1894         return 0;
1895
1896 err_config:
1897         hclgevf_misc_irq_uninit(hdev);
1898 err_misc_irq_init:
1899         hclgevf_state_uninit(hdev);
1900         hclgevf_uninit_msi(hdev);
1901 err_query_vf:
1902         hclgevf_cmd_uninit(hdev);
1903 err_cmd_init:
1904         hclgevf_pci_uninit(hdev);
1905         return ret;
1906 }
1907
1908 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
1909 {
1910         hclgevf_state_uninit(hdev);
1911         hclgevf_misc_irq_uninit(hdev);
1912         hclgevf_cmd_uninit(hdev);
1913         hclgevf_uninit_msi(hdev);
1914         hclgevf_pci_uninit(hdev);
1915 }
1916
1917 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
1918 {
1919         struct pci_dev *pdev = ae_dev->pdev;
1920         int ret;
1921
1922         ret = hclgevf_alloc_hdev(ae_dev);
1923         if (ret) {
1924                 dev_err(&pdev->dev, "hclgevf device allocation failed\n");
1925                 return ret;
1926         }
1927
1928         ret = hclgevf_init_hdev(ae_dev->priv);
1929         if (ret)
1930                 dev_err(&pdev->dev, "hclgevf device initialization failed\n");
1931
1932         return ret;
1933 }
1934
1935 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
1936 {
1937         struct hclgevf_dev *hdev = ae_dev->priv;
1938
1939         hclgevf_uninit_hdev(hdev);
1940         ae_dev->priv = NULL;
1941 }
1942
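/* The usable channel count is bounded both by the RSS size across all TCs
 * and by the number of TQPs assigned to this VF.
 */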
1943 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
1944 {
1945         struct hnae3_handle *nic = &hdev->nic;
1946         struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1947
1948         return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
1949 }
1950
1951 /**
1952  * hclgevf_get_channels - Get the current channels enabled and max supported.
1953  * @handle: hardware information for network interface
1954  * @ch: ethtool channels structure
1955  *
1956  * We don't support separate tx and rx queues as channels. The other count
1957  * represents how many queues are used for control. max_combined counts
1958  * how many queue pairs we can support. They may not be mapped 1:1 with
1959  * q_vectors, since we support many more queue pairs than q_vectors.
1960  **/
1961 static void hclgevf_get_channels(struct hnae3_handle *handle,
1962                                  struct ethtool_channels *ch)
1963 {
1964         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1965
1966         ch->max_combined = hclgevf_get_max_channels(hdev);
1967         ch->other_count = 0;
1968         ch->max_other = 0;
1969         ch->combined_count = hdev->num_tqps;
1970 }
1971
1972 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
1973                                           u16 *free_tqps, u16 *max_rss_size)
1974 {
1975         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1976
1977         *free_tqps = 0;
1978         *max_rss_size = hdev->rss_size_max;
1979 }
1980
1981 static int hclgevf_get_status(struct hnae3_handle *handle)
1982 {
1983         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1984
1985         return hdev->hw.mac.link;
1986 }
1987
1988 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
1989                                             u8 *auto_neg, u32 *speed,
1990                                             u8 *duplex)
1991 {
1992         struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
1993
1994         if (speed)
1995                 *speed = hdev->hw.mac.speed;
1996         if (duplex)
1997                 *duplex = hdev->hw.mac.duplex;
1998         if (auto_neg)
1999                 *auto_neg = AUTONEG_DISABLE;
2000 }
2001
2002 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
2003                                  u8 duplex)
2004 {
2005         hdev->hw.mac.speed = speed;
2006         hdev->hw.mac.duplex = duplex;
2007 }
2008
2009 static const struct hnae3_ae_ops hclgevf_ops = {
2010         .init_ae_dev = hclgevf_init_ae_dev,
2011         .uninit_ae_dev = hclgevf_uninit_ae_dev,
2012         .init_client_instance = hclgevf_init_client_instance,
2013         .uninit_client_instance = hclgevf_uninit_client_instance,
2014         .start = hclgevf_ae_start,
2015         .stop = hclgevf_ae_stop,
2016         .map_ring_to_vector = hclgevf_map_ring_to_vector,
2017         .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
2018         .get_vector = hclgevf_get_vector,
2019         .put_vector = hclgevf_put_vector,
2020         .reset_queue = hclgevf_reset_tqp,
2021         .set_promisc_mode = hclgevf_set_promisc_mode,
2022         .get_mac_addr = hclgevf_get_mac_addr,
2023         .set_mac_addr = hclgevf_set_mac_addr,
2024         .add_uc_addr = hclgevf_add_uc_addr,
2025         .rm_uc_addr = hclgevf_rm_uc_addr,
2026         .add_mc_addr = hclgevf_add_mc_addr,
2027         .rm_mc_addr = hclgevf_rm_mc_addr,
2028         .update_mta_status = hclgevf_update_mta_status,
2029         .get_stats = hclgevf_get_stats,
2030         .update_stats = hclgevf_update_stats,
2031         .get_strings = hclgevf_get_strings,
2032         .get_sset_count = hclgevf_get_sset_count,
2033         .get_rss_key_size = hclgevf_get_rss_key_size,
2034         .get_rss_indir_size = hclgevf_get_rss_indir_size,
2035         .get_rss = hclgevf_get_rss,
2036         .set_rss = hclgevf_set_rss,
2037         .get_tc_size = hclgevf_get_tc_size,
2038         .get_fw_version = hclgevf_get_fw_version,
2039         .set_vlan_filter = hclgevf_set_vlan_filter,
2040         .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
2041         .reset_event = hclgevf_reset_event,
2042         .get_channels = hclgevf_get_channels,
2043         .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
2044         .get_status = hclgevf_get_status,
2045         .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
2046 };
2047
2048 static struct hnae3_ae_algo ae_algovf = {
2049         .ops = &hclgevf_ops,
2050         .pdev_id_table = ae_algovf_pci_tbl,
2051 };
2052
2053 static int hclgevf_init(void)
2054 {
2055         pr_info("%s is initializing\n", HCLGEVF_NAME);
2056
2057         hnae3_register_ae_algo(&ae_algovf);
2058
2059         return 0;
2060 }
2061
2062 static void hclgevf_exit(void)
2063 {
2064         hnae3_unregister_ae_algo(&ae_algovf);
2065 }
2066 module_init(hclgevf_init);
2067 module_exit(hclgevf_exit);
2068
2069 MODULE_LICENSE("GPL");
2070 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
2071 MODULE_DESCRIPTION("HCLGEVF Driver");
2072 MODULE_VERSION(HCLGEVF_MOD_VERSION);