drivers/net/hyperv/netvsc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>

#include <asm/sync_bitops.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 */
void netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct hv_device *dev = net_device_ctx->device_ctx;
        struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
        struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;

        memset(init_pkt, 0, sizeof(struct nvsp_message));
        init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
        if (vf)
                init_pkt->msg.v4_msg.active_dp.active_datapath =
                        NVSP_DATAPATH_VF;
        else
                init_pkt->msg.v4_msg.active_dp.active_datapath =
                        NVSP_DATAPATH_SYNTHETIC;

        trace_nvsp_send(ndev, init_pkt);

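        /* Send the switch message without requesting a completion:
         * the request is fire-and-forget, so the caller does not wait
         * for the host to acknowledge the datapath change.
         */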
        vmbus_sendpacket(dev->channel, init_pkt,
                         sizeof(struct nvsp_message),
                         (unsigned long)init_pkt,
                         VM_PKT_DATA_INBAND, 0);
}

/* Worker to set up sub-channels on initial setup.
 * The initial hotplug event occurs in softirq context
 * and so cannot wait for the channels.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
        struct netvsc_device *nvdev =
                container_of(w, struct netvsc_device, subchan_work);
        struct rndis_device *rdev;
        int i, ret;

        /* Avoid deadlock with device removal already under RTNL */
        if (!rtnl_trylock()) {
                schedule_work(w);
                return;
        }

        rdev = nvdev->extension;
        if (rdev) {
                ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
                if (ret == 0) {
                        netif_device_attach(rdev->ndev);
                } else {
                        /* fall back to the primary channel only */
                        for (i = 1; i < nvdev->num_chn; i++)
                                netif_napi_del(&nvdev->chan_table[i].napi);

                        nvdev->max_chn = 1;
                        nvdev->num_chn = 1;
                }
        }

        rtnl_unlock();
}

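/* Allocate and initialize a netvsc_device. Transmit starts disabled
 * (tx_disable) until the channel setup completes.
 */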
static struct netvsc_device *alloc_net_device(void)
{
        struct netvsc_device *net_device;

        net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
        if (!net_device)
                return NULL;

        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
        net_device->tx_disable = true;

        net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
        net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;

        init_completion(&net_device->channel_init_wait);
        init_waitqueue_head(&net_device->subchan_open);
        INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);

        return net_device;
}

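/* RCU callback that frees the device and its per-channel state once all
 * RCU readers are done with it.
 */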
static void free_netvsc_device(struct rcu_head *head)
{
        struct netvsc_device *nvdev
                = container_of(head, struct netvsc_device, rcu);
        int i;

        kfree(nvdev->extension);
        vfree(nvdev->recv_buf);
        vfree(nvdev->send_buf);
        kfree(nvdev->send_section_map);

        for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
                xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
                vfree(nvdev->chan_table[i].mrc.slots);
        }

        kfree(nvdev);
}

static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
        call_rcu(&nvdev->rcu, free_netvsc_device);
}

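/* Revoke the receive buffer from the host (NVSP_MSG1_TYPE_REVOKE_RECV_BUF)
 * if one was previously granted, so the host stops using it before the
 * backing GPADL is torn down.
 */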
static void netvsc_revoke_recv_buf(struct hv_device *device,
                                   struct netvsc_device *net_device,
                                   struct net_device *ndev)
{
        struct nvsp_message *revoke_packet;
        int ret;

        /*
         * If we got a section count, it means we received a
         * SendReceiveBufferComplete msg (i.e. we sent a
         * NvspMessage1TypeSendReceiveBuffer msg); therefore, we need
         * to send a revoke msg here.
         */
        if (net_device->recv_section_cnt) {
                /* Send the revoke receive buffer */
                revoke_packet = &net_device->revoke_packet;
                memset(revoke_packet, 0, sizeof(struct nvsp_message));

                revoke_packet->hdr.msg_type =
                        NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
                revoke_packet->msg.v1_msg.revoke_recv_buf.id =
                        NETVSC_RECEIVE_BUFFER_ID;

                trace_nvsp_send(ndev, revoke_packet);

                ret = vmbus_sendpacket(device->channel,
                                       revoke_packet,
                                       sizeof(struct nvsp_message),
                                       (unsigned long)revoke_packet,
                                       VM_PKT_DATA_INBAND, 0);
                /* If the failure is because the channel is rescinded,
                 * ignore the failure since we cannot send on a rescinded
                 * channel. This allows us to clean up properly
                 * even when the channel is rescinded.
                 */
                if (device->channel->rescind)
                        ret = 0;
                /*
                 * If we failed here, we might as well return and
                 * have a leak rather than continue and risk a bugcheck.
                 */
                if (ret != 0) {
                        netdev_err(ndev,
                                   "unable to send revoke receive buffer to netvsp\n");
                        return;
                }
                net_device->recv_section_cnt = 0;
        }
}

static void netvsc_revoke_send_buf(struct hv_device *device,
                                   struct netvsc_device *net_device,
                                   struct net_device *ndev)
{
        struct nvsp_message *revoke_packet;
        int ret;

        /* Deal with the send buffer we may have set up.
         * If we got a send section size, it means we received a
         * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (i.e. we sent a
         * NVSP_MSG1_TYPE_SEND_SEND_BUF msg); therefore, we need
         * to send a revoke msg here.
         */
        if (net_device->send_section_cnt) {
                /* Send the revoke send buffer */
                revoke_packet = &net_device->revoke_packet;
                memset(revoke_packet, 0, sizeof(struct nvsp_message));

                revoke_packet->hdr.msg_type =
                        NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
                revoke_packet->msg.v1_msg.revoke_send_buf.id =
                        NETVSC_SEND_BUFFER_ID;

                trace_nvsp_send(ndev, revoke_packet);

                ret = vmbus_sendpacket(device->channel,
                                       revoke_packet,
                                       sizeof(struct nvsp_message),
                                       (unsigned long)revoke_packet,
                                       VM_PKT_DATA_INBAND, 0);

                /* If the failure is because the channel is rescinded,
                 * ignore the failure since we cannot send on a rescinded
                 * channel. This allows us to clean up properly
                 * even when the channel is rescinded.
                 */
                if (device->channel->rescind)
                        ret = 0;

                /* If we failed here, we might as well return and
                 * have a leak rather than continue and risk a bugcheck.
                 */
                if (ret != 0) {
                        netdev_err(ndev,
                                   "unable to send revoke send buffer to netvsp\n");
                        return;
                }
                net_device->send_section_cnt = 0;
        }
}

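/* Tear down the GPADL (guest physical address descriptor list) that maps
 * the receive buffer into the host, if one is established.
 */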
static void netvsc_teardown_recv_gpadl(struct hv_device *device,
                                       struct netvsc_device *net_device,
                                       struct net_device *ndev)
{
        int ret;

        if (net_device->recv_buf_gpadl_handle) {
                ret = vmbus_teardown_gpadl(device->channel,
                                           net_device->recv_buf_gpadl_handle);

                /* If we failed here, we might as well return and have a leak
                 * rather than continue and risk a bugcheck.
                 */
                if (ret != 0) {
                        netdev_err(ndev,
                                   "unable to teardown receive buffer's gpadl\n");
                        return;
                }
                net_device->recv_buf_gpadl_handle = 0;
        }
}

static void netvsc_teardown_send_gpadl(struct hv_device *device,
                                       struct netvsc_device *net_device,
                                       struct net_device *ndev)
{
        int ret;

        if (net_device->send_buf_gpadl_handle) {
                ret = vmbus_teardown_gpadl(device->channel,
                                           net_device->send_buf_gpadl_handle);

                /* If we failed here, we might as well return and have a leak
                 * rather than continue and risk a bugcheck.
                 */
                if (ret != 0) {
                        netdev_err(ndev,
                                   "unable to teardown send buffer's gpadl\n");
                        return;
                }
                net_device->send_buf_gpadl_handle = 0;
        }
}

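/* Allocate the receive-completion ring for a queue, preferring memory on
 * the NUMA node of the channel's target CPU and falling back to any node.
 */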
int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
{
        struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
        int node = cpu_to_node(nvchan->channel->target_cpu);
        size_t size;

        size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data);
        nvchan->mrc.slots = vzalloc_node(size, node);
        if (!nvchan->mrc.slots)
                nvchan->mrc.slots = vzalloc(size);

        return nvchan->mrc.slots ? 0 : -ENOMEM;
}

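/* Allocate the receive and send buffers, establish their GPADLs on the
 * channel, hand the handles to the host (NetVSP), and parse the section
 * layout the host returns. On any failure, revoke and tear down whatever
 * was already set up.
 */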
static int netvsc_init_buf(struct hv_device *device,
                           struct netvsc_device *net_device,
                           const struct netvsc_device_info *device_info)
{
        struct nvsp_1_message_send_receive_buffer_complete *resp;
        struct net_device *ndev = hv_get_drvdata(device);
        struct nvsp_message *init_packet;
        unsigned int buf_size;
        size_t map_words;
        int ret = 0;

        /* Get receive buffer area. */
        buf_size = device_info->recv_sections * device_info->recv_section_size;
        buf_size = roundup(buf_size, PAGE_SIZE);

        /* Legacy hosts only allow a smaller receive buffer */
        if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
                buf_size = min_t(unsigned int, buf_size,
                                 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

        net_device->recv_buf = vzalloc(buf_size);
        if (!net_device->recv_buf) {
                netdev_err(ndev,
                           "unable to allocate receive buffer of size %u\n",
                           buf_size);
                ret = -ENOMEM;
                goto cleanup;
        }

        net_device->recv_buf_size = buf_size;

        /*
         * Establish the gpadl handle for this buffer on this
         * channel.  Note: This call uses the vmbus connection rather
         * than the channel to establish the gpadl handle.
         */
        ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
                                    buf_size,
                                    &net_device->recv_buf_gpadl_handle);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to establish receive buffer's gpadl\n");
                goto cleanup;
        }

        /* Notify the NetVsp of the gpadl handle */
        init_packet = &net_device->channel_init_pkt;
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
        init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
                net_device->recv_buf_gpadl_handle;
        init_packet->msg.v1_msg.send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

        trace_nvsp_send(ndev, init_packet);

        /* Send the gpadl notification request */
        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to send receive buffer's gpadl to netvsp\n");
                goto cleanup;
        }

        wait_for_completion(&net_device->channel_init_wait);

        /* Check the response */
        resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
        if (resp->status != NVSP_STAT_SUCCESS) {
                netdev_err(ndev,
                           "Unable to complete receive buffer initialization with NetVsp - status %d\n",
                           resp->status);
                ret = -EINVAL;
                goto cleanup;
        }

        /* Parse the response */
        netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
                   resp->num_sections, resp->sections[0].sub_alloc_size,
                   resp->sections[0].num_sub_allocs);

        /* There should only be one section for the entire receive buffer */
        if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
                ret = -EINVAL;
                goto cleanup;
        }

        net_device->recv_section_size = resp->sections[0].sub_alloc_size;
        net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

        /* Ensure the buffer will not overflow */
        if (net_device->recv_section_size < NETVSC_MTU_MIN ||
            (u64)net_device->recv_section_size *
            (u64)net_device->recv_section_cnt > (u64)buf_size) {
                netdev_err(ndev, "invalid recv_section_size %u\n",
                           net_device->recv_section_size);
                ret = -EINVAL;
                goto cleanup;
        }

        /* Set up the receive completion ring.
         * Add 1 to recv_section_cnt because at least one entry in a
         * ring buffer has to be empty.
         */
        net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
        ret = netvsc_alloc_recv_comp_ring(net_device, 0);
        if (ret)
                goto cleanup;

        /* Now set up the send buffer. */
        buf_size = device_info->send_sections * device_info->send_section_size;
        buf_size = round_up(buf_size, PAGE_SIZE);

        net_device->send_buf = vzalloc(buf_size);
        if (!net_device->send_buf) {
                netdev_err(ndev, "unable to allocate send buffer of size %u\n",
                           buf_size);
                ret = -ENOMEM;
                goto cleanup;
        }

        /* Establish the gpadl handle for this buffer on this
         * channel.  Note: This call uses the vmbus connection rather
         * than the channel to establish the gpadl handle.
         */
        ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
                                    buf_size,
                                    &net_device->send_buf_gpadl_handle);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to establish send buffer's gpadl\n");
                goto cleanup;
        }

        /* Notify the NetVsp of the gpadl handle */
        init_packet = &net_device->channel_init_pkt;
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
        init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
                net_device->send_buf_gpadl_handle;
        init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

        trace_nvsp_send(ndev, init_packet);

        /* Send the gpadl notification request */
        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0) {
                netdev_err(ndev,
                           "unable to send send buffer's gpadl to netvsp\n");
                goto cleanup;
        }

        wait_for_completion(&net_device->channel_init_wait);

        /* Check the response */
        if (init_packet->msg.v1_msg.send_send_buf_complete.status !=
            NVSP_STAT_SUCCESS) {
                netdev_err(ndev,
                           "Unable to complete send buffer initialization with NetVsp - status %d\n",
                           init_packet->msg.v1_msg.send_send_buf_complete.status);
                ret = -EINVAL;
                goto cleanup;
        }

        /* Parse the response */
        net_device->send_section_size =
                init_packet->msg.v1_msg.send_send_buf_complete.section_size;
        if (net_device->send_section_size < NETVSC_MTU_MIN) {
                netdev_err(ndev, "invalid send_section_size %u\n",
                           net_device->send_section_size);
                ret = -EINVAL;
                goto cleanup;
        }

        /* The section count is simply the buffer size divided by the section size. */
        net_device->send_section_cnt = buf_size / net_device->send_section_size;

        netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
                   net_device->send_section_size, net_device->send_section_cnt);

        /* Set up state for managing the send buffer. */
        map_words = DIV_ROUND_UP(net_device->send_section_cnt, BITS_PER_LONG);

        net_device->send_section_map = kcalloc(map_words, sizeof(ulong), GFP_KERNEL);
        if (net_device->send_section_map == NULL) {
                ret = -ENOMEM;
                goto cleanup;
        }

        goto exit;

cleanup:
        netvsc_revoke_recv_buf(device, net_device, ndev);
        netvsc_revoke_send_buf(device, net_device, ndev);
        netvsc_teardown_recv_gpadl(device, net_device, ndev);
        netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
        return ret;
}

/* Negotiate NVSP protocol version */
static int negotiate_nvsp_ver(struct hv_device *device,
                              struct netvsc_device *net_device,
                              struct nvsp_message *init_packet,
                              u32 nvsp_ver)
{
        struct net_device *ndev = hv_get_drvdata(device);
        int ret;

        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
        init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
        init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
        trace_nvsp_send(ndev, init_packet);

        /* Send the init request */
        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

        if (ret != 0)
                return ret;

        wait_for_completion(&net_device->channel_init_wait);

        if (init_packet->msg.init_msg.init_complete.status !=
            NVSP_STAT_SUCCESS)
                return -EINVAL;

        if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
                return 0;

        /* NVSPv2 or later: Send NDIS config */
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
        init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
        init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

        if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
                init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

                /* Teaming bit is needed to receive link speed updates */
                init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
        }

        if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
                init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

        trace_nvsp_send(ndev, init_packet);

        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND, 0);

        return ret;
}

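/* Connect to the host's NetVSP: negotiate the highest NVSP version both
 * sides support, report the NDIS version, then set up the data buffers.
 */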
static int netvsc_connect_vsp(struct hv_device *device,
                              struct netvsc_device *net_device,
                              const struct netvsc_device_info *device_info)
{
        struct net_device *ndev = hv_get_drvdata(device);
        static const u32 ver_list[] = {
                NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
                NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
                NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
        };
        struct nvsp_message *init_packet;
        int ndis_version, i, ret;

        init_packet = &net_device->channel_init_pkt;

        /* Negotiate the latest NVSP protocol supported */
        for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
                if (negotiate_nvsp_ver(device, net_device, init_packet,
                                       ver_list[i]) == 0) {
                        net_device->nvsp_version = ver_list[i];
                        break;
                }

        if (i < 0) {
                ret = -EPROTO;
                goto cleanup;
        }

        pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

        /* Send the ndis version */
        memset(init_packet, 0, sizeof(struct nvsp_message));

        if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
                ndis_version = 0x00060001;
        else
                ndis_version = 0x0006001e;

        init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
        init_packet->msg.v1_msg.send_ndis_ver.ndis_major_ver =
                (ndis_version & 0xFFFF0000) >> 16;
        init_packet->msg.v1_msg.send_ndis_ver.ndis_minor_ver =
                ndis_version & 0xFFFF;

        trace_nvsp_send(ndev, init_packet);

        /* Send the init request */
        ret = vmbus_sendpacket(device->channel, init_packet,
                               sizeof(struct nvsp_message),
                               (unsigned long)init_packet,
                               VM_PKT_DATA_INBAND, 0);
        if (ret != 0)
                goto cleanup;

        ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
        return ret;
}

/*
 * netvsc_device_remove - Callback when the root bus device is removed
 */
void netvsc_device_remove(struct hv_device *device)
{
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct netvsc_device *net_device
                = rtnl_dereference(net_device_ctx->nvdev);
        int i;

        /*
         * Revoke the receive buffer. If the host is pre-Win2016 then also
         * tear down the receive buffer GPADL. Do the same for the send buffer.
         */
        netvsc_revoke_recv_buf(device, net_device, ndev);
        if (vmbus_proto_version < VERSION_WIN10)
                netvsc_teardown_recv_gpadl(device, net_device, ndev);

        netvsc_revoke_send_buf(device, net_device, ndev);
        if (vmbus_proto_version < VERSION_WIN10)
                netvsc_teardown_send_gpadl(device, net_device, ndev);

        RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

        /* Disable NAPI and disassociate its context from the device. */
        for (i = 0; i < net_device->num_chn; i++) {
                /* See also vmbus_reset_channel_cb(). */
                napi_disable(&net_device->chan_table[i].napi);
                netif_napi_del(&net_device->chan_table[i].napi);
        }

        /*
         * At this point, no one should be accessing net_device
         * except in here.
         */
        netdev_dbg(ndev, "net device safe to remove\n");

        /* Now, we can close the channel safely */
        vmbus_close(device->channel);

        /*
         * If the host is Win2016 or higher then we do the GPADL teardown
         * here, after VMBus is closed.
         */
        if (vmbus_proto_version >= VERSION_WIN10) {
                netvsc_teardown_recv_gpadl(device, net_device, ndev);
                netvsc_teardown_send_gpadl(device, net_device, ndev);
        }

        /* Release all resources */
        free_netvsc_device_rcu(net_device);
}

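/* Flow-control watermarks, as a percentage of free space in the outbound
 * VMBus ring: a transmit queue stopped for lack of ring space is only
 * woken once more than RING_AVAIL_PERCENT_HIWATER percent is free again,
 * and sends stop the queue when free space falls below
 * RING_AVAIL_PERCENT_LOWATER.
 */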
#define RING_AVAIL_PERCENT_HIWATER 20
#define RING_AVAIL_PERCENT_LOWATER 10

static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
                                         u32 index)
{
        sync_change_bit(index, net_device->send_section_map);
}

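/* Handle the host's completion for a previously sent RNDIS packet: release
 * the send-buffer slot, update per-queue transmit stats, free the skb, and
 * wake the transmit queue if enough ring space has become available.
 */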
static void netvsc_send_tx_complete(struct net_device *ndev,
                                    struct netvsc_device *net_device,
                                    struct vmbus_channel *channel,
                                    const struct vmpacket_descriptor *desc,
                                    int budget)
{
        struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
        struct net_device_context *ndev_ctx = netdev_priv(ndev);
        u16 q_idx = 0;
        int queue_sends;

        /* Notify the layer above us */
        if (likely(skb)) {
                const struct hv_netvsc_packet *packet
                        = (struct hv_netvsc_packet *)skb->cb;
                u32 send_index = packet->send_buf_index;
                struct netvsc_stats *tx_stats;

                if (send_index != NETVSC_INVALID_INDEX)
                        netvsc_free_send_slot(net_device, send_index);
                q_idx = packet->q_idx;

                tx_stats = &net_device->chan_table[q_idx].tx_stats;

                u64_stats_update_begin(&tx_stats->syncp);
                tx_stats->packets += packet->total_packets;
                tx_stats->bytes += packet->total_bytes;
                u64_stats_update_end(&tx_stats->syncp);

                napi_consume_skb(skb, budget);
        }

        queue_sends =
                atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

        if (unlikely(net_device->destroy)) {
                if (queue_sends == 0)
                        wake_up(&net_device->wait_drain);
        } else {
                struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

                if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
                    (hv_get_avail_to_write_percent(&channel->outbound) >
                     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
                        netif_tx_wake_queue(txq);
                        ndev_ctx->eth_stats.wake_queue++;
                }
        }
}

static void netvsc_send_completion(struct net_device *ndev,
                                   struct netvsc_device *net_device,
                                   struct vmbus_channel *incoming_channel,
                                   const struct vmpacket_descriptor *desc,
                                   int budget)
{
        const struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
        u32 msglen = hv_pkt_datalen(desc);

        /* Ensure packet is big enough to read header fields */
        if (msglen < sizeof(struct nvsp_message_header)) {
                netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
                return;
        }

        switch (nvsp_packet->hdr.msg_type) {
        case NVSP_MSG_TYPE_INIT_COMPLETE:
                if (msglen < sizeof(struct nvsp_message_header) +
                                sizeof(struct nvsp_message_init_complete)) {
                        netdev_err(ndev, "nvsp_msg length too small: %u\n",
                                   msglen);
                        return;
                }
                fallthrough;

        case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
                if (msglen < sizeof(struct nvsp_message_header) +
                                sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
                        netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
                                   msglen);
                        return;
                }
                fallthrough;

        case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
                if (msglen < sizeof(struct nvsp_message_header) +
                                sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
                        netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
                                   msglen);
                        return;
                }
                fallthrough;

        case NVSP_MSG5_TYPE_SUBCHANNEL:
                if (msglen < sizeof(struct nvsp_message_header) +
                                sizeof(struct nvsp_5_subchannel_complete)) {
                        netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
                                   msglen);
                        return;
                }
                /* Copy the response back */
                memcpy(&net_device->channel_init_pkt, nvsp_packet,
                       sizeof(struct nvsp_message));
                complete(&net_device->channel_init_wait);
                break;

        case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
                netvsc_send_tx_complete(ndev, net_device, incoming_channel,
                                        desc, budget);
                break;

        default:
                netdev_err(ndev,
                           "Unknown send completion type %d received!!\n",
                           nvsp_packet->hdr.msg_type);
        }
}

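/* Find and claim a free section in the send buffer. The bitmap is scanned
 * for a clear bit, which is then set atomically; returns
 * NETVSC_INVALID_INDEX when every section is in use.
 */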
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
        unsigned long *map_addr = net_device->send_section_map;
        unsigned int i;

        for_each_clear_bit(i, map_addr, net_device->send_section_cnt) {
                if (sync_test_and_set_bit(i, map_addr) == 0)
                        return i;
        }

        return NETVSC_INVALID_INDEX;
}

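/* Copy a packet's page fragments into its reserved section of the send
 * buffer, appended at offset pend_size. When the stack indicates more
 * packets will follow (xmit_more), the data is zero-padded up to the
 * device's packet alignment so the next packet can be batched behind it.
 */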
static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
                                    unsigned int section_index,
                                    u32 pend_size,
                                    struct hv_netvsc_packet *packet,
                                    struct rndis_message *rndis_msg,
                                    struct hv_page_buffer *pb,
                                    bool xmit_more)
{
        char *start = net_device->send_buf;
        char *dest = start + (section_index * net_device->send_section_size)
                     + pend_size;
        int i;
        u32 padding = 0;
        u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
                packet->page_buf_cnt;
        u32 remain;

        /* Add padding */
        remain = packet->total_data_buflen & (net_device->pkt_align - 1);
        if (xmit_more && remain) {
                padding = net_device->pkt_align - remain;
                rndis_msg->msg_len += padding;
                packet->total_data_buflen += padding;
        }

        for (i = 0; i < page_count; i++) {
                char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT);
                u32 offset = pb[i].offset;
                u32 len = pb[i].len;

                memcpy(dest, (src + offset), len);
                dest += len;
        }

        if (padding)
                memset(dest, 0, padding);
}

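/* Build an NVSP_MSG1_TYPE_SEND_RNDIS_PKT message and hand one packet to
 * the host, either referencing a send-buffer section (data already copied
 * there) or with a page-buffer list. Applies ring-space flow control: the
 * queue is stopped when free ring space drops below the low watermark or
 * the send returns -EAGAIN.
 */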
static inline int netvsc_send_pkt(
        struct hv_device *device,
        struct hv_netvsc_packet *packet,
        struct netvsc_device *net_device,
        struct hv_page_buffer *pb,
        struct sk_buff *skb)
{
        struct nvsp_message nvmsg;
        struct nvsp_1_message_send_rndis_packet *rpkt =
                &nvmsg.msg.v1_msg.send_rndis_pkt;
        struct netvsc_channel * const nvchan =
                &net_device->chan_table[packet->q_idx];
        struct vmbus_channel *out_channel = nvchan->channel;
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *ndev_ctx = netdev_priv(ndev);
        struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
        u64 req_id;
        int ret;
        u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);

        nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
        if (skb)
                rpkt->channel_type = 0;         /* 0 is RMC_DATA */
        else
                rpkt->channel_type = 1;         /* 1 is RMC_CONTROL */

        rpkt->send_buf_section_index = packet->send_buf_index;
        if (packet->send_buf_index == NETVSC_INVALID_INDEX)
                rpkt->send_buf_section_size = 0;
        else
                rpkt->send_buf_section_size = packet->total_data_buflen;

        req_id = (ulong)skb;

        if (out_channel->rescind)
                return -ENODEV;

        trace_nvsp_send_pkt(ndev, out_channel, rpkt);

        if (packet->page_buf_cnt) {
                if (packet->cp_partial)
                        pb += packet->rmsg_pgcnt;

                ret = vmbus_sendpacket_pagebuffer(out_channel,
                                                  pb, packet->page_buf_cnt,
                                                  &nvmsg, sizeof(nvmsg),
                                                  req_id);
        } else {
                ret = vmbus_sendpacket(out_channel,
                                       &nvmsg, sizeof(nvmsg),
                                       req_id, VM_PKT_DATA_INBAND,
                                       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        }

        if (ret == 0) {
                atomic_inc_return(&nvchan->queue_sends);

                if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
                        netif_tx_stop_queue(txq);
                        ndev_ctx->eth_stats.stop_queue++;
                }
        } else if (ret == -EAGAIN) {
                netif_tx_stop_queue(txq);
                ndev_ctx->eth_stats.stop_queue++;
        } else {
                netdev_err(ndev,
                           "Unable to send packet pages %u len %u, ret %d\n",
                           packet->page_buf_cnt, packet->total_data_buflen,
                           ret);
        }

        if (netif_tx_queue_stopped(txq) &&
            atomic_read(&nvchan->queue_sends) < 1 &&
            !net_device->tx_disable) {
                netif_tx_wake_queue(txq);
                ndev_ctx->eth_stats.wake_queue++;
                if (ret == -EAGAIN)
                        ret = -ENOSPC;
        }

        return ret;
}

/* Move packet out of multi send data (msd), and clear msd */
static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
                                struct sk_buff **msd_skb,
                                struct multi_send_data *msdp)
{
        *msd_skb = msdp->skb;
        *msd_send = msdp->pkt;
        msdp->skb = NULL;
        msdp->pkt = NULL;
        msdp->count = 0;
}

/* RCU already held by caller */
int netvsc_send(struct net_device *ndev,
                struct hv_netvsc_packet *packet,
                struct rndis_message *rndis_msg,
                struct hv_page_buffer *pb,
                struct sk_buff *skb,
                bool xdp_tx)
{
        struct net_device_context *ndev_ctx = netdev_priv(ndev);
        struct netvsc_device *net_device
                = rcu_dereference_bh(ndev_ctx->nvdev);
        struct hv_device *device = ndev_ctx->device_ctx;
        int ret = 0;
        struct netvsc_channel *nvchan;
        u32 pktlen = packet->total_data_buflen, msd_len = 0;
        unsigned int section_index = NETVSC_INVALID_INDEX;
        struct multi_send_data *msdp;
        struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
        struct sk_buff *msd_skb = NULL;
        bool try_batch, xmit_more;

        /* If the device is rescinded, return an error and the packet will get dropped. */
        if (unlikely(!net_device || net_device->destroy))
                return -ENODEV;

        nvchan = &net_device->chan_table[packet->q_idx];
        packet->send_buf_index = NETVSC_INVALID_INDEX;
        packet->cp_partial = false;

        /* Send a control message or XDP packet directly without accessing
         * the msd (Multi-Send Data) field, which may be changed during data
         * packet processing.
         */
        if (!skb || xdp_tx)
                return netvsc_send_pkt(device, packet, net_device, pb, skb);

        /* batch packets in send buffer if possible */
        msdp = &nvchan->msd;
        if (msdp->pkt)
                msd_len = msdp->pkt->total_data_buflen;

        try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
        if (try_batch && msd_len + pktlen + net_device->pkt_align <
            net_device->send_section_size) {
                section_index = msdp->pkt->send_buf_index;

        } else if (try_batch && msd_len + packet->rmsg_size <
                   net_device->send_section_size) {
                section_index = msdp->pkt->send_buf_index;
                packet->cp_partial = true;

        } else if (pktlen + net_device->pkt_align <
                   net_device->send_section_size) {
                section_index = netvsc_get_next_send_section(net_device);
                if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
                        ++ndev_ctx->eth_stats.tx_send_full;
                } else {
                        move_pkt_msd(&msd_send, &msd_skb, msdp);
                        msd_len = 0;
                }
        }

        /* Keep aggregating only if the stack says more data is coming,
         * we are not doing a mixed-mode send, and the queue is not
         * flow blocked.
         */
        xmit_more = netdev_xmit_more() &&
                !packet->cp_partial &&
                !netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

        if (section_index != NETVSC_INVALID_INDEX) {
                netvsc_copy_to_send_buf(net_device,
                                        section_index, msd_len,
                                        packet, rndis_msg, pb, xmit_more);

                packet->send_buf_index = section_index;

                if (packet->cp_partial) {
                        packet->page_buf_cnt -= packet->rmsg_pgcnt;
                        packet->total_data_buflen = msd_len + packet->rmsg_size;
                } else {
                        packet->page_buf_cnt = 0;
                        packet->total_data_buflen += msd_len;
                }

                if (msdp->pkt) {
                        packet->total_packets += msdp->pkt->total_packets;
                        packet->total_bytes += msdp->pkt->total_bytes;
                }

                if (msdp->skb)
                        dev_consume_skb_any(msdp->skb);

                if (xmit_more) {
                        msdp->skb = skb;
                        msdp->pkt = packet;
                        msdp->count++;
                } else {
                        cur_send = packet;
                        msdp->skb = NULL;
                        msdp->pkt = NULL;
                        msdp->count = 0;
                }
        } else {
                move_pkt_msd(&msd_send, &msd_skb, msdp);
                cur_send = packet;
        }

        if (msd_send) {
                int m_ret = netvsc_send_pkt(device, msd_send, net_device,
                                            NULL, msd_skb);

                if (m_ret != 0) {
                        netvsc_free_send_slot(net_device,
                                              msd_send->send_buf_index);
                        dev_kfree_skb_any(msd_skb);
                }
        }

        if (cur_send)
                ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

        if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
                netvsc_free_send_slot(net_device, section_index);

        return ret;
}

/* Send pending recv completions */
static int send_recv_completions(struct net_device *ndev,
                                 struct netvsc_device *nvdev,
                                 struct netvsc_channel *nvchan)
{
        struct multi_recv_comp *mrc = &nvchan->mrc;
        struct recv_comp_msg {
                struct nvsp_message_header hdr;
                u32 status;
        } __packed;
        struct recv_comp_msg msg = {
                .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
        };
        int ret;

        while (mrc->first != mrc->next) {
                const struct recv_comp_data *rcd
                        = mrc->slots + mrc->first;

                msg.status = rcd->status;
                ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
                                       rcd->tid, VM_PKT_COMP, 0);
                if (unlikely(ret)) {
                        struct net_device_context *ndev_ctx = netdev_priv(ndev);

                        ++ndev_ctx->eth_stats.rx_comp_busy;
                        return ret;
                }

                if (++mrc->first == nvdev->recv_completion_cnt)
                        mrc->first = 0;
        }

        /* receive completion ring has been emptied */
        if (unlikely(nvdev->destroy))
                wake_up(&nvdev->wait_drain);

        return 0;
}

/* Count how many receive completions are outstanding */
static void recv_comp_slot_avail(const struct netvsc_device *nvdev,
                                 const struct multi_recv_comp *mrc,
                                 u32 *filled, u32 *avail)
{
        u32 count = nvdev->recv_completion_cnt;

        if (mrc->next >= mrc->first)
                *filled = mrc->next - mrc->first;
        else
                *filled = (count - mrc->first) + mrc->next;

        *avail = count - *filled - 1;
}

/* Add receive complete to ring to send to host. */
static void enq_receive_complete(struct net_device *ndev,
                                 struct netvsc_device *nvdev, u16 q_idx,
                                 u64 tid, u32 status)
{
        struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
        struct multi_recv_comp *mrc = &nvchan->mrc;
        struct recv_comp_data *rcd;
        u32 filled, avail;

        recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

        if (unlikely(filled > NAPI_POLL_WEIGHT)) {
                send_recv_completions(ndev, nvdev, nvchan);
                recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
        }

        if (unlikely(!avail)) {
                netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
                           q_idx, tid);
                return;
        }

        rcd = mrc->slots + mrc->next;
        rcd->tid = tid;
        rcd->status = status;

        if (++mrc->next == nvdev->recv_completion_cnt)
                mrc->next = 0;
}

static int netvsc_receive(struct net_device *ndev,
                          struct netvsc_device *net_device,
                          struct netvsc_channel *nvchan,
                          const struct vmpacket_descriptor *desc)
{
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct vmbus_channel *channel = nvchan->channel;
        const struct vmtransfer_page_packet_header *vmxferpage_packet
                = container_of(desc, const struct vmtransfer_page_packet_header, d);
        const struct nvsp_message *nvsp = hv_pkt_data(desc);
        u32 msglen = hv_pkt_datalen(desc);
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
        char *recv_buf = net_device->recv_buf;
        u32 status = NVSP_STAT_SUCCESS;
        int i;
        int count = 0;

        /* Ensure packet is big enough to read header fields */
        if (msglen < sizeof(struct nvsp_message_header)) {
                netif_err(net_device_ctx, rx_err, ndev,
                          "invalid nvsp header, length too small: %u\n",
                          msglen);
                return 0;
        }

        /* Make sure this is a valid nvsp packet */
        if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
                netif_err(net_device_ctx, rx_err, ndev,
                          "Unknown nvsp packet type received %u\n",
                          nvsp->hdr.msg_type);
                return 0;
        }

        /* Validate xfer page pkt header */
        if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
                netif_err(net_device_ctx, rx_err, ndev,
                          "Invalid xfer page pkt, offset too small: %u\n",
                          desc->offset8 << 3);
                return 0;
        }

        if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
                netif_err(net_device_ctx, rx_err, ndev,
                          "Invalid xfer page set id - expecting %x got %x\n",
                          NETVSC_RECEIVE_BUFFER_ID,
                          vmxferpage_packet->xfer_pageset_id);
                return 0;
        }

        count = vmxferpage_packet->range_cnt;

        /* Check count for a valid value */
        if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
                netif_err(net_device_ctx, rx_err, ndev,
                          "Range count is not valid: %d\n",
                          count);
                return 0;
        }

        /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
        for (i = 0; i < count; i++) {
                u32 offset = vmxferpage_packet->ranges[i].byte_offset;
                u32 buflen = vmxferpage_packet->ranges[i].byte_count;
                void *data;
                int ret;

                if (unlikely(offset > net_device->recv_buf_size ||
                             buflen > net_device->recv_buf_size - offset)) {
                        nvchan->rsc.cnt = 0;
                        status = NVSP_STAT_FAIL;
                        netif_err(net_device_ctx, rx_err, ndev,
                                  "Packet offset:%u + len:%u too big\n",
                                  offset, buflen);

                        continue;
                }

                data = recv_buf + offset;

                nvchan->rsc.is_last = (i == count - 1);

                trace_rndis_recv(ndev, q_idx, data);

                /* Pass it to the upper layer */
                ret = rndis_filter_receive(ndev, net_device,
                                           nvchan, data, buflen);

                if (unlikely(ret != NVSP_STAT_SUCCESS))
                        status = NVSP_STAT_FAIL;
        }

        enq_receive_complete(ndev, net_device, q_idx,
                             vmxferpage_packet->d.trans_id, status);

        return count;
}

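/* Handle NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: copy the host-supplied
 * hash-to-queue indirection table into the per-device tx_table, after
 * validating the message length and the table offset.
 */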
static void netvsc_send_table(struct net_device *ndev,
                              struct netvsc_device *nvscdev,
                              const struct nvsp_message *nvmsg,
                              u32 msglen)
{
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        u32 count, offset, *tab;
        int i;

        /* Ensure packet is big enough to read send_table fields */
        if (msglen < sizeof(struct nvsp_message_header) +
                     sizeof(struct nvsp_5_send_indirect_table)) {
                netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
                return;
        }

        count = nvmsg->msg.v5_msg.send_table.count;
        offset = nvmsg->msg.v5_msg.send_table.offset;

        if (count != VRSS_SEND_TAB_SIZE) {
                netdev_err(ndev, "Received wrong send-table size:%u\n", count);
                return;
        }

        /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
         * wrong due to a host bug. So fix the offset here.
         */
        if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
            msglen >= sizeof(struct nvsp_message_header) +
            sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
                offset = sizeof(struct nvsp_message_header) +
                         sizeof(union nvsp_6_message_uber);

        /* Boundary check for all versions */
        if (offset > msglen - count * sizeof(u32)) {
                netdev_err(ndev, "Received send-table offset too big:%u\n",
                           offset);
                return;
        }

        tab = (void *)nvmsg + offset;

        for (i = 0; i < count; i++)
                net_device_ctx->tx_table[i] = tab[i];
}

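/* Handle NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: record whether a VF slot has
 * been allocated for this device, and the VF's serial number.
 */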
1312 static void netvsc_send_vf(struct net_device *ndev,
1313                            const struct nvsp_message *nvmsg,
1314                            u32 msglen)
1315 {
1316         struct net_device_context *net_device_ctx = netdev_priv(ndev);
1317
1318         /* Ensure packet is big enough to read its fields */
1319         if (msglen < sizeof(struct nvsp_message_header) +
1320                      sizeof(struct nvsp_4_send_vf_association)) {
1321                 netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen);
1322                 return;
1323         }
1324
1325         net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
1326         net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
1327         netdev_info(ndev, "VF slot %u %s\n",
1328                     net_device_ctx->vf_serial,
1329                     net_device_ctx->vf_alloc ? "added" : "removed");
1330 }
1331
static void netvsc_receive_inband(struct net_device *ndev,
                                  struct netvsc_device *nvscdev,
                                  const struct vmpacket_descriptor *desc)
{
        const struct nvsp_message *nvmsg = hv_pkt_data(desc);
        u32 msglen = hv_pkt_datalen(desc);

        /* Ensure packet is big enough to read header fields */
        if (msglen < sizeof(struct nvsp_message_header)) {
                netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen);
                return;
        }

        switch (nvmsg->hdr.msg_type) {
        case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
                netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
                break;

        case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
                netvsc_send_vf(ndev, nvmsg, msglen);
                break;
        }
}

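/* Only the common nvsp_message_header is validated here; each handler above
 * re-checks msglen against its own payload size before touching any
 * type-specific fields, since the host-supplied length cannot be trusted.
 */
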
static int netvsc_process_raw_pkt(struct hv_device *device,
                                  struct netvsc_channel *nvchan,
                                  struct netvsc_device *net_device,
                                  struct net_device *ndev,
                                  const struct vmpacket_descriptor *desc,
                                  int budget)
{
        struct vmbus_channel *channel = nvchan->channel;
        const struct nvsp_message *nvmsg = hv_pkt_data(desc);

        trace_nvsp_recv(ndev, channel, nvmsg);

        switch (desc->type) {
        case VM_PKT_COMP:
                netvsc_send_completion(ndev, net_device, channel, desc, budget);
                break;

        case VM_PKT_DATA_USING_XFER_PAGES:
                return netvsc_receive(ndev, net_device, nvchan, desc);

        case VM_PKT_DATA_INBAND:
                netvsc_receive_inband(ndev, net_device, desc);
                break;

        default:
                netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
                           desc->type, desc->trans_id);
                break;
        }

        return 0;
}

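/* Only VM_PKT_DATA_USING_XFER_PAGES contributes to the NAPI work counter:
 * netvsc_receive() returns the number of packets it handed to the stack,
 * while send completions and inband control messages are accounted as zero
 * so they cannot exhaust the poll budget on their own.
 */
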
static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
{
        struct vmbus_channel *primary = channel->primary_channel;

        return primary ? primary->device_obj : channel->device_obj;
}

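/* Subchannels carry a pointer back to the primary channel, and only the
 * primary owns the hv_device, so the mapping above must go through the
 * primary whenever one exists.
 */
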
/* Network processing softirq.
 * Processes data in the incoming ring buffer from the host.
 * Stops when the ring is empty or the budget is met or exceeded.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
        struct netvsc_channel *nvchan
                = container_of(napi, struct netvsc_channel, napi);
        struct netvsc_device *net_device = nvchan->net_device;
        struct vmbus_channel *channel = nvchan->channel;
        struct hv_device *device = netvsc_channel_to_device(channel);
        struct net_device *ndev = hv_get_drvdata(device);
        int work_done = 0;
        int ret;

        /* If starting a new interval */
        if (!nvchan->desc)
                nvchan->desc = hv_pkt_iter_first(channel);

        while (nvchan->desc && work_done < budget) {
                work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
                                                    ndev, nvchan->desc, budget);
                nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
        }

        /* Send any pending receive completions */
        ret = send_recv_completions(ndev, net_device, nvchan);

        /* If the NAPI budget was not exhausted this time and we are not
         * busy polling, re-enable host interrupts, then reschedule if the
         * ring is not empty or sending a receive completion failed.
         */
        if (work_done < budget &&
            napi_complete_done(napi, work_done) &&
            (ret || hv_end_read(&channel->inbound)) &&
            napi_schedule_prep(napi)) {
                hv_begin_read(&channel->inbound);
                __napi_schedule(napi);
        }

        /* The driver may overshoot the budget since a single descriptor can
         * carry multiple packets, so clamp what is reported to the stack.
         */
        return min(work_done, budget);
}

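/* The re-arm sequence above closes the classic NAPI race: hv_end_read()
 * re-enables the host interrupt and reports whether new data slipped into
 * the ring while it was masked. In pseudo-form:
 *
 *      if (budget not exhausted && napi_complete_done() &&
 *          (recv-completion send failed || ring refilled during re-arm) &&
 *          napi_schedule_prep())
 *              mask the interrupt again and reschedule NAPI;
 *
 * so a descriptor that arrives between the last iterator pass and the
 * interrupt re-enable is never stranded.
 */
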
/* Callback invoked when data is available in the host ring buffer.
 * Processing is deferred until the network softirq (NAPI).
 */
void netvsc_channel_cb(void *context)
{
        struct netvsc_channel *nvchan = context;
        struct vmbus_channel *channel = nvchan->channel;
        struct hv_ring_buffer_info *rbi = &channel->inbound;

        /* preload first vmpacket descriptor */
        prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index);

        if (napi_schedule_prep(&nvchan->napi)) {
                /* disable interrupts from host */
                hv_begin_read(rbi);

                __napi_schedule_irqoff(&nvchan->napi);
        }
}

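/* Because the channel read mode is HV_CALL_ISR (set in netvsc_device_add()
 * below), this callback runs in interrupt context and does no ring-buffer
 * work itself: hv_begin_read() masks further host interrupts, and the actual
 * draining happens in netvsc_poll() once NAPI runs.
 */
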
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 */
struct netvsc_device *netvsc_device_add(struct hv_device *device,
                                const struct netvsc_device_info *device_info)
{
        int i, ret = 0;
        struct netvsc_device *net_device;
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);

        net_device = alloc_net_device();
        if (!net_device)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
                net_device_ctx->tx_table[i] = 0;

        /* Because the device uses NAPI, all the interrupt batching and
         * control is done via Net softirq, not the channel handling
         */
        set_channel_read_mode(device->channel, HV_CALL_ISR);

        /* If we're reopening the device we may have multiple queues; fill the
         * chn_table with the default channel to use it before subchannels are
         * opened. Initialize the channel state before we open; we can be
         * interrupted as soon as we open the channel.
         */
        for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
                struct netvsc_channel *nvchan = &net_device->chan_table[i];

                nvchan->channel = device->channel;
                nvchan->net_device = net_device;
                u64_stats_init(&nvchan->tx_stats.syncp);
                u64_stats_init(&nvchan->rx_stats.syncp);

                ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i);
                if (ret) {
                        netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
                        goto cleanup2;
                }

                ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
                                                 MEM_TYPE_PAGE_SHARED, NULL);
                if (ret) {
                        netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
                        xdp_rxq_info_unreg(&nvchan->xdp_rxq);
                        goto cleanup2;
                }
        }

        /* Enable NAPI handler before init callbacks */
        netif_napi_add(ndev, &net_device->chan_table[0].napi,
                       netvsc_poll, NAPI_POLL_WEIGHT);

        /* Open the channel */
        ret = vmbus_open(device->channel, netvsc_ring_bytes,
                         netvsc_ring_bytes, NULL, 0,
                         netvsc_channel_cb, net_device->chan_table);
        if (ret != 0) {
                netdev_err(ndev, "unable to open channel: %d\n", ret);
                goto cleanup;
        }

        /* Channel is opened */
        netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

        napi_enable(&net_device->chan_table[0].napi);

        /* Connect with the NetVSP */
        ret = netvsc_connect_vsp(device, net_device, device_info);
        if (ret != 0) {
                netdev_err(ndev,
                        "unable to connect to NetVSP - %d\n", ret);
                goto close;
        }

        /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
         * populated.
         */
        rcu_assign_pointer(net_device_ctx->nvdev, net_device);

        return net_device;

close:
        RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
        napi_disable(&net_device->chan_table[0].napi);

        /* Now, we can close the channel safely */
        vmbus_close(device->channel);

cleanup:
        netif_napi_del(&net_device->chan_table[0].napi);

cleanup2:
        /* Unregister every xdp_rxq_info that was successfully registered
         * above; after a completed loop i == VRSS_CHANNEL_MAX, so all
         * entries are undone.
         */
        while (--i >= 0)
                xdp_rxq_info_unreg(&net_device->chan_table[i].xdp_rxq);

        free_netvsc_device(&net_device->rcu);

        return ERR_PTR(ret);
}
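
/* Typical use, as a sketch (the RNDIS filter layer is the expected caller,
 * e.g. rndis_filter_device_add()):
 *
 *      net_device = netvsc_device_add(dev, device_info);
 *      if (IS_ERR(net_device))
 *              return net_device;
 *      ... RNDIS initialization and subchannel setup follow ...
 *
 * Every resource taken here is unwound on failure, so callers only need to
 * propagate the ERR_PTR.
 */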