// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>
#include <asm/page.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
        ((PAGE_ALIGN((addr) + (len)) >> PAGE_SHIFT) - ((addr) >> PAGE_SHIFT))

static unsigned long virt_to_hvpfn(void *addr)
{
        phys_addr_t paddr;

        if (is_vmalloc_addr(addr))
                paddr = page_to_phys(vmalloc_to_page(addr)) +
                        offset_in_page(addr);
        else
                paddr = __pa(addr);

        return paddr >> PAGE_SHIFT;
}

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
void vmbus_setevent(struct vmbus_channel *channel)
{
        struct hv_monitor_page *monitorpage;

        trace_vmbus_setevent(channel);

        /*
         * For channels marked as being in "low latency" mode,
         * bypass the monitor page mechanism.
         */
        if (channel->offermsg.monitor_allocated && !channel->low_latency) {
                vmbus_send_interrupt(channel->offermsg.child_relid);

                /* Get the child to parent monitor page */
                monitorpage = vmbus_connection.monitor_pages[1];

                sync_set_bit(channel->monitor_bit,
                        (unsigned long *)&monitorpage->trigger_group
                                        [channel->monitor_grp].pending);

        } else {
                vmbus_set_event(channel);
        }
}
EXPORT_SYMBOL_GPL(vmbus_setevent);

/* vmbus_free_ring - drop mapping of ring buffer */
void vmbus_free_ring(struct vmbus_channel *channel)
{
        hv_ringbuffer_cleanup(&channel->outbound);
        hv_ringbuffer_cleanup(&channel->inbound);

        if (channel->ringbuffer_page) {
                __free_pages(channel->ringbuffer_page,
                             get_order(channel->ringbuffer_pagecount
                                       << PAGE_SHIFT));
                channel->ringbuffer_page = NULL;
        }
}
EXPORT_SYMBOL_GPL(vmbus_free_ring);

/* vmbus_alloc_ring - allocate and map pages for ring buffer */
int vmbus_alloc_ring(struct vmbus_channel *newchannel,
                     u32 send_size, u32 recv_size)
{
        struct page *page;
        int order;

        if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
                return -EINVAL;

        /* Allocate the ring buffer */
        order = get_order(send_size + recv_size);
        page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
                                GFP_KERNEL|__GFP_ZERO, order);

        if (!page)
                page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);

        if (!page)
                return -ENOMEM;

        newchannel->ringbuffer_page = page;
        newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
        newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;

        return 0;
}
EXPORT_SYMBOL_GPL(vmbus_alloc_ring);

static int __vmbus_open(struct vmbus_channel *newchannel,
                        void *userdata, u32 userdatalen,
                        void (*onchannelcallback)(void *context), void *context)
{
        struct vmbus_channel_open_channel *open_msg;
        struct vmbus_channel_msginfo *open_info = NULL;
        struct page *page = newchannel->ringbuffer_page;
        u32 send_pages, recv_pages;
        unsigned long flags;
        int err;

        if (userdatalen > MAX_USER_DEFINED_BYTES)
                return -EINVAL;

        send_pages = newchannel->ringbuffer_send_offset;
        recv_pages = newchannel->ringbuffer_pagecount - send_pages;

        if (newchannel->state != CHANNEL_OPEN_STATE)
                return -EINVAL;

        newchannel->state = CHANNEL_OPENING_STATE;
        newchannel->onchannel_callback = onchannelcallback;
        newchannel->channel_callback_context = context;

        err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
        if (err)
                goto error_clean_ring;

        err = hv_ringbuffer_init(&newchannel->inbound,
                                 &page[send_pages], recv_pages);
        if (err)
                goto error_clean_ring;

        /* Establish the gpadl for the ring buffer */
        newchannel->ringbuffer_gpadlhandle = 0;

        err = vmbus_establish_gpadl(newchannel,
                                    page_address(newchannel->ringbuffer_page),
                                    (send_pages + recv_pages) << PAGE_SHIFT,
                                    &newchannel->ringbuffer_gpadlhandle);
        if (err)
                goto error_clean_ring;

        /* Create and init the channel open message */
        open_info = kmalloc(sizeof(*open_info) +
                            sizeof(struct vmbus_channel_open_channel),
                            GFP_KERNEL);
        if (!open_info) {
                err = -ENOMEM;
                goto error_free_gpadl;
        }

        init_completion(&open_info->waitevent);
        open_info->waiting_channel = newchannel;

        open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
        open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
        open_msg->openid = newchannel->offermsg.child_relid;
        open_msg->child_relid = newchannel->offermsg.child_relid;
        open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
        open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
        open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);

        if (userdatalen)
                memcpy(open_msg->userdata, userdata, userdatalen);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&open_info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (newchannel->rescind) {
                err = -ENODEV;
                goto error_free_info;
        }

        err = vmbus_post_msg(open_msg,
                             sizeof(struct vmbus_channel_open_channel), true);

        trace_vmbus_open(open_msg, err);

        if (err != 0)
                goto error_clean_msglist;

        wait_for_completion(&open_info->waitevent);

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (newchannel->rescind) {
                err = -ENODEV;
                goto error_free_info;
        }

        if (open_info->response.open_result.status) {
                err = -EAGAIN;
                goto error_free_info;
        }

        newchannel->state = CHANNEL_OPENED_STATE;
        kfree(open_info);
        return 0;

error_clean_msglist:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&open_info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
error_free_info:
        kfree(open_info);
error_free_gpadl:
        vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
        newchannel->ringbuffer_gpadlhandle = 0;
error_clean_ring:
        hv_ringbuffer_cleanup(&newchannel->outbound);
        hv_ringbuffer_cleanup(&newchannel->inbound);
        newchannel->state = CHANNEL_OPEN_STATE;
        return err;
}

/*
 * vmbus_connect_ring - Open the channel but reuse ring buffer
 */
int vmbus_connect_ring(struct vmbus_channel *newchannel,
                       void (*onchannelcallback)(void *context), void *context)
{
        return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
}
EXPORT_SYMBOL_GPL(vmbus_connect_ring);

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel,
               u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
               void *userdata, u32 userdatalen,
               void (*onchannelcallback)(void *context), void *context)
{
        int err;

        err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
                               recv_ringbuffer_size);
        if (err)
                return err;

        err = __vmbus_open(newchannel, userdata, userdatalen,
                           onchannelcallback, context);
        if (err)
                vmbus_free_ring(newchannel);

        return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
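
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * typically opens its channel from probe().  The ring sizes, callback
 * and names below are hypothetical; see drivers/hv/hv_util.c for real
 * callers.
 *
 *	static void my_onchannelcallback(void *context)
 *	{
 *		struct vmbus_channel *chan = context;
 *
 *		// Drain packets from chan with vmbus_recvpacket() here.
 *	}
 *
 *	// Both ring sizes must be multiples of PAGE_SIZE, or
 *	// vmbus_alloc_ring() fails with -EINVAL.
 *	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *			 NULL, 0, my_onchannelcallback, dev->channel);
 *	if (ret)
 *		return ret;
 */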

/* Used for Hyper-V Socket: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const guid_t *shv_guest_service_id,
                                  const guid_t *shv_host_service_id)
{
        struct vmbus_channel_tl_connect_request conn_msg;
        int ret;

        memset(&conn_msg, 0, sizeof(conn_msg));
        conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
        conn_msg.guest_endpoint_id = *shv_guest_service_id;
        conn_msg.host_service_id = *shv_host_service_id;

        ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);

        trace_vmbus_send_tl_connect_request(&conn_msg, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);

/*
 * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
 *
 * CHANNELMSG_MODIFYCHANNEL messages are asynchronous.  Also, Hyper-V does not
 * ACK such messages.  In other words, we can't know when the host will stop
 * interrupting the "old" vCPU and start interrupting the "new" vCPU for the
 * given channel.
 *
 * The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
 * VERSION_WIN10_V4_1.
 */
int vmbus_send_modifychannel(u32 child_relid, u32 target_vp)
{
        struct vmbus_channel_modifychannel conn_msg;
        int ret;

        memset(&conn_msg, 0, sizeof(conn_msg));
        conn_msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
        conn_msg.child_relid = child_relid;
        conn_msg.target_vp = target_vp;

        ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);

        trace_vmbus_send_modifychannel(&conn_msg, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
                               struct vmbus_channel_msginfo **msginfo)
{
        int i;
        int pagecount;
        struct vmbus_channel_gpadl_header *gpadl_header;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msgheader;
        struct vmbus_channel_msginfo *msgbody = NULL;
        u32 msgsize;

        int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

        pagecount = size >> PAGE_SHIFT;

        /* do we need a gpadl body msg */
        pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                  sizeof(struct vmbus_channel_gpadl_header) -
                  sizeof(struct gpa_range);
        pfncount = pfnsize / sizeof(u64);

        if (pagecount > pfncount) {
                /* we need a gpadl body */
                /* fill in the header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pfncount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (!msgheader)
                        goto nomem;

                INIT_LIST_HEAD(&msgheader->submsglist);
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                         pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pfncount; i++)
                        gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
                                kbuffer + PAGE_SIZE * i);
                *msginfo = msgheader;

                pfnsum = pfncount;
                pfnleft = pagecount - pfncount;

                /* how many pfns can we fit in a body message */
                pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
                          sizeof(struct vmbus_channel_gpadl_body);
                pfncount = pfnsize / sizeof(u64);

                /* fill in the body */
                while (pfnleft) {
                        if (pfnleft > pfncount)
                                pfncurr = pfncount;
                        else
                                pfncurr = pfnleft;

                        msgsize = sizeof(struct vmbus_channel_msginfo) +
                                  sizeof(struct vmbus_channel_gpadl_body) +
                                  pfncurr * sizeof(u64);
                        msgbody = kzalloc(msgsize, GFP_KERNEL);

                        if (!msgbody) {
                                struct vmbus_channel_msginfo *pos = NULL;
                                struct vmbus_channel_msginfo *tmp = NULL;
                                /*
                                 * Free up all the allocated messages.
                                 */
                                list_for_each_entry_safe(pos, tmp,
                                        &msgheader->submsglist,
                                        msglistentry) {

                                        list_del(&pos->msglistentry);
                                        kfree(pos);
                                }

                                goto nomem;
                        }

                        msgbody->msgsize = msgsize;
                        gpadl_body =
                                (struct vmbus_channel_gpadl_body *)msgbody->msg;

                        /*
                         * Gpadl is u32 and we are using a pointer which could
                         * be 64-bit.  This is governed by the guest/host
                         * protocol and so the hypervisor guarantees that
                         * this is ok.
                         */
                        for (i = 0; i < pfncurr; i++)
                                gpadl_body->pfn[i] = virt_to_hvpfn(
                                        kbuffer + PAGE_SIZE * (pfnsum + i));

                        /* add to msg header */
                        list_add_tail(&msgbody->msglistentry,
                                      &msgheader->submsglist);
                        pfnsum += pfncurr;
                        pfnleft -= pfncurr;
                }
        } else {
                /* everything fits in a header */
                msgsize = sizeof(struct vmbus_channel_msginfo) +
                          sizeof(struct vmbus_channel_gpadl_header) +
                          sizeof(struct gpa_range) + pagecount * sizeof(u64);
                msgheader = kzalloc(msgsize, GFP_KERNEL);
                if (msgheader == NULL)
                        goto nomem;

                INIT_LIST_HEAD(&msgheader->submsglist);
                msgheader->msgsize = msgsize;

                gpadl_header = (struct vmbus_channel_gpadl_header *)
                        msgheader->msg;
                gpadl_header->rangecount = 1;
                gpadl_header->range_buflen = sizeof(struct gpa_range) +
                                         pagecount * sizeof(u64);
                gpadl_header->range[0].byte_offset = 0;
                gpadl_header->range[0].byte_count = size;
                for (i = 0; i < pagecount; i++)
                        gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
                                kbuffer + PAGE_SIZE * i);

                *msginfo = msgheader;
        }

        return 0;
nomem:
        kfree(msgheader);
        kfree(msgbody);
        return -ENOMEM;
}
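
/*
 * Worked example (illustrative; assumes MAX_SIZE_CHANNEL_MESSAGE is the
 * 240-byte VMBus message payload limit defined in hyperv_vmbus.h): the
 * GPADL header message can carry roughly
 * (240 - sizeof(gpadl_header) - sizeof(gpa_range)) / sizeof(u64) PFNs.
 * A 1 MiB buffer spans 256 4 KiB pages, more than fits in one header,
 * so create_gpadl_header() emits a single CHANNELMSG_GPADL_HEADER with
 * the first batch of PFNs and queues CHANNELMSG_GPADL_BODY submessages
 * carrying the remaining PFNs in pfncount-sized chunks.
 */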

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @gpadl_handle: on return, the handle that identifies the new GPADL
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
                          u32 size, u32 *gpadl_handle)
{
        struct vmbus_channel_gpadl_header *gpadlmsg;
        struct vmbus_channel_gpadl_body *gpadl_body;
        struct vmbus_channel_msginfo *msginfo = NULL;
        struct vmbus_channel_msginfo *submsginfo, *tmp;
        struct list_head *curr;
        u32 next_gpadl_handle;
        unsigned long flags;
        int ret = 0;

        next_gpadl_handle =
                (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

        ret = create_gpadl_header(kbuffer, size, &msginfo);
        if (ret)
                return ret;

        init_completion(&msginfo->waitevent);
        msginfo->waiting_channel = channel;

        gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
        gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
        gpadlmsg->child_relid = channel->offermsg.child_relid;
        gpadlmsg->gpadl = next_gpadl_handle;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&msginfo->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (channel->rescind) {
                ret = -ENODEV;
                goto cleanup;
        }

        ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
                             sizeof(*msginfo), true);

        trace_vmbus_establish_gpadl_header(gpadlmsg, ret);

        if (ret != 0)
                goto cleanup;

        list_for_each(curr, &msginfo->submsglist) {
                submsginfo = (struct vmbus_channel_msginfo *)curr;
                gpadl_body =
                        (struct vmbus_channel_gpadl_body *)submsginfo->msg;

                gpadl_body->header.msgtype =
                        CHANNELMSG_GPADL_BODY;
                gpadl_body->gpadl = next_gpadl_handle;

                ret = vmbus_post_msg(gpadl_body,
                                     submsginfo->msgsize - sizeof(*submsginfo),
                                     true);

                trace_vmbus_establish_gpadl_body(gpadl_body, ret);

                if (ret != 0)
                        goto cleanup;
        }
        wait_for_completion(&msginfo->waitevent);

        if (msginfo->response.gpadl_created.creation_status != 0) {
                pr_err("Failed to establish GPADL: err = 0x%x\n",
                       msginfo->response.gpadl_created.creation_status);

                ret = -EDQUOT;
                goto cleanup;
        }

        if (channel->rescind) {
                ret = -ENODEV;
                goto cleanup;
        }

        /* At this point, we received the gpadl created msg */
        *gpadl_handle = gpadlmsg->gpadl;

cleanup:
        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&msginfo->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
        list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
                                 msglistentry) {
                kfree(submsginfo);
        }

        kfree(msginfo);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
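
/*
 * Usage sketch (illustrative only; 'channel' and the buffer are
 * hypothetical): establishing a GPADL over a buffer whose size is a
 * multiple of PAGE_SIZE, then tearing it down when done.
 *
 *	u32 gpadl;
 *	void *buf = vzalloc(8 * PAGE_SIZE);	// kmalloc() also works
 *
 *	ret = vmbus_establish_gpadl(channel, buf, 8 * PAGE_SIZE, &gpadl);
 *	if (ret)
 *		goto free_buf;
 *	// ... pass 'gpadl' to the host, e.g. in a setup message ...
 *	vmbus_teardown_gpadl(channel, gpadl);
 */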

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
        struct vmbus_channel_gpadl_teardown *msg;
        struct vmbus_channel_msginfo *info;
        unsigned long flags;
        int ret;

        info = kmalloc(sizeof(*info) +
                       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        init_completion(&info->waitevent);
        info->waiting_channel = channel;

        msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

        msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
        msg->child_relid = channel->offermsg.child_relid;
        msg->gpadl = gpadl_handle;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_add_tail(&info->msglistentry,
                      &vmbus_connection.chn_msg_list);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        if (channel->rescind)
                goto post_msg_err;

        ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
                             true);

        trace_vmbus_teardown_gpadl(msg, ret);

        if (ret)
                goto post_msg_err;

        wait_for_completion(&info->waitevent);

post_msg_err:
        /*
         * If the channel has been rescinded, we will be awakened by the
         * rescind handler; set the error code to zero so we don't leak memory.
         */
        if (channel->rescind)
                ret = 0;

        spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
        list_del(&info->msglistentry);
        spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

        kfree(info);
        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

void vmbus_reset_channel_cb(struct vmbus_channel *channel)
{
        unsigned long flags;

        /*
         * vmbus_on_event(), running in the per-channel tasklet, can race
         * with vmbus_close_internal() in the case of SMP guest, e.g., when
         * the former is accessing channel->inbound.ring_buffer, the latter
         * could be freeing the ring_buffer pages, so here we must stop it
         * first.
         *
         * vmbus_chan_sched() might call the netvsc driver callback function
         * that ends up scheduling NAPI work that accesses the ring buffer.
         * At this point, we have to ensure that any such work is completed
         * and that the channel ring buffer is no longer being accessed, cf.
         * the calls to napi_disable() in netvsc_device_remove().
         */
        tasklet_disable(&channel->callback_event);

        /* See the inline comments in vmbus_chan_sched(). */
        spin_lock_irqsave(&channel->sched_lock, flags);
        channel->onchannel_callback = NULL;
        spin_unlock_irqrestore(&channel->sched_lock, flags);

        channel->sc_creation_callback = NULL;

        /* Re-enable tasklet for use on re-open */
        tasklet_enable(&channel->callback_event);
}

static int vmbus_close_internal(struct vmbus_channel *channel)
{
        struct vmbus_channel_close_channel *msg;
        int ret;

        vmbus_reset_channel_cb(channel);

        /*
         * In case a device driver's probe() fails (e.g.,
         * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
         * rescinded later (e.g., we dynamically disable an Integrated Service
         * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
         * here we should skip most of the below cleanup work.
         */
        if (channel->state != CHANNEL_OPENED_STATE)
                return -EINVAL;

        channel->state = CHANNEL_OPEN_STATE;

        /* Send a closing message */

        msg = &channel->close_msg.msg;

        msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
        msg->child_relid = channel->offermsg.child_relid;

        ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
                             true);

        trace_vmbus_close_internal(msg, ret);

        if (ret) {
                pr_err("Close failed: close post msg return is %d\n", ret);
                /*
                 * If we failed to post the close msg,
                 * it is perhaps better to leak memory.
                 */
        }

        /* Tear down the gpadl for the channel's ring buffer */
        else if (channel->ringbuffer_gpadlhandle) {
                ret = vmbus_teardown_gpadl(channel,
                                           channel->ringbuffer_gpadlhandle);
                if (ret) {
                        pr_err("Close failed: teardown gpadl return %d\n", ret);
                        /*
                         * If we failed to teardown gpadl,
                         * it is perhaps better to leak memory.
                         */
                }

                channel->ringbuffer_gpadlhandle = 0;
        }

        return ret;
}

/* disconnect ring - close all channels */
int vmbus_disconnect_ring(struct vmbus_channel *channel)
{
        struct vmbus_channel *cur_channel, *tmp;
        int ret;

        if (channel->primary_channel != NULL)
                return -EINVAL;

        list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
                if (cur_channel->rescind)
                        wait_for_completion(&cur_channel->rescind_event);

                mutex_lock(&vmbus_connection.channel_mutex);
                if (vmbus_close_internal(cur_channel) == 0) {
                        vmbus_free_ring(cur_channel);

                        if (cur_channel->rescind)
                                hv_process_channel_removal(cur_channel);
                }
                mutex_unlock(&vmbus_connection.channel_mutex);
        }

        /*
         * Now close the primary.
         */
        mutex_lock(&vmbus_connection.channel_mutex);
        ret = vmbus_close_internal(channel);
        mutex_unlock(&vmbus_connection.channel_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
        if (vmbus_disconnect_ring(channel) == 0)
                vmbus_free_ring(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure
 * @buffer: Pointer to the buffer you want to send the data from.
 * @bufferlen: Maximum size of what the buffer holds.
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 *        packet etc.
 * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
 *
 * Sends data in @buffer directly to Hyper-V via the vmbus.
 * This will send the data unparsed to Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
                     u32 bufferlen, u64 requestid,
                     enum vmbus_packet_type type, u32 flags)
{
        struct vmpacket_descriptor desc;
        u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
        u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
        struct kvec bufferlist[3];
        u64 aligned_data = 0;
        int num_vecs = ((bufferlen != 0) ? 3 : 1);

        /* Setup the descriptor */
        desc.type = type; /* VmbusPacketTypeDataInBand; */
        desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
        /* in 8-bytes granularity */
        desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
        desc.len8 = (u16)(packetlen_aligned >> 3);
        desc.trans_id = requestid;

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, num_vecs);
}
EXPORT_SYMBOL(vmbus_sendpacket);
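
/*
 * Usage sketch (illustrative; 'struct my_request' is hypothetical):
 * sending an in-band packet and asking the host for a completion.
 *
 *	struct my_request req = { ... };
 *
 *	ret = vmbus_sendpacket(channel, &req, sizeof(req),
 *			       (u64)(unsigned long)&req,	// requestid
 *			       VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */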

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows you
 * to control notifying the host. This will be useful for sending
 * batched data. Also the sender can control the send flags
 * explicitly.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
                                struct hv_page_buffer pagebuffers[],
                                u32 pagecount, void *buffer, u32 bufferlen,
                                u64 requestid)
{
        int i;
        struct vmbus_channel_packet_page_buffer desc;
        u32 descsize;
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;

        if (pagecount > MAX_PAGE_BUFFER_COUNT)
                return -EINVAL;

        /*
         * Adjust the size down since vmbus_channel_packet_page_buffer is the
         * largest size we support
         */
        descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
                   ((MAX_PAGE_BUFFER_COUNT - pagecount) *
                   sizeof(struct hv_page_buffer));
        packetlen = descsize + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
        desc.length8 = (u16)(packetlen_aligned >> 3);
        desc.transactionid = requestid;
        desc.reserved = 0;
        desc.rangecount = pagecount;

        for (i = 0; i < pagecount; i++) {
                desc.range[i].len = pagebuffers[i].len;
                desc.range[i].offset = pagebuffers[i].offset;
                desc.range[i].pfn = pagebuffers[i].pfn;
        }

        bufferlist[0].iov_base = &desc;
        bufferlist[0].iov_len = descsize;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
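
/*
 * Usage sketch (illustrative; 'data', 'len', 'hdr' and 'requestid' are
 * hypothetical): describing a single page of payload with one
 * hv_page_buffer.  pagecount must not exceed MAX_PAGE_BUFFER_COUNT.
 *
 *	struct hv_page_buffer pb;
 *
 *	pb.pfn = virt_to_hvpfn(data);
 *	pb.offset = offset_in_page(data);
 *	pb.len = len;	// payload must not cross the page boundary
 *
 *	ret = vmbus_sendpacket_pagebuffer(channel, &pb, 1,
 *					  &hdr, sizeof(hdr), requestid);
 */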

/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
                              struct vmbus_packet_mpb_array *desc,
                              u32 desc_size,
                              void *buffer, u32 bufferlen, u64 requestid)
{
        u32 packetlen;
        u32 packetlen_aligned;
        struct kvec bufferlist[3];
        u64 aligned_data = 0;

        packetlen = desc_size + bufferlen;
        packetlen_aligned = ALIGN(packetlen, sizeof(u64));

        /* Setup the descriptor */
        desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
        desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
        desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
        desc->length8 = (u16)(packetlen_aligned >> 3);
        desc->transactionid = requestid;
        desc->reserved = 0;
        desc->rangecount = 1;

        bufferlist[0].iov_base = desc;
        bufferlist[0].iov_len = desc_size;
        bufferlist[1].iov_base = buffer;
        bufferlist[1].iov_len = bufferlen;
        bufferlist[2].iov_base = &aligned_data;
        bufferlist[2].iov_len = (packetlen_aligned - packetlen);

        return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

/**
 * __vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer can hold.
 * @buffer_actual_len: The actual size of the data after it was received.
 * @requestid: Identifier of the request
 * @raw: true means keep the vmpacket_descriptor header in the received data.
 *
 * Receives directly from the hyper-v vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
static inline int
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
                   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
                   bool raw)
{
        return hv_ringbuffer_read(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, raw);
}

int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
                     u32 bufferlen, u32 *buffer_actual_len,
                     u64 *requestid)
{
        return __vmbus_recvpacket(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, false);
}
EXPORT_SYMBOL(vmbus_recvpacket);
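
/*
 * Usage sketch (illustrative; 'buf', 'buflen' and handle_packet() are
 * hypothetical): draining a channel from its onchannel callback.  When
 * the ring is empty the read returns 0 with *buffer_actual_len == 0,
 * so drivers check the actual length rather than the return value alone.
 *
 *	u32 actual;
 *	u64 reqid;
 *
 *	while (vmbus_recvpacket(channel, buf, buflen, &actual, &reqid) == 0 &&
 *	       actual != 0)
 *		handle_packet(buf, actual, reqid);
 */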

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
                         u32 bufferlen, u32 *buffer_actual_len,
                         u64 *requestid)
{
        return __vmbus_recvpacket(channel, buffer, bufferlen,
                                  buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);