// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>
#include <asm/page.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))

static unsigned long virt_to_hvpfn(void *addr)
{
	phys_addr_t paddr;

	if (is_vmalloc_addr(addr))
		paddr = page_to_phys(vmalloc_to_page(addr)) +
			offset_in_page(addr);
	else
		paddr = __pa(addr);

	return paddr >> PAGE_SHIFT;
}

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
void vmbus_setevent(struct vmbus_channel *channel)
{
	struct hv_monitor_page *monitorpage;

	trace_vmbus_setevent(channel);

	/*
	 * For channels marked as in "low latency" mode
	 * bypass the monitor page mechanism.
	 */
	if (channel->offermsg.monitor_allocated && !channel->low_latency) {
		vmbus_send_interrupt(channel->offermsg.child_relid);

		/* Get the child to parent monitor page */
		monitorpage = vmbus_connection.monitor_pages[1];

		sync_set_bit(channel->monitor_bit,
			(unsigned long *)&monitorpage->trigger_group
					[channel->monitor_grp].pending);
	} else {
		vmbus_set_event(channel);
	}
}
EXPORT_SYMBOL_GPL(vmbus_setevent);

/* vmbus_free_ring - drop mapping of ring buffer */
void vmbus_free_ring(struct vmbus_channel *channel)
{
	hv_ringbuffer_cleanup(&channel->outbound);
	hv_ringbuffer_cleanup(&channel->inbound);

	if (channel->ringbuffer_page) {
		__free_pages(channel->ringbuffer_page,
			     get_order(channel->ringbuffer_pagecount
				       << PAGE_SHIFT));
		channel->ringbuffer_page = NULL;
	}
}
EXPORT_SYMBOL_GPL(vmbus_free_ring);

/* vmbus_alloc_ring - allocate and map pages for ring buffer */
int vmbus_alloc_ring(struct vmbus_channel *newchannel,
		     u32 send_size, u32 recv_size)
{
	struct page *page;
	int order;

	if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
		return -EINVAL;

	/* Allocate the ring buffer */
	order = get_order(send_size + recv_size);
	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
				GFP_KERNEL|__GFP_ZERO, order);

	if (!page)
		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);

	if (!page)
		return -ENOMEM;

	newchannel->ringbuffer_page = page;
	newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
	newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;

	return 0;
}
EXPORT_SYMBOL_GPL(vmbus_alloc_ring);

static int __vmbus_open(struct vmbus_channel *newchannel,
			void *userdata, u32 userdatalen,
			void (*onchannelcallback)(void *context), void *context)
{
	struct vmbus_channel_open_channel *open_msg;
	struct vmbus_channel_msginfo *open_info = NULL;
	struct page *page = newchannel->ringbuffer_page;
	u32 send_pages, recv_pages;
	unsigned long flags;
	int err;

	if (userdatalen > MAX_USER_DEFINED_BYTES)
		return -EINVAL;

	send_pages = newchannel->ringbuffer_send_offset;
	recv_pages = newchannel->ringbuffer_pagecount - send_pages;

	if (newchannel->state != CHANNEL_OPEN_STATE)
		return -EINVAL;

	newchannel->state = CHANNEL_OPENING_STATE;
	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;

	err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
	if (err)
		goto error_clean_ring;

	err = hv_ringbuffer_init(&newchannel->inbound,
				 &page[send_pages], recv_pages);
	if (err)
		goto error_clean_ring;

	/* Establish the gpadl for the ring buffer */
	newchannel->ringbuffer_gpadlhandle = 0;

	err = vmbus_establish_gpadl(newchannel,
				    page_address(newchannel->ringbuffer_page),
				    (send_pages + recv_pages) << PAGE_SHIFT,
				    &newchannel->ringbuffer_gpadlhandle);
	if (err)
		goto error_clean_ring;

	/* Create and init the channel open message */
	open_info = kmalloc(sizeof(*open_info) +
			    sizeof(struct vmbus_channel_open_channel),
			    GFP_KERNEL);
	if (!open_info) {
		err = -ENOMEM;
		goto error_free_gpadl;
	}

	init_completion(&open_info->waitevent);
	open_info->waiting_channel = newchannel;

	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	open_msg->openid = newchannel->offermsg.child_relid;
	open_msg->child_relid = newchannel->offermsg.child_relid;
	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
	open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
	open_msg->target_vp = hv_cpu_number_to_vp_number(newchannel->target_cpu);

	if (userdatalen)
		memcpy(open_msg->userdata, userdata, userdatalen);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&open_info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (newchannel->rescind) {
		err = -ENODEV;
		goto error_free_info;
	}

	err = vmbus_post_msg(open_msg,
			     sizeof(struct vmbus_channel_open_channel), true);

	trace_vmbus_open(open_msg, err);

	if (err != 0)
		goto error_clean_msglist;

	wait_for_completion(&open_info->waitevent);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (newchannel->rescind) {
		err = -ENODEV;
		goto error_free_info;
	}

	if (open_info->response.open_result.status) {
		err = -EAGAIN;
		goto error_free_info;
	}

	newchannel->state = CHANNEL_OPENED_STATE;
	kfree(open_info);
	return 0;

error_clean_msglist:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
error_free_info:
	kfree(open_info);
error_free_gpadl:
	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
	newchannel->ringbuffer_gpadlhandle = 0;
error_clean_ring:
	hv_ringbuffer_cleanup(&newchannel->outbound);
	hv_ringbuffer_cleanup(&newchannel->inbound);
	newchannel->state = CHANNEL_OPEN_STATE;
	return err;
}

/*
 * vmbus_connect_ring - Open the channel but reuse ring buffer
 */
int vmbus_connect_ring(struct vmbus_channel *newchannel,
		       void (*onchannelcallback)(void *context), void *context)
{
	return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
}
EXPORT_SYMBOL_GPL(vmbus_connect_ring);

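/*
 * Illustrative sketch (not part of the original file): a driver that wants to
 * keep its ring-buffer pages across a close/re-open cycle can allocate them
 * once with vmbus_alloc_ring() and attach with vmbus_connect_ring().  The
 * callback, ring sizes and error handling below are hypothetical.
 */
#if 0
static void example_chan_callback(void *context)
{
	/* Typically schedules NAPI/tasklet work or drains via vmbus_recvpacket(). */
}

static int example_connect(struct vmbus_channel *chan)
{
	int ret;

	ret = vmbus_alloc_ring(chan, 16 * PAGE_SIZE, 16 * PAGE_SIZE);
	if (ret)
		return ret;

	ret = vmbus_connect_ring(chan, example_chan_callback, chan);
	if (ret)
		vmbus_free_ring(chan);	/* nothing else references the pages */

	return ret;
}
#endif
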
/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel,
	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
	       void *userdata, u32 userdatalen,
	       void (*onchannelcallback)(void *context), void *context)
{
	int err;

	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
			       recv_ringbuffer_size);
	if (err)
		return err;

	err = __vmbus_open(newchannel, userdata, userdatalen,
			   onchannelcallback, context);
	if (err)
		vmbus_free_ring(newchannel);

	return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);

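/*
 * Illustrative sketch (not part of the original file): typical probe()/remove()
 * pairing of vmbus_open() and vmbus_close().  Ring sizes must be multiples of
 * PAGE_SIZE; the callback is the hypothetical one from the sketch above.
 */
#if 0
static int example_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
{
	return vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
			  NULL, 0, example_chan_callback, dev->channel);
}

static int example_remove(struct hv_device *dev)
{
	vmbus_close(dev->channel);
	return 0;
}
#endif
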
/* Used for Hyper-V Socket: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
				  const guid_t *shv_host_servie_id)
{
	struct vmbus_channel_tl_connect_request conn_msg;
	int ret;

	memset(&conn_msg, 0, sizeof(conn_msg));
	conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
	conn_msg.guest_endpoint_id = *shv_guest_servie_id;
	conn_msg.host_service_id = *shv_host_servie_id;

	ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);

	trace_vmbus_send_tl_connect_request(&conn_msg, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);

/*
 * Set/change the vCPU (@target_vp) the channel (@child_relid) will interrupt.
 *
 * CHANNELMSG_MODIFYCHANNEL messages are asynchronous.  Also, Hyper-V does not
 * ACK such messages.  IOW we can't know when the host will stop interrupting
 * the "old" vCPU and start interrupting the "new" vCPU for the given channel.
 *
 * The CHANNELMSG_MODIFYCHANNEL message type is supported since VMBus version
 * VERSION_WIN10_V4_1.
 */
int vmbus_send_modifychannel(u32 child_relid, u32 target_vp)
{
	struct vmbus_channel_modifychannel conn_msg;
	int ret;

	memset(&conn_msg, 0, sizeof(conn_msg));
	conn_msg.header.msgtype = CHANNELMSG_MODIFYCHANNEL;
	conn_msg.child_relid = child_relid;
	conn_msg.target_vp = target_vp;

	ret = vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);

	trace_vmbus_send_modifychannel(&conn_msg, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_send_modifychannel);

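/*
 * Illustrative sketch (not part of the original file): retargeting a channel's
 * interrupt to CPU 0.  Because CHANNELMSG_MODIFYCHANNEL is not ACKed, the
 * caller must tolerate interrupts still arriving on the old vCPU for a while.
 */
#if 0
static int example_retarget(struct vmbus_channel *chan)
{
	return vmbus_send_modifychannel(chan->offermsg.child_relid,
					hv_cpu_number_to_vp_number(0));
}
#endif
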
/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
			       struct vmbus_channel_msginfo **msginfo)
{
	int i;
	int pagecount;
	struct vmbus_channel_gpadl_header *gpadl_header;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msgheader;
	struct vmbus_channel_msginfo *msgbody = NULL;
	u32 msgsize;
	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

	pagecount = size >> PAGE_SHIFT;

	/* do we need a gpadl body msg */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_header) -
		  sizeof(struct gpa_range);
	pfncount = pfnsize / sizeof(u64);

	if (pagecount > pfncount) {
		/* we need a gpadl body */
		/* fill in the header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pfncount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (!msgheader)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					 pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pfncount; i++)
			gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
				kbuffer + PAGE_SIZE * i);
		*msginfo = msgheader;

		pfnsum = pfncount;
		pfnleft = pagecount - pfncount;

		/* how many pfns can we fit in a body message */
		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
			  sizeof(struct vmbus_channel_gpadl_body);
		pfncount = pfnsize / sizeof(u64);

		/* fill in the body */
		while (pfnleft) {
			if (pfnleft > pfncount)
				pfncurr = pfncount;
			else
				pfncurr = pfnleft;

			msgsize = sizeof(struct vmbus_channel_msginfo) +
				  sizeof(struct vmbus_channel_gpadl_body) +
				  pfncurr * sizeof(u64);
			msgbody = kzalloc(msgsize, GFP_KERNEL);

			if (!msgbody) {
				struct vmbus_channel_msginfo *pos = NULL;
				struct vmbus_channel_msginfo *tmp = NULL;
				/*
				 * Free up all the allocated messages.
				 */
				list_for_each_entry_safe(pos, tmp,
					&msgheader->submsglist,
					msglistentry) {

					list_del(&pos->msglistentry);
					kfree(pos);
				}

				goto nomem;
			}

			msgbody->msgsize = msgsize;
			gpadl_body =
				(struct vmbus_channel_gpadl_body *)msgbody->msg;

			/*
			 * Gpadl is u32 and we are using a pointer which could
			 * be 64-bit.
			 * This is governed by the guest/host protocol and
			 * so the hypervisor guarantees that this is ok.
			 */
			for (i = 0; i < pfncurr; i++)
				gpadl_body->pfn[i] = virt_to_hvpfn(
					kbuffer + PAGE_SIZE * (pfnsum + i));

			/* add to msg header */
			list_add_tail(&msgbody->msglistentry,
				      &msgheader->submsglist);
			pfnsum += pfncurr;
			pfnleft -= pfncurr;
		}
	} else {
		/* everything fits in a header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pagecount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (msgheader == NULL)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					 pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pagecount; i++)
			gpadl_header->range[0].pfn_array[i] = virt_to_hvpfn(
				kbuffer + PAGE_SIZE * i);

		*msginfo = msgheader;
	}

	return 0;
nomem:
	kfree(msgheader);
	kfree(msgbody);
	return -ENOMEM;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @gpadl_handle: receives the handle for the newly established GPADL
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
			  u32 size, u32 *gpadl_handle)
{
	struct vmbus_channel_gpadl_header *gpadlmsg;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msginfo = NULL;
	struct vmbus_channel_msginfo *submsginfo, *tmp;
	struct list_head *curr;
	u32 next_gpadl_handle;
	unsigned long flags;
	int ret = 0;

	next_gpadl_handle =
		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

	ret = create_gpadl_header(kbuffer, size, &msginfo);
	if (ret)
		return ret;

	init_completion(&msginfo->waitevent);
	msginfo->waiting_channel = channel;

	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
	gpadlmsg->child_relid = channel->offermsg.child_relid;
	gpadlmsg->gpadl = next_gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&msginfo->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (channel->rescind) {
		ret = -ENODEV;
		goto cleanup;
	}

	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
			     sizeof(*msginfo), true);

	trace_vmbus_establish_gpadl_header(gpadlmsg, ret);

	if (ret != 0)
		goto cleanup;

	list_for_each(curr, &msginfo->submsglist) {
		submsginfo = (struct vmbus_channel_msginfo *)curr;
		gpadl_body =
			(struct vmbus_channel_gpadl_body *)submsginfo->msg;

		gpadl_body->header.msgtype =
			CHANNELMSG_GPADL_BODY;
		gpadl_body->gpadl = next_gpadl_handle;

		ret = vmbus_post_msg(gpadl_body,
				     submsginfo->msgsize - sizeof(*submsginfo),
				     true);

		trace_vmbus_establish_gpadl_body(gpadl_body, ret);

		if (ret != 0)
			goto cleanup;
	}
	wait_for_completion(&msginfo->waitevent);

	if (msginfo->response.gpadl_created.creation_status != 0) {
		pr_err("Failed to establish GPADL: err = 0x%x\n",
		       msginfo->response.gpadl_created.creation_status);
		ret = -EDQUOT;
		goto cleanup;
	}

	if (channel->rescind) {
		ret = -ENODEV;
		goto cleanup;
	}

	/* At this point, we received the gpadl created msg */
	*gpadl_handle = gpadlmsg->gpadl;

cleanup:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&msginfo->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
				 msglistentry) {
		kfree(submsginfo);
	}

	kfree(msginfo);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);

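/*
 * Illustrative sketch (not part of the original file): establishing a GPADL
 * over a page-aligned, page-multiple buffer so the host can reference it by
 * handle, then tearing it down before freeing the pages.  The buffer size and
 * the device-specific setup step are hypothetical.
 */
#if 0
static int example_share_buffer(struct vmbus_channel *chan)
{
	void *buf;
	u32 gpadl = 0;
	int ret;

	buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2); /* 4 pages */
	if (!buf)
		return -ENOMEM;

	ret = vmbus_establish_gpadl(chan, buf, 4 * PAGE_SIZE, &gpadl);
	if (ret) {
		free_pages((unsigned long)buf, 2);
		return ret;
	}

	/* ... hand @gpadl to the host in a device-specific setup message ... */

	vmbus_teardown_gpadl(chan, gpadl);
	free_pages((unsigned long)buf, 2);
	return 0;
}
#endif
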
/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
	struct vmbus_channel_gpadl_teardown *msg;
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret;

	info = kmalloc(sizeof(*info) +
		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	init_completion(&info->waitevent);
	info->waiting_channel = channel;

	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
	msg->child_relid = channel->offermsg.child_relid;
	msg->gpadl = gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (channel->rescind)
		goto post_msg_err;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
			     true);

	trace_vmbus_teardown_gpadl(msg, ret);

	if (ret)
		goto post_msg_err;

	wait_for_completion(&info->waitevent);

post_msg_err:
	/*
	 * If the channel has been rescinded, we will be awakened by the
	 * rescind handler; set the error code to zero so we don't leak memory.
	 */
	if (channel->rescind)
		ret = 0;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(info);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

void vmbus_reset_channel_cb(struct vmbus_channel *channel)
{
	unsigned long flags;

	/*
	 * vmbus_on_event(), running in the per-channel tasklet, can race
	 * with vmbus_close_internal() in the case of an SMP guest, e.g., when
	 * the former is accessing channel->inbound.ring_buffer, the latter
	 * could be freeing the ring_buffer pages, so here we must stop it
	 * first.
	 *
	 * vmbus_chan_sched() might call the netvsc driver callback function
	 * that ends up scheduling NAPI work that accesses the ring buffer.
	 * At this point, we have to ensure that any such work is completed
	 * and that the channel ring buffer is no longer being accessed, cf.
	 * the calls to napi_disable() in netvsc_device_remove().
	 */
	tasklet_disable(&channel->callback_event);

	/* See the inline comments in vmbus_chan_sched(). */
	spin_lock_irqsave(&channel->sched_lock, flags);
	channel->onchannel_callback = NULL;
	spin_unlock_irqrestore(&channel->sched_lock, flags);

	channel->sc_creation_callback = NULL;

	/* Re-enable tasklet for use on re-open */
	tasklet_enable(&channel->callback_event);
}

static int vmbus_close_internal(struct vmbus_channel *channel)
{
	struct vmbus_channel_close_channel *msg;
	int ret;

	vmbus_reset_channel_cb(channel);

	/*
	 * In case a device driver's probe() fails (e.g.,
	 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
	 * rescinded later (e.g., we dynamically disable an Integrated Service
	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
	 * here we should skip most of the below cleanup work.
	 */
	if (channel->state != CHANNEL_OPENED_STATE)
		return -EINVAL;

	channel->state = CHANNEL_OPEN_STATE;

	/* Send a closing message */
	msg = &channel->close_msg.msg;

	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
	msg->child_relid = channel->offermsg.child_relid;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
			     true);

	trace_vmbus_close_internal(msg, ret);

	if (ret) {
		pr_err("Close failed: close post msg return is %d\n", ret);
		/*
		 * If we failed to post the close msg,
		 * it is perhaps better to leak memory.
		 */
	}

	/* Tear down the gpadl for the channel's ring buffer */
	else if (channel->ringbuffer_gpadlhandle) {
		ret = vmbus_teardown_gpadl(channel,
					   channel->ringbuffer_gpadlhandle);
		if (ret) {
			pr_err("Close failed: teardown gpadl return %d\n", ret);
			/*
			 * If we failed to teardown gpadl,
			 * it is perhaps better to leak memory.
			 */
		}

		channel->ringbuffer_gpadlhandle = 0;
	}

	return ret;
}

/* disconnect ring - close all channels */
int vmbus_disconnect_ring(struct vmbus_channel *channel)
{
	struct vmbus_channel *cur_channel, *tmp;
	int ret;

	if (channel->primary_channel != NULL)
		return -EINVAL;

	list_for_each_entry_safe(cur_channel, tmp, &channel->sc_list, sc_list) {
		if (cur_channel->rescind)
			wait_for_completion(&cur_channel->rescind_event);

		mutex_lock(&vmbus_connection.channel_mutex);
		if (vmbus_close_internal(cur_channel) == 0) {
			vmbus_free_ring(cur_channel);

			if (cur_channel->rescind)
				hv_process_channel_removal(cur_channel);
		}
		mutex_unlock(&vmbus_connection.channel_mutex);
	}

	/*
	 * Now close the primary.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	ret = vmbus_close_internal(channel);
	mutex_unlock(&vmbus_connection.channel_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
	if (vmbus_disconnect_ring(channel) == 0)
		vmbus_free_ring(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure
 * @buffer: Pointer to the buffer you want to send the data from.
 * @bufferlen: Maximum size of what the buffer holds.
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 *	  packet etc.
 * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
 *
 * Sends data in @buffer directly to Hyper-V via the vmbus.
 * This will send the data unparsed to Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u64 requestid,
		     enum vmbus_packet_type type, u32 flags)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	int num_vecs = ((bufferlen != 0) ? 3 : 1);

	/* Setup the descriptor */
	desc.type = type; /* VmbusPacketTypeDataInBand; */
	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
	/* in 8-bytes granularity */
	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
	desc.len8 = (u16)(packetlen_aligned >> 3);
	desc.trans_id = requestid;

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, num_vecs);
}
EXPORT_SYMBOL(vmbus_sendpacket);

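/*
 * Illustrative sketch (not part of the original file): sending a small in-band
 * request and asking the host for a completion packet.  The payload layout and
 * the use of the buffer address as the request id are hypothetical.
 */
#if 0
static int example_send(struct vmbus_channel *chan)
{
	u32 payload[2] = { 1, 2 };	/* device-specific request */

	return vmbus_sendpacket(chan, payload, sizeof(payload),
				(u64)(unsigned long)payload, /* echoed as trans_id */
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
#endif
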
/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows you
 * to control notifying the host. This will be useful for sending
 * batched data. Also the sender can control the send flags
 * explicitly.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				struct hv_page_buffer pagebuffers[],
				u32 pagecount, void *buffer, u32 bufferlen,
				u64 requestid)
{
	int i;
	struct vmbus_channel_packet_page_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;

	if (pagecount > MAX_PAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
	 * largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
			  ((MAX_PAGE_BUFFER_COUNT - pagecount) *
			  sizeof(struct hv_page_buffer));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.reserved = 0;
	desc.rangecount = pagecount;

	for (i = 0; i < pagecount; i++) {
		desc.range[i].len = pagebuffers[i].len;
		desc.range[i].offset = pagebuffers[i].offset;
		desc.range[i].pfn = pagebuffers[i].pfn;
	}

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = descsize;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);

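/*
 * Illustrative sketch (not part of the original file): describing one guest
 * page by pfn/offset/len and sending it as a GPA-direct packet, the way the
 * networking and storage drivers pass frame or request buffers.  The in-band
 * header and request id are hypothetical; @len must not cross the page.
 */
#if 0
static int example_send_page(struct vmbus_channel *chan, void *data, u32 len,
			     void *hdr, u32 hdrlen, u64 reqid)
{
	struct hv_page_buffer pb;

	pb.pfn = virt_to_hvpfn(data);
	pb.offset = offset_in_page(data);
	pb.len = len;

	return vmbus_sendpacket_pagebuffer(chan, &pb, 1, hdr, hdrlen, reqid);
}
#endif
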
/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
			      struct vmbus_packet_mpb_array *desc,
			      u32 desc_size,
			      void *buffer, u32 bufferlen, u64 requestid)
{
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;

	packetlen = desc_size + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
	desc->length8 = (u16)(packetlen_aligned >> 3);
	desc->transactionid = requestid;
	desc->reserved = 0;
	desc->rangecount = 1;

	bufferlist[0].iov_base = desc;
	bufferlist[0].iov_len = desc_size;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

/**
 * __vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer can hold.
 * @buffer_actual_len: The actual size of the data after it was received.
 * @requestid: Identifier of the request
 * @raw: true means keep the vmpacket_descriptor header in the received data.
 *
 * Receives directly from the hyper-v vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
static inline int
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
		   bool raw)
{
	return hv_ringbuffer_read(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, raw);
}

int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u32 *buffer_actual_len,
		     u64 *requestid)
{
	return __vmbus_recvpacket(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, false);
}
EXPORT_SYMBOL(vmbus_recvpacket);

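/*
 * Illustrative sketch (not part of the original file): draining a channel from
 * its onchannelcallback with vmbus_recvpacket().  The scratch-buffer size and
 * the packet handling are hypothetical; a read that finds no packet returns 0
 * with a zero actual length.
 */
#if 0
static void example_drain_callback(void *context)
{
	struct vmbus_channel *chan = context;
	u8 buf[256];
	u32 actual_len;
	u64 req_id;

	while (vmbus_recvpacket(chan, buf, sizeof(buf),
				&actual_len, &req_id) == 0 && actual_len)
		; /* process @actual_len bytes of @buf, keyed by @req_id */
}
#endif
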
/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u32 *buffer_actual_len,
			 u64 *requestid)
{
	return __vmbus_recvpacket(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);