/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
void vmbus_setevent(struct vmbus_channel *channel)
{
	struct hv_monitor_page *monitorpage;

	/*
	 * For channels marked as in "low latency" mode
	 * bypass the monitor page mechanism.
	 */
	if ((channel->offermsg.monitor_allocated) &&
	    (!channel->low_latency)) {
		/* Each u32 represents 32 channels */
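		/*
		 * For example, child_relid 40 is signalled via bit
		 * (40 & 31) = 8 of u32 number (40 >> 5) = 1 in the
		 * interrupt page.
		 */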
		sync_set_bit(channel->offermsg.child_relid & 31,
			(unsigned long *) vmbus_connection.send_int_page +
			(channel->offermsg.child_relid >> 5));

		/* Get the child to parent monitor page */
		monitorpage = vmbus_connection.monitor_pages[1];

		sync_set_bit(channel->monitor_bit,
			(unsigned long *)&monitorpage->trigger_group
					[channel->monitor_grp].pending);
	} else {
		vmbus_set_event(channel);
	}
}
EXPORT_SYMBOL_GPL(vmbus_setevent);

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
	       u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
	       void (*onchannelcallback)(void *context), void *context)
{
	struct vmbus_channel_open_channel *open_msg;
	struct vmbus_channel_msginfo *open_info = NULL;
	unsigned long flags;
	int ret, err = 0;
	struct page *page;

	if (send_ringbuffer_size % PAGE_SIZE ||
	    recv_ringbuffer_size % PAGE_SIZE)
		return -EINVAL;

	spin_lock_irqsave(&newchannel->lock, flags);
	if (newchannel->state == CHANNEL_OPEN_STATE) {
		newchannel->state = CHANNEL_OPENING_STATE;
	} else {
		spin_unlock_irqrestore(&newchannel->lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&newchannel->lock, flags);

	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;

	/* Allocate the ring buffer */
	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
				GFP_KERNEL|__GFP_ZERO,
				get_order(send_ringbuffer_size +
					  recv_ringbuffer_size));

	if (!page)
		page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
				   get_order(send_ringbuffer_size +
					     recv_ringbuffer_size));

	if (!page) {
		err = -ENOMEM;
		goto error_set_chnstate;
	}

	newchannel->ringbuffer_pages = page_address(page);
	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
					    recv_ringbuffer_size) >> PAGE_SHIFT;

	ret = hv_ringbuffer_init(&newchannel->outbound, page,
				 send_ringbuffer_size >> PAGE_SHIFT);

	if (ret != 0) {
		err = ret;
		goto error_free_pages;
	}

	ret = hv_ringbuffer_init(&newchannel->inbound,
				 &page[send_ringbuffer_size >> PAGE_SHIFT],
				 recv_ringbuffer_size >> PAGE_SHIFT);
	if (ret != 0) {
		err = ret;
		goto error_free_pages;
	}

	/* Establish the gpadl for the ring buffer */
	newchannel->ringbuffer_gpadlhandle = 0;

	ret = vmbus_establish_gpadl(newchannel,
				    page_address(page),
				    send_ringbuffer_size +
				    recv_ringbuffer_size,
				    &newchannel->ringbuffer_gpadlhandle);
	if (ret != 0) {
		err = ret;
		goto error_free_pages;
	}

	/* Create and init the channel open message */
	open_info = kmalloc(sizeof(*open_info) +
			    sizeof(struct vmbus_channel_open_channel),
			    GFP_KERNEL);
	if (!open_info) {
		err = -ENOMEM;
		goto error_free_gpadl;
	}

	init_completion(&open_info->waitevent);
	open_info->waiting_channel = newchannel;

	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	open_msg->openid = newchannel->offermsg.child_relid;
	open_msg->child_relid = newchannel->offermsg.child_relid;
	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
						     PAGE_SHIFT;
	open_msg->target_vp = newchannel->target_vp;

	if (userdatalen > MAX_USER_DEFINED_BYTES) {
		err = -EINVAL;
		goto error_free_gpadl;
	}

	if (userdatalen)
		memcpy(open_msg->userdata, userdata, userdatalen);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&open_info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(open_msg,
			     sizeof(struct vmbus_channel_open_channel), true);
	if (ret != 0) {
		err = ret;
		goto error_clean_msglist;
	}

	wait_for_completion(&open_info->waitevent);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (newchannel->rescind) {
		err = -ENODEV;
		goto error_free_gpadl;
	}

	if (open_info->response.open_result.status) {
		err = -EAGAIN;
		goto error_free_gpadl;
	}

	newchannel->state = CHANNEL_OPENED_STATE;
	kfree(open_info);
	return 0;

error_clean_msglist:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

error_free_gpadl:
	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
	kfree(open_info);
error_free_pages:
	hv_ringbuffer_cleanup(&newchannel->outbound);
	hv_ringbuffer_cleanup(&newchannel->inbound);
	__free_pages(page,
		     get_order(send_ringbuffer_size + recv_ringbuffer_size));
error_set_chnstate:
	newchannel->state = CHANNEL_OPEN_STATE;
	return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
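
/*
 * Example (illustrative sketch only, not code from this file): a typical
 * device driver probe() opens its channel along these lines; the ring
 * sizes and the my_* names are hypothetical.
 *
 *	ret = vmbus_open(dev->channel, 16 * PAGE_SIZE, 16 * PAGE_SIZE,
 *			 NULL, 0, my_channel_callback, my_ctx);
 *	if (ret != 0)
 *		return ret;
 *	(... use the channel; the matching cleanup is vmbus_close() ...)
 */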

/* Used for Hyper-V Socket: a guest client's connect() to the host */
int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
				  const uuid_le *shv_host_servie_id)
{
	struct vmbus_channel_tl_connect_request conn_msg;

	memset(&conn_msg, 0, sizeof(conn_msg));
	conn_msg.header.msgtype = CHANNELMSG_TL_CONNECT_REQUEST;
	conn_msg.guest_endpoint_id = *shv_guest_servie_id;
	conn_msg.host_service_id = *shv_host_servie_id;

	return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
			       struct vmbus_channel_msginfo **msginfo)
{
	int i;
	int pagecount;
	struct vmbus_channel_gpadl_header *gpadl_header;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msgheader;
	struct vmbus_channel_msginfo *msgbody = NULL;
	u32 msgsize;

	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

	pagecount = size >> PAGE_SHIFT;

	/* do we need a gpadl body msg */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_header) -
		  sizeof(struct gpa_range);
	pfncount = pfnsize / sizeof(u64);
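
	/*
	 * Illustrative arithmetic (the struct sizes used here are
	 * assumptions, not exact values): the channel-message payload is
	 * capped at MAX_SIZE_CHANNEL_MESSAGE bytes, so if the header plus
	 * one gpa_range left, say, 208 bytes of room, pfncount would be
	 * 208 / 8 = 26 and any buffer spanning more than 26 pages would
	 * need CHANNELMSG_GPADL_BODY messages for the remaining PFNs.
	 */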

	if (pagecount > pfncount) {
		/* we need a gpadl body */
		/* fill in the header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pfncount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (!msgheader)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					 pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pfncount; i++)
			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
		*msginfo = msgheader;

		pfnsum = pfncount;
		pfnleft = pagecount - pfncount;

		/* how many pfns can we fit */
		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
			  sizeof(struct vmbus_channel_gpadl_body);
		pfncount = pfnsize / sizeof(u64);

		/* fill in the body */
		while (pfnleft) {
			if (pfnleft > pfncount)
				pfncurr = pfncount;
			else
				pfncurr = pfnleft;

			msgsize = sizeof(struct vmbus_channel_msginfo) +
				  sizeof(struct vmbus_channel_gpadl_body) +
				  pfncurr * sizeof(u64);
			msgbody = kzalloc(msgsize, GFP_KERNEL);

			if (!msgbody) {
				struct vmbus_channel_msginfo *pos = NULL;
				struct vmbus_channel_msginfo *tmp = NULL;
				/*
				 * Free up all the allocated messages.
				 */
				list_for_each_entry_safe(pos, tmp,
					&msgheader->submsglist,
					msglistentry) {

					list_del(&pos->msglistentry);
					kfree(pos);
				}

				goto nomem;
			}

			msgbody->msgsize = msgsize;
			gpadl_body =
				(struct vmbus_channel_gpadl_body *)msgbody->msg;

			/*
			 * Gpadl is u32 and we are using a pointer which could
			 * be 64-bit.
			 * This is governed by the guest/host protocol and
			 * so the hypervisor guarantees that this is ok.
			 */
			for (i = 0; i < pfncurr; i++)
				gpadl_body->pfn[i] = slow_virt_to_phys(
					kbuffer + PAGE_SIZE * (pfnsum + i)) >>
					PAGE_SHIFT;

			/* add to msg header */
			list_add_tail(&msgbody->msglistentry,
				      &msgheader->submsglist);
			pfnsum += pfncurr;
			pfnleft -= pfncurr;
		}
	} else {
		/* everything fits in a header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pagecount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (msgheader == NULL)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					 pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pagecount; i++)
			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;

		*msginfo = msgheader;
	}

	return 0;
nomem:
	kfree(msgheader);
	kfree(msgbody);
	return -ENOMEM;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: the channel the GPADL is established on
 * @kbuffer: the buffer to map, from kmalloc or vmalloc
 * @size: size of the buffer, a page-size multiple
 * @gpadl_handle: the handle of the new GPADL, returned to the caller
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
			  u32 size, u32 *gpadl_handle)
{
	struct vmbus_channel_gpadl_header *gpadlmsg;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msginfo = NULL;
	struct vmbus_channel_msginfo *submsginfo, *tmp;
	struct list_head *curr;
	u32 next_gpadl_handle;
	unsigned long flags;
	int ret = 0;

	next_gpadl_handle =
		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

	ret = create_gpadl_header(kbuffer, size, &msginfo);
	if (ret)
		return ret;

	init_completion(&msginfo->waitevent);
	msginfo->waiting_channel = channel;

	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
	gpadlmsg->child_relid = channel->offermsg.child_relid;
	gpadlmsg->gpadl = next_gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&msginfo->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
			     sizeof(*msginfo), true);
	if (ret != 0)
		goto cleanup;

	list_for_each(curr, &msginfo->submsglist) {
		submsginfo = (struct vmbus_channel_msginfo *)curr;
		gpadl_body =
			(struct vmbus_channel_gpadl_body *)submsginfo->msg;

		gpadl_body->header.msgtype =
			CHANNELMSG_GPADL_BODY;
		gpadl_body->gpadl = next_gpadl_handle;

		ret = vmbus_post_msg(gpadl_body,
				     submsginfo->msgsize - sizeof(*submsginfo),
				     true);
		if (ret != 0)
			goto cleanup;
	}
	wait_for_completion(&msginfo->waitevent);

	if (msginfo->response.gpadl_created.creation_status != 0) {
		pr_err("Failed to establish GPADL: err = 0x%x\n",
		       msginfo->response.gpadl_created.creation_status);
		ret = -EDQUOT;
		goto cleanup;
	}

	if (channel->rescind) {
		ret = -ENODEV;
		goto cleanup;
	}

	/* At this point, we received the gpadl created msg */
	*gpadl_handle = gpadlmsg->gpadl;

cleanup:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&msginfo->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
				 msglistentry) {
		kfree(submsginfo);
	}

	kfree(msginfo);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
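
/*
 * Example (illustrative sketch only): establishing and later tearing down
 * a GPADL for a driver-owned buffer; "buf" and "my_gpadl" are hypothetical.
 *
 *	u32 my_gpadl;
 *	void *buf = vzalloc(8 * PAGE_SIZE);
 *
 *	ret = vmbus_establish_gpadl(channel, buf, 8 * PAGE_SIZE, &my_gpadl);
 *	(... hand my_gpadl to the host in a device-specific message ...)
 *	vmbus_teardown_gpadl(channel, my_gpadl);
 */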

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
	struct vmbus_channel_gpadl_teardown *msg;
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret;

	info = kmalloc(sizeof(*info) +
		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	init_completion(&info->waitevent);
	info->waiting_channel = channel;

	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
	msg->child_relid = channel->offermsg.child_relid;
	msg->gpadl = gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
			     true);

	if (ret)
		goto post_msg_err;

	wait_for_completion(&info->waitevent);

post_msg_err:
	/*
	 * If the channel has been rescinded, we will be awakened by the
	 * rescind handler; set the error code to zero so we don't leak memory.
	 */
	if (channel->rescind)
		ret = 0;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(info);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

static void reset_channel_cb(void *arg)
{
	struct vmbus_channel *channel = arg;

	channel->onchannel_callback = NULL;
}

static int vmbus_close_internal(struct vmbus_channel *channel)
{
	struct vmbus_channel_close_channel *msg;
	int ret;

	/*
	 * process_chn_event(), running in the tasklet, can race
	 * with vmbus_close_internal() in the case of SMP guest, e.g., when
	 * the former is accessing channel->inbound.ring_buffer, the latter
	 * could be freeing the ring_buffer pages.
	 *
	 * To resolve the race, we can serialize them by disabling the
	 * tasklet when the latter is running here.
	 */
	hv_event_tasklet_disable(channel);

	/*
	 * In case a device driver's probe() fails (e.g.,
	 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
	 * rescinded later (e.g., we dynamically disable an Integrated Service
	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
	 * here we should skip most of the below cleanup work.
	 */
	if (channel->state != CHANNEL_OPENED_STATE) {
		ret = -EINVAL;
		goto out;
	}

	channel->state = CHANNEL_OPEN_STATE;
	channel->sc_creation_callback = NULL;
	/* Stop the callback asap */
	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu, reset_channel_cb,
					 channel, true);
	} else {
		reset_channel_cb(channel);
		put_cpu();
	}

	/* Send a closing message */

	msg = &channel->close_msg.msg;

	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
	msg->child_relid = channel->offermsg.child_relid;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
			     true);

	if (ret) {
		pr_err("Close failed: close post msg return is %d\n", ret);
		/*
		 * If we failed to post the close msg,
		 * it is perhaps better to leak memory.
		 */
		goto out;
	}

	/* Tear down the gpadl for the channel's ring buffer */
	if (channel->ringbuffer_gpadlhandle) {
		ret = vmbus_teardown_gpadl(channel,
					   channel->ringbuffer_gpadlhandle);
		if (ret) {
			pr_err("Close failed: teardown gpadl return %d\n", ret);
			/*
			 * If we failed to teardown gpadl,
			 * it is perhaps better to leak memory.
			 */
			goto out;
		}
	}

	/* Cleanup the ring buffers for this channel */
	hv_ringbuffer_cleanup(&channel->outbound);
	hv_ringbuffer_cleanup(&channel->inbound);

	free_pages((unsigned long)channel->ringbuffer_pages,
		   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

out:
	hv_event_tasklet_enable(channel);

	return ret;
}

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (channel->primary_channel != NULL) {
		/*
		 * We will only close sub-channels when
		 * the primary is closed.
		 */
		return;
	}
	/*
	 * Close all the sub-channels first and then close the
	 * primary channel.
	 */
	list_for_each_safe(cur, tmp, &channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;
		vmbus_close_internal(cur_channel);
	}
	/*
	 * Now close the primary.
	 */
	vmbus_close_internal(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);

int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u64 requestid,
			 enum vmbus_packet_type type, u32 flags, bool kick_q)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool lock = channel->acquire_ring_lock;
	int num_vecs = ((bufferlen != 0) ? 3 : 1);

	/* Setup the descriptor */
	desc.type = type; /* VmbusPacketTypeDataInBand; */
	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
	/* in 8-bytes granularity */
	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
	desc.len8 = (u16)(packetlen_aligned >> 3);
	desc.trans_id = requestid;

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
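
	/*
	 * The third kvec above supplies the zero bytes that pad the packet
	 * out to the next 8-byte boundary: e.g. a 30-byte packet is written
	 * as 32 bytes, with the last 2 bytes taken from aligned_data.
	 */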

	return hv_ringbuffer_write(channel, bufferlist, num_vecs,
				   lock, kick_q);
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Length of the data in the buffer.
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 * packet etc.
 * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
 *
 * Sends data in @buffer directly to hyper-v via the vmbus.
 * This will send the data unparsed to hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u64 requestid,
		     enum vmbus_packet_type type, u32 flags)
{
	return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
				    type, flags, true);
}
EXPORT_SYMBOL(vmbus_sendpacket);
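
/*
 * Example (illustrative sketch only): sending a request in-band and asking
 * the host to complete it; "pkt" is a hypothetical protocol structure.
 *
 *	ret = vmbus_sendpacket(channel, &pkt, sizeof(pkt),
 *			       (unsigned long)&pkt, VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */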

/*
 * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows you
 * to control notifying the host. This will be useful for sending
 * batched data. Also the sender can control the send flags
 * explicitly.
 */
int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
				    struct hv_page_buffer pagebuffers[],
				    u32 pagecount, void *buffer, u32 bufferlen,
				    u64 requestid,
				    u32 flags,
				    bool kick_q)
{
	int i;
	struct vmbus_channel_packet_page_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool lock = channel->acquire_ring_lock;

	if (pagecount > MAX_PAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
	 * largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
			  ((MAX_PAGE_BUFFER_COUNT - pagecount) *
			  sizeof(struct hv_page_buffer));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = flags;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = pagecount;

	for (i = 0; i < pagecount; i++) {
		desc.range[i].len = pagebuffers[i].len;
		desc.range[i].offset = pagebuffers[i].offset;
		desc.range[i].pfn = pagebuffers[i].pfn;
	}

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = descsize;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3,
				   lock, kick_q);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				struct hv_page_buffer pagebuffers[],
				u32 pagecount, void *buffer, u32 bufferlen,
				u64 requestid)
{
	u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;

	return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
					       buffer, bufferlen, requestid,
					       flags, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
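
/*
 * Example (illustrative sketch only): describing one page of payload to the
 * host by PFN instead of copying it through the ring; names are hypothetical.
 *
 *	struct hv_page_buffer pb;
 *
 *	pb.pfn = virt_to_phys(data) >> PAGE_SHIFT;
 *	pb.offset = offset_in_page(data);
 *	pb.len = data_len;	(offset + len must stay within the page)
 *
 *	ret = vmbus_sendpacket_pagebuffer(channel, &pb, 1, &hdr, sizeof(hdr),
 *					  req_id);
 */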

/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
			      struct vmbus_packet_mpb_array *desc,
			      u32 desc_size,
			      void *buffer, u32 bufferlen, u64 requestid)
{
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool lock = channel->acquire_ring_lock;

	packetlen = desc_size + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
	desc->length8 = (u16)(packetlen_aligned >> 3);
	desc->transactionid = requestid;
	desc->rangecount = 1;

	bufferlist[0].iov_base = desc;
	bufferlist[0].iov_len = desc_size;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3,
				   lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 */
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
				     struct hv_multipage_buffer *multi_pagebuffer,
				     void *buffer, u32 bufferlen, u64 requestid)
{
	struct vmbus_channel_packet_multipage_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool lock = channel->acquire_ring_lock;
	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
					 multi_pagebuffer->len);

	if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_multipage_buffer is
	 * the largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
			  ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
			  sizeof(u64));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = 1;

	desc.range.len = multi_pagebuffer->len;
	desc.range.offset = multi_pagebuffer->offset;

	memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
	       pfncount * sizeof(u64));

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = descsize;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3,
				   lock, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer will hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the hyper-v vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
static inline int
__vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		   u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
		   bool raw)
{
	return hv_ringbuffer_read(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, raw);
}

int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u32 *buffer_actual_len,
		     u64 *requestid)
{
	return __vmbus_recvpacket(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, false);
}
EXPORT_SYMBOL(vmbus_recvpacket);
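
/*
 * Example (illustrative sketch only): draining a channel from its callback;
 * buffer sizing, the empty-ring behavior assumed here, and the names are
 * hypothetical.
 *
 *	u32 actual_len;
 *	u64 req_id;
 *
 *	while (vmbus_recvpacket(channel, buf, buflen, &actual_len,
 *				&req_id) == 0 && actual_len)
 *		my_handle_packet(buf, actual_len, req_id);
 */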

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u32 *buffer_actual_len,
			 u64 *requestid)
{
	return __vmbus_recvpacket(channel, buffer, bufferlen,
				  buffer_actual_len, requestid, true);
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);