/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#ifndef _HYPERV_H
#define _HYPERV_H

#include <uapi/linux/hyperv.h>

#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
#include <asm/hyperv-tlfs.h>

#define MAX_PAGE_BUFFER_COUNT			32
#define MAX_MULTIPAGE_BUFFER_COUNT		32 /* 128K */
/*
 * The type of GPADL (see enum hv_gpadl_type below) decides how the GPADL
 * header is created.
 *
 * It doesn't make much difference between BUFFER and RING if PAGE_SIZE is the
 * same as HV_HYP_PAGE_SIZE.
 *
 * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the header of a ring buffer
 * will be of PAGE_SIZE, but only its first HV_HYP_PAGE will be put into the
 * GPADL; therefore the number of HV_HYP_PAGEs and the index of each
 * HV_HYP_PAGE differ between the GPADL types. For example, if PAGE_SIZE
 * is 64K:
 *
 * BUFFER:
 *
 * gva:    |--       64k      --|--       64k      --| ... |
 * gpa:    | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
 * index:  0    1    2     15   16   17   18 .. 31   32 ...
 *         |    |    ...   |    |    |    ...   |    ...
 * gpadl:  | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
 * index:  0    1    2 ... 15   16   17   18 .. 31   32 ...
 *
 * RING:
 *
 *         | header  |           data           | header  |     data      |
 * gva:    |-- 64k --|--       64k      --| ... |-- 64k --|-- 64k --| ... |
 * gpa:    | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
 * index:  0    1    16   17   18    31   ...   n   n+1   n+16     ...  2n
 *         |         /    /    ...   /    ...   |         /   ...  /
 * gpadl:  | 4k | 4k | ... | ... | 4k | 4k | ... |
 * index:  0    1    2  ...  16  ...  n-15 n-14 n-13 ... 2n-30
 */
enum hv_gpadl_type {
	HV_GPADL_BUFFER,
	HV_GPADL_RING
};
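/*
 * Illustrative sketch (hypothetical helpers, not part of this header):
 * per the layout above, a BUFFER-type GPADL carries one PFN for every
 * HV_HYP_PAGE of the whole range, while a RING-type GPADL carries one PFN
 * for the first HV_HYP_PAGE of the PAGE_SIZE header plus one PFN per
 * HV_HYP_PAGE of data.
 */
static inline u32 example_buffer_gpadl_pfns(u32 size)
{
	return size / HV_HYP_PAGE_SIZE;
}

static inline u32 example_ring_gpadl_pfns(u32 size)
{
	/* one entry for the header page, plus the data pages */
	return 1 + (size - PAGE_SIZE) / HV_HYP_PAGE_SIZE;
}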
/* Single-page buffer */
struct hv_page_buffer {
	u32 len;
	u32 offset;
	u64 pfn;
};

/* Multiple-page buffer */
struct hv_multipage_buffer {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[MAX_MULTIPAGE_BUFFER_COUNT];
};

/*
 * Multiple-page buffer array; the pfn array is variable size:
 * The number of entries in the PFN array is determined by
 * "len" and "offset".
 */
struct hv_mpb_array {
	/* Length and Offset determine the # of pfns in the array */
	u32 len;
	u32 offset;
	u64 pfn_array[];
};
/* 0x18 includes the proprietary packet header */
#define MAX_PAGE_BUFFER_PACKET		(0x18 +			\
					(sizeof(struct hv_page_buffer) * \
					 MAX_PAGE_BUFFER_COUNT))
#define MAX_MULTIPAGE_BUFFER_PACKET	(0x18 +			\
					 sizeof(struct hv_multipage_buffer))
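/*
 * Worked example (illustrative, assuming the field layouts above with no
 * padding): struct hv_page_buffer is 16 bytes, so MAX_PAGE_BUFFER_PACKET
 * is 0x18 + 16 * 32 = 536 bytes; struct hv_multipage_buffer is
 * 8 + 32 * 8 = 264 bytes, so MAX_MULTIPAGE_BUFFER_PACKET is 288 bytes.
 */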
struct hv_ring_buffer {
	/* Offset in bytes from the start of ring data below */
	u32 write_index;

	/* Offset in bytes from the start of ring data below */
	u32 read_index;

	u32 interrupt_mask;

	/*
	 * WS2012/Win8 and later versions of Hyper-V implement interrupt
	 * driven flow management. The feature bit feat_pending_send_sz
	 * is set by the host on the host->guest ring buffer, and by the
	 * guest on the guest->host ring buffer.
	 *
	 * The meaning of the feature bit is a bit complex in that it has
	 * semantics that apply to both ring buffers. If the guest sets
	 * the feature bit in the guest->host ring buffer, the guest is
	 * telling the host that:
	 * 1) It will set the pending_send_sz field in the guest->host ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the host->guest
	 *    ring buffer and interrupt the host when it frees enough space.
	 *
	 * Similarly, if the host sets the feature bit in the host->guest
	 * ring buffer, the host is telling the guest that:
	 * 1) It will set the pending_send_sz field in the host->guest ring
	 *    buffer when it is waiting for space to become available, and
	 * 2) It will read the pending_send_sz field in the guest->host
	 *    ring buffer and interrupt the guest when it frees enough space.
	 *
	 * If either the guest or host does not set the feature bit that it
	 * owns, that guest or host must do polling if it encounters a full
	 * ring buffer, and not signal the other end with an interrupt.
	 */
	u32 pending_send_sz;
	u32 reserved1[12];
	union {
		struct {
			u32 feat_pending_send_sz:1;
		};
		u32 value;
	} feature_bits;

	/* Pad it to PAGE_SIZE so that data starts on page boundary */
	u8 reserved2[PAGE_SIZE - 68];

	/*
	 * Ring data starts here + RingDataStartOffset
	 * !!! DO NOT place any fields below this !!!
	 */
	u8 buffer[];
} __packed;
/*
 * If the requested ring buffer size is at least 8 times the size of the
 * header, steal space from the ring buffer for the header. Otherwise, add
 * space for the header so that it doesn't take too much of the ring buffer
 * space.
 *
 * The factor of 8 is somewhat arbitrary. The goal is to prevent adding a
 * relatively small header (4 Kbytes on x86) to a large-ish power-of-2 ring
 * buffer size (such as 128 Kbytes), which would make the allocation nearly
 * twice as large and leave it almost half wasted. As a contrasting example,
 * on ARM64 with a 64 Kbyte page size, we don't want to take 64 Kbytes for the
 * header from a 128 Kbyte allocation, leaving only 64 Kbytes for the ring.
 * In this latter case, we must add 64 Kbytes for the header and not worry
 * about what's wasted.
 */
#define VMBUS_HEADER_ADJ(payload_sz) \
	((payload_sz) >= 8 * sizeof(struct hv_ring_buffer) ? \
	0 : sizeof(struct hv_ring_buffer))
/* Calculate the proper size of a ring buffer; it must be page-aligned */
#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(VMBUS_HEADER_ADJ(payload_sz) + \
					       (payload_sz))
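/*
 * Worked example (illustrative, assuming x86 with PAGE_SIZE == 4096 so
 * that sizeof(struct hv_ring_buffer) == 4096): a 128 Kbyte payload meets
 * the 8x threshold, so the header is stolen from the payload and
 * VMBUS_RING_SIZE(128 * 1024) stays 128 Kbytes. A 16 Kbyte payload does
 * not, so a header page is added: VMBUS_RING_SIZE(16 * 1024) is 20 Kbytes.
 * Under those assumptions the following hypothetical checks would hold:
 *
 *	static_assert(VMBUS_RING_SIZE(128 * 1024) == 128 * 1024);
 *	static_assert(VMBUS_RING_SIZE(16 * 1024) == 20 * 1024);
 */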
struct hv_ring_buffer_info {
	struct hv_ring_buffer *ring_buffer;
	u32 ring_size;			/* Include the shared header */
	struct reciprocal_value ring_size_div10_reciprocal;
	spinlock_t ring_lock;

	u32 ring_datasize;		/* < ring_size */
	u32 priv_read_index;
	/*
	 * The ring buffer mutex lock. This lock prevents the ring buffer from
	 * being freed while the ring buffer is being accessed.
	 */
	struct mutex ring_buffer_mutex;
};
/* Number of bytes available for the reader to consume. */
static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, read;

	dsize = rbi->ring_datasize;
	read_loc = rbi->ring_buffer->read_index;
	write_loc = READ_ONCE(rbi->ring_buffer->write_index);

	read = write_loc >= read_loc ? (write_loc - read_loc) :
		(dsize - read_loc) + write_loc;

	return read;
}
/* Number of bytes of free space available to the writer. */
static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
{
	u32 read_loc, write_loc, dsize, write;

	dsize = rbi->ring_datasize;
	read_loc = READ_ONCE(rbi->ring_buffer->read_index);
	write_loc = rbi->ring_buffer->write_index;

	write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
		read_loc - write_loc;

	return write;
}
static inline u32 hv_get_avail_to_write_percent(
		const struct hv_ring_buffer_info *rbi)
{
	u32 avail_write = hv_get_bytes_to_write(rbi);

	/*
	 * (x << 3) + (x << 1) == 10 * x, so this divides 10 * avail_write
	 * by ring_size / 10, i.e. it returns the free space as a percentage
	 * of the total ring size.
	 */
	return reciprocal_divide(
			(avail_write << 3) + (avail_write << 1),
			rbi->ring_size_div10_reciprocal);
}
/*
 * The VMBus version is a 32-bit entity broken up into
 * two 16-bit quantities: major_number . minor_number.
 *
 * 0 . 13 (Windows Server 2008)
 * 1 . 1  (Windows 7)
 * 2 . 4  (Windows 8)
 * 3 . 0  (Windows 8 R2)
 * 4 . 0  (Windows 10)
 * 4 . 1  (Windows 10 RS3)
 * 5 . 0  (Newer Windows 10)
 * 5 . 1  (Windows 10 RS4)
 * 5 . 2  (Windows Server 2019, RS5)
 */
#define VERSION_WS2008		((0 << 16) | (13))
#define VERSION_WIN7		((1 << 16) | (1))
#define VERSION_WIN8		((2 << 16) | (4))
#define VERSION_WIN8_1		((3 << 16) | (0))
#define VERSION_WIN10		((4 << 16) | (0))
#define VERSION_WIN10_V4_1	((4 << 16) | (1))
#define VERSION_WIN10_V5	((5 << 16) | (0))
#define VERSION_WIN10_V5_1	((5 << 16) | (1))
#define VERSION_WIN10_V5_2	((5 << 16) | (2))
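/*
 * Illustrative helpers (hypothetical, not part of this header): extract
 * the two 16-bit halves of a version value such as the negotiated
 * vmbus_proto_version declared later in this header.
 */
static inline u16 example_vmbus_major(u32 version)
{
	return version >> 16;
}

static inline u16 example_vmbus_minor(u32 version)
{
	return version & 0xffff;
}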
/* The maximum size of a pipe payload is 16K */
#define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)

/* Define PipeMode values. */
#define VMBUS_PIPE_TYPE_BYTE		0x00000000
#define VMBUS_PIPE_TYPE_MESSAGE		0x00000004

/* The size of the user defined data buffer for non-pipe offers. */
#define MAX_USER_DEFINED_BYTES		120

/* The size of the user defined data buffer for pipe offers. */
#define MAX_PIPE_USER_DEFINED_BYTES	116
/*
 * At the center of the Channel Management library is the Channel Offer. This
 * struct contains the fundamental information about an offer.
 */
struct vmbus_channel_offer {
	guid_t if_type;
	guid_t if_instance;

	/*
	 * These two fields are not currently used.
	 */
	u64 reserved1;
	u64 reserved2;

	u16 chn_flags;
	u16 mmio_megabytes;	/* in megabytes (1024 * 1024 bytes) */

	union {
		/* Non-pipes: The user has MAX_USER_DEFINED_BYTES bytes. */
		struct {
			unsigned char user_def[MAX_USER_DEFINED_BYTES];
		} std;

		/*
		 * Pipes:
		 * The following structure is an integrated pipe protocol,
		 * which is implemented on top of standard user-defined data.
		 * Pipe clients have MAX_PIPE_USER_DEFINED_BYTES left for
		 * their own use.
		 */
		struct {
			u32  pipe_mode;
			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
		} pipe;
	} u;

	/*
	 * The sub_channel_index is defined in Win8: a value of zero means a
	 * primary channel and a value of non-zero means a sub-channel.
	 *
	 * Before Win8, the field is reserved, meaning it's always zero.
	 */
	u16 sub_channel_index;
	u16 reserved3;
} __packed;
#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE	1
#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES	2
#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS		4
#define VMBUS_CHANNEL_NAMED_PIPE_MODE			0x10
#define VMBUS_CHANNEL_LOOPBACK_OFFER			0x100
#define VMBUS_CHANNEL_PARENT_OFFER			0x200
#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION	0x400
#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER		0x2000
struct vmpacket_descriptor {
	u16 type;
	u16 offset8;		/* offset to the payload, in quadwords */
	u16 len8;		/* total packet length, in quadwords */
	u16 flags;
	u64 trans_id;
} __packed;

struct vmpacket_header {
	u32 prev_pkt_start_offset;
	struct vmpacket_descriptor descriptor;
} __packed;

struct vmtransfer_page_range {
	u32 byte_count;
	u32 byte_offset;
} __packed;

struct vmtransfer_page_packet_header {
	struct vmpacket_descriptor d;
	u16 xfer_pageset_id;
	u8  sender_owns_set;
	u8  reserved;
	u32 range_cnt;
	struct vmtransfer_page_range ranges[1];
} __packed;

struct vmgpadl_packet_header {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;
} __packed;

struct vmadd_remove_transfer_page_set {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u16 xfer_pageset_id;
	u16 reserved;
} __packed;
/*
 * This structure defines a range in guest physical space that can be made to
 * look virtually contiguous.
 */
struct gpa_range {
	u32 byte_count;
	u32 byte_offset;
	u64 pfn_array[];
};

/*
 * This is the format for an Establish Gpadl packet, which contains a handle
 * by which this GPADL will be known and a set of GPA ranges associated with
 * it. This can be converted to an MDL by the guest OS. If there are multiple
 * GPA ranges, then the resulting MDL will be "chained," representing multiple
 * VA ranges.
 */
struct vmestablish_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/*
 * This is the format for a Teardown Gpadl packet, which indicates that the
 * GPADL handle in the Establish Gpadl packet will never be referenced again.
 */
struct vmteardown_gpadl {
	struct vmpacket_descriptor d;
	u32 gpadl;
	u32 reserved;	/* for alignment to an 8-byte boundary */
} __packed;

/*
 * This is the format for a GPA-Direct packet, which contains a set of GPA
 * ranges, in addition to commands and/or data.
 */
struct vmdata_gpa_direct {
	struct vmpacket_descriptor d;
	u32 reserved;
	u32 range_cnt;
	struct gpa_range range[1];
} __packed;

/* This is the format for an Additional Data packet. */
struct vmadditional_data {
	struct vmpacket_descriptor d;
	u64 total_bytes;
	u32 offset;
	u32 byte_count;
	unsigned char data[1];
} __packed;
union vmpacket_largest_possible_header {
	struct vmpacket_descriptor simple_hdr;
	struct vmtransfer_page_packet_header xfer_page_hdr;
	struct vmgpadl_packet_header gpadl_hdr;
	struct vmadd_remove_transfer_page_set add_rm_xfer_page_hdr;
	struct vmestablish_gpadl establish_gpadl_hdr;
	struct vmteardown_gpadl teardown_gpadl_hdr;
	struct vmdata_gpa_direct data_gpa_direct_hdr;
};

#define VMPACKET_DATA_START_ADDRESS(__packet)	\
	(void *)(((unsigned char *)__packet) +	\
	 ((struct vmpacket_descriptor *)__packet)->offset8 * 8)

#define VMPACKET_DATA_LENGTH(__packet)		\
	((((struct vmpacket_descriptor *)__packet)->len8 -	\
	  ((struct vmpacket_descriptor *)__packet)->offset8) * 8)

#define VMPACKET_TRANSFER_MODE(__packet)	\
	(((struct vmpacket_descriptor *)__packet)->type)
enum vmbus_packet_type {
	VM_PKT_INVALID				= 0x0,
	VM_PKT_SYNCH				= 0x1,
	VM_PKT_ADD_XFER_PAGESET			= 0x2,
	VM_PKT_RM_XFER_PAGESET			= 0x3,
	VM_PKT_ESTABLISH_GPADL			= 0x4,
	VM_PKT_TEARDOWN_GPADL			= 0x5,
	VM_PKT_DATA_INBAND			= 0x6,
	VM_PKT_DATA_USING_XFER_PAGES		= 0x7,
	VM_PKT_DATA_USING_GPADL			= 0x8,
	VM_PKT_DATA_USING_GPA_DIRECT		= 0x9,
	VM_PKT_CANCEL_REQUEST			= 0xa,
	VM_PKT_COMP				= 0xb,
	VM_PKT_DATA_USING_ADDITIONAL_PKT	= 0xc,
	VM_PKT_ADDITIONAL_DATA			= 0xd
};

#define VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED	1
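/*
 * Illustrative sketch (hypothetical wrapper): send a buffer as an in-band
 * packet and ask the host for a completion packet. vmbus_sendpacket() is
 * declared later in this header.
 */
static inline int example_send_inband(struct vmbus_channel *chan,
				      void *buf, u32 len, u64 requestid)
{
	return vmbus_sendpacket(chan, buf, len, requestid,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}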
/* Version 1 messages */
enum vmbus_channel_message_type {
	CHANNELMSG_INVALID		=  0,
	CHANNELMSG_OFFERCHANNEL		=  1,
	CHANNELMSG_RESCIND_CHANNELOFFER	=  2,
	CHANNELMSG_REQUESTOFFERS	=  3,
	CHANNELMSG_ALLOFFERS_DELIVERED	=  4,
	CHANNELMSG_OPENCHANNEL		=  5,
	CHANNELMSG_OPENCHANNEL_RESULT	=  6,
	CHANNELMSG_CLOSECHANNEL		=  7,
	CHANNELMSG_GPADL_HEADER		=  8,
	CHANNELMSG_GPADL_BODY		=  9,
	CHANNELMSG_GPADL_CREATED	= 10,
	CHANNELMSG_GPADL_TEARDOWN	= 11,
	CHANNELMSG_GPADL_TORNDOWN	= 12,
	CHANNELMSG_RELID_RELEASED	= 13,
	CHANNELMSG_INITIATE_CONTACT	= 14,
	CHANNELMSG_VERSION_RESPONSE	= 15,
	CHANNELMSG_UNLOAD		= 16,
	CHANNELMSG_UNLOAD_RESPONSE	= 17,
	CHANNELMSG_18			= 18,
	CHANNELMSG_19			= 19,
	CHANNELMSG_20			= 20,
	CHANNELMSG_TL_CONNECT_REQUEST	= 21,
	CHANNELMSG_MODIFYCHANNEL	= 22,
	CHANNELMSG_TL_CONNECT_RESULT	= 23,
	CHANNELMSG_COUNT
};

/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
#define INVALID_RELID	U32_MAX
struct vmbus_channel_message_header {
	enum vmbus_channel_message_type msgtype;
	u32 padding;
} __packed;

/* Query VMBus Version parameters */
struct vmbus_channel_query_vmbus_version {
	struct vmbus_channel_message_header header;
	u32 version;
} __packed;

/* VMBus Version Supported parameters */
struct vmbus_channel_version_supported {
	struct vmbus_channel_message_header header;
	u8 version_supported;
} __packed;

/* Offer Channel parameters */
struct vmbus_channel_offer_channel {
	struct vmbus_channel_message_header header;
	struct vmbus_channel_offer offer;
	u32 child_relid;
	u8 monitorid;
	/*
	 * win7 and beyond splits this field into a bit field.
	 */
	u8 monitor_allocated:1;
	u8 reserved:7;
	/*
	 * These are new fields added in win7 and later.
	 * Do not access these fields without checking the
	 * negotiated protocol.
	 *
	 * If "is_dedicated_interrupt" is set, we must not set the
	 * associated bit in the channel bitmap while sending the
	 * interrupt to the host.
	 *
	 * connection_id is to be used in signaling the host.
	 */
	u16 is_dedicated_interrupt:1;
	u16 reserved1:15;
	u32 connection_id;
} __packed;

/* Rescind Offer parameters */
struct vmbus_channel_rescind_offer {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

static inline u32
hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->pending_send_sz;
}
/*
 * Request Offer -- no parameters, SynIC message contains the partition ID
 * Set Snoop -- no parameters, SynIC message contains the partition ID
 * Clear Snoop -- no parameters, SynIC message contains the partition ID
 * All Offers Delivered -- no parameters, SynIC message contains the partition
 *			   ID
 * Flush Client -- no parameters, SynIC message contains the partition ID
 */
/* Open Channel parameters */
struct vmbus_channel_open_channel {
	struct vmbus_channel_message_header header;

	/* Identifies the specific VMBus channel that is being opened. */
	u32 child_relid;

	/* ID making a particular open request at a channel offer unique. */
	u32 openid;

	/* GPADL for the channel's ring buffer. */
	u32 ringbuffer_gpadlhandle;

	/*
	 * Starting with win8, this field will be used to specify
	 * the target virtual processor on which to deliver the interrupt for
	 * the host to guest communication.
	 * Prior to win8, incoming channel interrupts would only
	 * be delivered on cpu 0. Setting this value to 0 would
	 * preserve the earlier behavior.
	 */
	u32 target_vp;

	/*
	 * The upstream ring buffer begins at offset zero in the memory
	 * described by RingBufferGpadlHandle. The downstream ring buffer
	 * follows it at this offset (in pages).
	 */
	u32 downstream_ringbuffer_pageoffset;

	/* User-specific data to be passed along to the server endpoint. */
	unsigned char userdata[MAX_USER_DEFINED_BYTES];
} __packed;
/* Open Channel Result parameters */
struct vmbus_channel_open_result {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 openid;
	u32 status;
} __packed;

/* Close channel parameters */
struct vmbus_channel_close_channel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;

/* Channel Message GPADL */
#define GPADL_TYPE_RING_BUFFER		1
#define GPADL_TYPE_SERVER_SAVE_AREA	2
#define GPADL_TYPE_TRANSACTION		8
/*
 * The number of PFNs in a GPADL message is defined by the number of
 * pages that would be spanned by ByteCount and ByteOffset. If the
 * implied number of PFNs won't fit in this packet, there will be a
 * follow-up packet that contains more.
 */
struct vmbus_channel_gpadl_header {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u16 range_buflen;
	u16 rangecount;
	struct gpa_range range[];
} __packed;

/* This is the followup packet that contains more PFNs. */
struct vmbus_channel_gpadl_body {
	struct vmbus_channel_message_header header;
	u32 msgnumber;
	u32 gpadl;
	u64 pfn[];
} __packed;

struct vmbus_channel_gpadl_created {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
	u32 creation_status;
} __packed;

struct vmbus_channel_gpadl_teardown {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 gpadl;
} __packed;

struct vmbus_channel_gpadl_torndown {
	struct vmbus_channel_message_header header;
	u32 gpadl;
} __packed;

struct vmbus_channel_relid_released {
	struct vmbus_channel_message_header header;
	u32 child_relid;
} __packed;
struct vmbus_channel_initiate_contact {
	struct vmbus_channel_message_header header;
	u32 vmbus_version_requested;
	u32 target_vcpu; /* The VCPU the host should respond to */
	union {
		u64 interrupt_page;
		struct {
			u8	msg_sint;
			u8	padding1[3];
			u32	padding2;
		};
	};
	u64 monitor_page1;
	u64 monitor_page2;
} __packed;
/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
	struct vmbus_channel_message_header header;
	guid_t guest_endpoint_id;
	guid_t host_service_id;
} __packed;

/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
struct vmbus_channel_modifychannel {
	struct vmbus_channel_message_header header;
	u32 child_relid;
	u32 target_vp;
} __packed;
struct vmbus_channel_version_response {
	struct vmbus_channel_message_header header;
	u8 version_supported;

	u8 connection_state;
	u16 padding;

	/*
	 * On new hosts that support VMBus protocol 5.0, we must use
	 * VMBUS_MESSAGE_CONNECTION_ID_4 for the Initiate Contact Message,
	 * and for subsequent messages, we must use the Message Connection ID
	 * field in the host-returned Version Response Message.
	 *
	 * On old hosts, we should always use VMBUS_MESSAGE_CONNECTION_ID (1).
	 */
	u32 msg_conn_id;
} __packed;
enum vmbus_channel_state {
	CHANNEL_OFFER_STATE,
	CHANNEL_OPENING_STATE,
	CHANNEL_OPEN_STATE,
	CHANNEL_OPENED_STATE,
};

/*
 * Represents each channel msg on the vmbus connection. This is a
 * variable-size data structure depending on the msg type itself.
 */
struct vmbus_channel_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglistentry;

	/* So far, this is only used to handle gpadl body message */
	struct list_head submsglist;

	/* Synchronize the request/response if needed */
	struct completion waitevent;
	struct vmbus_channel *waiting_channel;
	union {
		struct vmbus_channel_version_supported version_supported;
		struct vmbus_channel_open_result open_result;
		struct vmbus_channel_gpadl_torndown gpadl_torndown;
		struct vmbus_channel_gpadl_created gpadl_created;
		struct vmbus_channel_version_response version_response;
	} response;

	u32 msgsize;
	/*
	 * The channel message that goes out on the "wire".
	 * It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
	 */
	unsigned char msg[];
};

struct vmbus_close_msg {
	struct vmbus_channel_msginfo info;
	struct vmbus_channel_close_channel msg;
};
/* Define connection identifier type. */
union hv_connection_id {
	u32 asu32;
	struct {
		u32 id:24;
		u32 reserved:8;
	} u;
};

enum vmbus_device_type {
/*
 * Provides request ids for VMBus. Encapsulates guest memory
 * addresses and stores the next available slot in req_arr
 * to generate new ids in constant time.
 */
struct vmbus_requestor {
	u64 *req_arr;
	unsigned long *req_bitmap; /* is a given slot available? */
	u32 size;
	u64 next_request_id;
	spinlock_t req_lock; /* provides atomicity */
};

#define VMBUS_NO_RQSTOR		U64_MAX
#define VMBUS_RQST_ERROR	(U64_MAX - 1)
#define VMBUS_RQST_ID_NO_RESPONSE (U64_MAX - 2)
struct vmbus_device {
	u16 dev_type;
	guid_t guid;
	bool perf_device;
};

struct vmbus_channel {
	struct list_head listentry;

	struct hv_device *device_obj;

	enum vmbus_channel_state state;

	struct vmbus_channel_offer_channel offermsg;
	/*
	 * These are based on the OfferMsg.MonitorId.
	 * Save it here for easy access.
	 */
	u8 monitor_grp;
	u8 monitor_bit;

	bool rescind; /* got rescind msg */
	struct completion rescind_event;

	u32 ringbuffer_gpadlhandle;

	/* Allocated memory for ring buffer */
	struct page *ringbuffer_page;
	u32 ringbuffer_pagecount;
	u32 ringbuffer_send_offset;
	struct hv_ring_buffer_info outbound;	/* send to parent */
	struct hv_ring_buffer_info inbound;	/* receive from parent */

	struct vmbus_close_msg close_msg;

	/* Statistics */
	u64 interrupts;	/* Host to Guest interrupts */
	u64 sig_events;	/* Guest to Host events */

	/*
	 * Guest to host interrupts caused by the outbound ring buffer changing
	 * from empty to not empty.
	 */
	u64 intr_out_empty;

	/*
	 * Indicates that a full outbound ring buffer was encountered. The flag
	 * is set to true when a full outbound ring buffer is encountered and
	 * set to false when a write to the outbound ring buffer is completed.
	 */
	bool out_full_flag;

	/* Channel callback is invoked in softirq context */
	struct tasklet_struct callback_event;
	void (*onchannel_callback)(void *context);
	void *channel_callback_context;

	void (*change_target_cpu_callback)(struct vmbus_channel *channel,
			u32 old, u32 new);

	/*
	 * Synchronize channel scheduling and channel removal; see the inline
	 * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
	 */
	spinlock_t sched_lock;
	/*
	 * A channel can be marked for one of three modes of reading:
	 *   BATCHED - callback called from tasklet and should read
	 *             channel until empty. Interrupts from the host
	 *             are masked while read is in process (default).
	 *   DIRECT - callback called from tasklet (softirq).
	 *   ISR - callback called in interrupt context and must
	 *         invoke its own deferred processing.
	 *         Host interrupts are disabled and must be re-enabled
	 *         when ring is empty.
	 */
	enum hv_callback_mode {
		HV_CALL_BATCHED,
		HV_CALL_DIRECT,
		HV_CALL_ISR
	} callback_mode;

	bool is_dedicated_interrupt;
	u64 sig_event;
	/*
	 * Starting with win8, this field will be used to specify the
	 * target CPU on which to deliver the interrupt for the host
	 * to guest communication.
	 *
	 * Prior to win8, incoming channel interrupts would only be
	 * delivered on CPU 0. Setting this value to 0 would preserve
	 * the earlier behavior.
	 */
	u32 target_cpu;
	/*
	 * Support for sub-channels. For high performance devices,
	 * it will be useful to have multiple sub-channels to support
	 * a scalable communication infrastructure with the host.
	 * The support for sub-channels is implemented as an extension
	 * to the current infrastructure.
	 * The initial offer is considered the primary channel and this
	 * offer message will indicate if the host supports sub-channels.
	 * The guest is free to ask for sub-channels to be offered and can
	 * open these sub-channels as a normal "primary" channel. However,
	 * all sub-channels will have the same type and instance guids as the
	 * primary channel. Requests sent on a given channel will result in a
	 * response on the same channel.
	 */

	/*
	 * Sub-channel creation callback. This callback will be called in
	 * process context when a sub-channel offer is received from the host.
	 * The guest can open the sub-channel in the context of this callback.
	 */
	void (*sc_creation_callback)(struct vmbus_channel *new_sc);

	/*
	 * Channel rescind callback. Some channels (the hvsock ones) need to
	 * register a callback which is invoked in vmbus_onoffer_rescind().
	 */
	void (*chn_rescind_callback)(struct vmbus_channel *channel);
	/*
	 * All Sub-channels of a primary channel are linked here.
	 */
	struct list_head sc_list;

	/*
	 * The primary channel this sub-channel belongs to.
	 * This will be NULL for the primary channel.
	 */
	struct vmbus_channel *primary_channel;

	/*
	 * Support per-channel state for use by vmbus drivers.
	 */
	void *per_channel_state;

	/*
	 * Defer freeing channel until after all cpu's have
	 * gone through grace period.
	 */
	struct rcu_head rcu;

	/*
	 * For sysfs per-channel properties.
	 */
	struct kobject kobj;
	/*
	 * For performance critical channels (storage, networking
	 * etc.), Hyper-V has a mechanism to enhance the throughput
	 * at the expense of latency:
	 * When the host is to be signaled, we just set a bit in a shared page
	 * and this bit will be inspected by the hypervisor within a certain
	 * window and if the bit is set, the host will be signaled. The window
	 * of time is the monitor latency - currently around 100 usecs. This
	 * mechanism improves throughput by:
	 *
	 * A) Making the host more efficient - each time it wakes up,
	 *    it can potentially process more packets. The monitor latency
	 *    allows a batch to build up.
	 * B) By deferring the hypercall to signal, we will also minimize
	 *    the interrupts.
	 *
	 * Clearly, these optimizations improve throughput at the expense of
	 * latency. Furthermore, since the channel is shared for both
	 * control and data messages, control messages currently suffer
	 * unnecessary latency adversely impacting performance and boot
	 * time. To fix this issue, permit tagging the channel as being
	 * in "low latency" mode. In this mode, we will bypass the monitor
	 * mechanism.
	 */
	bool low_latency;
	/*
	 * Cache the device ID here for easy access; this is useful, in
	 * particular, in situations where the channel's device_obj has
	 * not been allocated/initialized yet.
	 */
	u16 device_id;

	/*
	 * We must offload the handling of the primary/sub channels
	 * from the single-threaded vmbus_connection.work_queue to
	 * two different workqueues, otherwise we can block
	 * vmbus_connection.work_queue and hang: see vmbus_process_offer().
	 */
	struct work_struct add_channel_work;
	/*
	 * Guest to host interrupts caused by the inbound ring buffer changing
	 * from full to not full while a packet is waiting.
	 */
	u64 intr_in_full;

	/*
	 * The total number of write operations that encountered a full
	 * outbound ring buffer.
	 */
	u64 out_full_total;

	/*
	 * The number of write operations that were the first to encounter a
	 * full outbound ring buffer.
	 */
	u64 out_full_first;

	/* enabling/disabling fuzz testing on the channel (default is false)*/
	bool fuzz_testing_state;

	/*
	 * Interrupt delay will delay the guest from emptying the ring buffer
	 * for a specific amount of time. The delay is in microseconds and will
	 * be between 1 and a maximum of 1000; the default is 0 (no delay).
	 * The message delay will delay guest reading on a per message basis
	 * in microseconds between 1 and 1000, with the default being 0
	 * (no delay).
	 */
	u32 fuzz_testing_interrupt_delay;
	u32 fuzz_testing_message_delay;

	/* request/transaction ids for VMBus */
	struct vmbus_requestor requestor;
	u32 rqstor_size;
};
u64 vmbus_next_request_id(struct vmbus_requestor *rqstor, u64 rqst_addr);
u64 vmbus_request_addr(struct vmbus_requestor *rqstor, u64 trans_id);
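/*
 * Illustrative sketch (hypothetical driver code): allocate a request id
 * that travels as the packet's trans_id, then translate the host's
 * completion back to the original guest address.
 */
static inline u64 example_issue_request(struct vmbus_channel *chan,
					u64 guest_buf_addr)
{
	u64 rqst_id = vmbus_next_request_id(&chan->requestor, guest_buf_addr);

	/* VMBUS_RQST_ERROR: req_arr is full, the caller must retry later */
	return rqst_id;
}

static inline u64 example_complete_request(struct vmbus_channel *chan,
					   u64 trans_id)
{
	/* returns the guest address stashed by example_issue_request() */
	return vmbus_request_addr(&chan->requestor, trans_id);
}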
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
	return !!(c->offermsg.offer.chn_flags &
		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}

static inline bool is_sub_channel(const struct vmbus_channel *c)
{
	return c->offermsg.offer.sub_channel_index != 0;
}

static inline void set_channel_read_mode(struct vmbus_channel *c,
					 enum hv_callback_mode mode)
{
	c->callback_mode = mode;
}

static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
{
	c->per_channel_state = s;
}

static inline void *get_per_channel_state(struct vmbus_channel *c)
{
	return c->per_channel_state;
}
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
						 u32 size)
{
	unsigned long flags;

	if (size) {
		spin_lock_irqsave(&c->outbound.ring_lock, flags);
		++c->out_full_total;

		if (!c->out_full_flag) {
			++c->out_full_first;
			c->out_full_flag = true;
		}
		spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
	} else {
		c->out_full_flag = false;
	}

	c->outbound.ring_buffer->pending_send_sz = size;
}
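/*
 * Illustrative sketch (hypothetical driver code): the writer side of the
 * pending_send_sz protocol described with struct hv_ring_buffer above.
 * On a full ring, record how much space we need so the reader interrupts
 * us once that much has been freed.
 */
static inline bool example_try_write(struct vmbus_channel *chan, u32 bytes)
{
	if (hv_get_bytes_to_write(&chan->outbound) <= bytes) {
		set_channel_pending_send_size(chan, bytes);
		return false;	/* caller waits for the host's interrupt */
	}
	/* ... write the packet, e.g. via vmbus_sendpacket() ... */
	return true;
}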
static inline void set_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = true;
}

static inline void clear_low_latency_mode(struct vmbus_channel *c)
{
	c->low_latency = false;
}

void vmbus_onmessage(struct vmbus_channel_message_header *hdr);

int vmbus_request_offers(void);
/*
 * APIs for managing sub-channels.
 */

void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
			void (*sc_cr_cb)(struct vmbus_channel *new_sc));

void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
		void (*chn_rescind_cb)(struct vmbus_channel *));

/*
 * Check if sub-channels have already been offered. This API will be useful
 * when the driver is unloaded after establishing sub-channels. In this case,
 * when the driver is re-loaded, the driver would have to check if the
 * subchannels have already been established before attempting to request
 * the creation of sub-channels.
 * This function returns TRUE to indicate that subchannels have already been
 * created.
 * This function should be invoked after setting the callback function for
 * sub-channel creation.
 */
bool vmbus_are_subchannels_present(struct vmbus_channel *primary);
/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_page_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;
	struct hv_page_buffer range[MAX_PAGE_BUFFER_COUNT];
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_channel_packet_multipage_buffer {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_multipage_buffer range;
} __packed;

/* The format must be the same as struct vmdata_gpa_direct */
struct vmbus_packet_mpb_array {
	u16 type;
	u16 dataoffset8;
	u16 length8;
	u16 flags;
	u64 transactionid;
	u32 reserved;
	u32 rangecount;		/* Always 1 in this case */
	struct hv_mpb_array range;
} __packed;
int vmbus_alloc_ring(struct vmbus_channel *channel,
		     u32 send_size, u32 recv_size);
void vmbus_free_ring(struct vmbus_channel *channel);

int vmbus_connect_ring(struct vmbus_channel *channel,
		       void (*onchannel_callback)(void *context),
		       void *context);
int vmbus_disconnect_ring(struct vmbus_channel *channel);
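/*
 * Illustrative sketch (hypothetical sizes and names): the two-step
 * alternative to vmbus_open(), allocating the rings first and attaching
 * the callback afterwards.
 */
static inline int example_open_channel(struct vmbus_channel *chan,
				       void (*cb)(void *ctx), void *ctx)
{
	int ret;

	ret = vmbus_alloc_ring(chan, VMBUS_RING_SIZE(16 * 1024),
			       VMBUS_RING_SIZE(16 * 1024));
	if (ret)
		return ret;

	ret = vmbus_connect_ring(chan, cb, ctx);
	if (ret)
		vmbus_free_ring(chan);

	return ret;
}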
extern int vmbus_open(struct vmbus_channel *channel,
		      u32 send_ringbuffersize,
		      u32 recv_ringbuffersize,
		      void *userdata,
		      u32 userdatalen,
		      void (*onchannel_callback)(void *context),
		      void *context);

extern void vmbus_close(struct vmbus_channel *channel);

extern int vmbus_sendpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferLen,
			    u64 requestid,
			    enum vmbus_packet_type type,
			    u32 flags);

extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				       struct hv_page_buffer pagebuffers[],
				       u32 pagecount,
				       void *buffer,
				       u32 bufferlen,
				       u64 requestid);

extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
				     struct vmbus_packet_mpb_array *mpb,
				     u32 desc_size,
				     void *buffer,
				     u32 bufferlen,
				     u64 requestid);

extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
				 void *kbuffer,
				 u32 size,
				 u32 *gpadl_handle);

extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
				u32 gpadl_handle);

void vmbus_reset_channel_cb(struct vmbus_channel *channel);

extern int vmbus_recvpacket(struct vmbus_channel *channel,
			    void *buffer,
			    u32 bufferlen,
			    u32 *buffer_actual_len,
			    u64 *requestid);

extern int vmbus_recvpacket_raw(struct vmbus_channel *channel,
				void *buffer,
				u32 bufferlen,
				u32 *buffer_actual_len,
				u64 *requestid);

extern void vmbus_ontimer(unsigned long data);
/* Base driver object */
struct hv_driver {
	const char *name;

	/*
	 * A hvsock offer, which has a VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER
	 * channel flag, actually doesn't mean a synthetic device because the
	 * offer's if_type/if_instance can change for every new hvsock
	 * connection.
	 *
	 * However, to facilitate the notification of new-offer/rescind-offer
	 * from vmbus driver to hvsock driver, we can handle hvsock offer as
	 * a special vmbus device, and hence we need the below flag to
	 * indicate if the driver is the hvsock driver or not: we need to
	 * specially treat the hvsock offer & driver in vmbus_match().
	 */
	bool hvsock;

	/* the device type supported by this driver */
	guid_t dev_type;
	const struct hv_vmbus_device_id *id_table;

	struct device_driver driver;

	/* dynamic device GUID's */
	struct  {
		spinlock_t lock;
		struct list_head list;
	} dynids;

	int (*probe)(struct hv_device *, const struct hv_vmbus_device_id *);
	int (*remove)(struct hv_device *);
	void (*shutdown)(struct hv_device *);

	int (*suspend)(struct hv_device *);
	int (*resume)(struct hv_device *);
};
/* Base device object */
struct hv_device {
	/* the device type id of this device */
	guid_t dev_type;

	/* the device instance id of this device */
	guid_t dev_instance;
	u16 vendor_id;
	u16 device_id;

	struct device device;
	char *driver_override; /* Driver name to force a match */

	struct vmbus_channel *channel;
	struct kset *channels_kset;

	/* place holder to keep track of the dir for hv device in debugfs */
	struct dentry *debug_dir;
};
static inline struct hv_device *device_to_hv_device(struct device *d)
{
	return container_of(d, struct hv_device, device);
}

static inline struct hv_driver *drv_to_hv_drv(struct device_driver *d)
{
	return container_of(d, struct hv_driver, driver);
}

static inline void hv_set_drvdata(struct hv_device *dev, void *data)
{
	dev_set_drvdata(&dev->device, data);
}

static inline void *hv_get_drvdata(struct hv_device *dev)
{
	return dev_get_drvdata(&dev->device);
}
struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
	u32 current_write_index;
	u32 bytes_avail_toread;
	u32 bytes_avail_towrite;
};

int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				struct hv_ring_buffer_debug_info *debug_info);

bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);
/* Vmbus interface */
#define vmbus_driver_register(driver)	\
	__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
int __must_check __vmbus_driver_register(struct hv_driver *hv_driver,
					 struct module *owner,
					 const char *mod_name);
void vmbus_driver_unregister(struct hv_driver *hv_driver);
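/*
 * Illustrative sketch (hypothetical module): a minimal VMBus driver that
 * could be registered through vmbus_driver_register(). The callbacks and
 * GUID table are placeholders.
 */
static int example_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *id)
{
	hv_set_drvdata(dev, NULL);	/* stash per-device state here */
	return 0;
}

static int example_remove(struct hv_device *dev)
{
	return 0;
}

static struct hv_driver example_drv = {
	.name = "example",
	.probe = example_probe,
	.remove = example_remove,
	/* .id_table would match offer GUIDs such as HV_NIC_GUID below */
};

/* registration: ret = vmbus_driver_register(&example_drv); */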
void vmbus_hvsock_device_unregister(struct vmbus_channel *channel);

int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
			resource_size_t min, resource_size_t max,
			resource_size_t size, resource_size_t align,
			bool fb_overlap_ok);
void vmbus_free_mmio(resource_size_t start, resource_size_t size);
/*
 * GUID definitions of various offer types - services offered to the guest.
 */

/*
 * Network GUID
 * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
 */
#define HV_NIC_GUID \
	.guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
			  0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)

/*
 * IDE GUID
 * {32412632-86cb-44a2-9b5c-50d1417354f5}
 */
#define HV_IDE_GUID \
	.guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
			  0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)

/*
 * SCSI GUID
 * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
 */
#define HV_SCSI_GUID \
	.guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
			  0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)

/*
 * Shutdown GUID
 * {0e0b6031-5213-4934-818b-38d90ced39db}
 */
#define HV_SHUTDOWN_GUID \
	.guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
			  0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)

/*
 * Time Synch GUID
 * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
 */
#define HV_TS_GUID \
	.guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
			  0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)

/*
 * Heartbeat GUID
 * {57164f39-9115-4e78-ab55-382f3bd5422d}
 */
#define HV_HEART_BEAT_GUID \
	.guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
			  0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)

/*
 * KVP GUID
 * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
 */
#define HV_KVP_GUID \
	.guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
			  0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)

/*
 * Dynamic memory GUID
 * {525074dc-8985-46e2-8057-a307dc18a502}
 */
#define HV_DM_GUID \
	.guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
			  0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)

/*
 * Mouse GUID
 * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
 */
#define HV_MOUSE_GUID \
	.guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
			  0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)

/*
 * Keyboard GUID
 * {f912ad6d-2b17-48ea-bd65-f927a61c7684}
 */
#define HV_KBD_GUID \
	.guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
			  0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)

/*
 * VSS (Backup/Restore) GUID
 */
#define HV_VSS_GUID \
	.guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
			  0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)

/*
 * Synthetic Video GUID
 * {DA0A7802-E377-4aac-8E77-0558EB1073F8}
 */
#define HV_SYNTHVID_GUID \
	.guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
			  0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)

/*
 * Synthetic FC GUID
 * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
 */
#define HV_SYNTHFC_GUID \
	.guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
			  0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)

/*
 * Guest File Copy Service
 * {34D14BE3-DEE4-41c8-9AE7-6B174977C192}
 */
#define HV_FCOPY_GUID \
	.guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
			  0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)

/*
 * NetworkDirect. This is the guest RDMA service.
 * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
 */
#define HV_ND_GUID \
	.guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
			  0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)

/*
 * PCI Express Pass Through
 * {44C4F61D-4444-4400-9D52-802E27EDE19F}
 */
#define HV_PCIE_GUID \
	.guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
			  0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)

/*
 * Linux doesn't support these 3 devices: the first two are for
 * Automatic Virtual Machine Activation, and the third is for
 * Remote Desktop Virtualization.
 * {f8e65716-3cb3-4a06-9a60-1889c5cccab5}
 * {3375baf4-9e15-4b30-b765-67acb10d607b}
 * {276aacf4-ac15-426c-98dd-7521ad3f01fe}
 */

#define HV_AVMA1_GUID \
	.guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
			  0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)

#define HV_AVMA2_GUID \
	.guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
			  0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)

#define HV_RDV_GUID \
	.guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
			  0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
/*
 * Common header for Hyper-V ICs
 */

#define ICMSGTYPE_NEGOTIATE		0
#define ICMSGTYPE_HEARTBEAT		1
#define ICMSGTYPE_KVPEXCHANGE		2
#define ICMSGTYPE_SHUTDOWN		3
#define ICMSGTYPE_TIMESYNC		4
#define ICMSGTYPE_VSS			5
#define ICMSGTYPE_FCOPY			7

#define ICMSGHDRFLAG_TRANSACTION	1
#define ICMSGHDRFLAG_REQUEST		2
#define ICMSGHDRFLAG_RESPONSE		4

/*
 * While we want to handle util services as regular devices,
 * there is only one instance of each of these services; so
 * we statically allocate the service specific state.
 */

struct hv_util_service {
	u8 *recv_buffer;
	void *channel;
	void (*util_cb)(void *);
	int (*util_init)(struct hv_util_service *);
	void (*util_deinit)(void);
	int (*util_pre_suspend)(void);
	int (*util_pre_resume)(void);
};
struct vmbuspipe_hdr {
	u32 flags;
	u32 msgsize;
} __packed;

struct ic_version {
	u16 major;
	u16 minor;
} __packed;

struct icmsg_hdr {
	struct ic_version icverframe;
	u16 icmsgtype;
	struct ic_version icvermsg;
	u16 icmsgsize;
	u32 status;
	u8 ictransaction_id;
	u8 icflags;
	u8 reserved[2];
} __packed;

struct icmsg_negotiate {
	u16 icframe_vercnt;
	u16 icmsg_vercnt;
	u32 reserved;
	struct ic_version icversion_data[1]; /* any size array */
} __packed;

struct shutdown_msg_data {
	u32 reason_code;
	u32 timeout_seconds;
	u32 flags;
	u8  display_message[2048];
} __packed;

struct heartbeat_msg_data {
	u64 seq_num;
	u32 reserved[8];
} __packed;
/* Time Sync IC defs */
#define ICTIMESYNCFLAG_PROBE	0
#define ICTIMESYNCFLAG_SYNC	1
#define ICTIMESYNCFLAG_SAMPLE	2

#ifdef __x86_64__
#define WLTIMEDELTA	116444736000000000L	/* in 100ns unit */
#else
#define WLTIMEDELTA	116444736000000000LL
#endif

struct ictimesync_data {
	u64 parenttime;
	u64 childtime;
	u64 roundtriptime;
	u8 flags;
} __packed;

struct ictimesync_ref_data {
	u64 parenttime;
	u64 vmreferencetime;
	u8 flags;
	char leapflags;
	char stratum;
	u8 reserved[3];
} __packed;
struct hyperv_service_callback {
	u8 msg_type;
	char *log_msg;
	guid_t data;
	struct vmbus_channel *channel;
	void (*callback)(void *context);
};

#define MAX_SRV_VER	0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
				const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version);
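/*
 * Illustrative sketch (hypothetical version tables): a util service's
 * negotiate handler. UTIL_FW_VERSION and SRV_VERSION stand in for the
 * framework/service versions a real driver would support; they are not
 * defined in this header.
 */
static inline bool example_handle_negotiate(struct icmsg_hdr *hdr, u8 *buf)
{
	const int fw_versions[] = { UTIL_FW_VERSION };	/* hypothetical */
	const int srv_versions[] = { SRV_VERSION };	/* hypothetical */
	int nego_fw, nego_srv;

	return vmbus_prep_negotiate_resp(hdr, buf,
					 fw_versions, ARRAY_SIZE(fw_versions),
					 srv_versions, ARRAY_SIZE(srv_versions),
					 &nego_fw, &nego_srv);
}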
void hv_process_channel_removal(struct vmbus_channel *channel);

void vmbus_setevent(struct vmbus_channel *channel);

/*
 * Negotiated version with the Host.
 */
extern __u32 vmbus_proto_version;

int vmbus_send_tl_connect_request(const guid_t *shv_guest_service_id,
				  const guid_t *shv_host_service_id);
int vmbus_send_modifychannel(u32 child_relid, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);
/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_buffer->buffer;
}

/*
 * Mask off host interrupt callback notifications
 */
static inline void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;

	/* make sure mask update is not reordered */
	virt_mb();
}

/*
 * Re-enable host callback and return number of outstanding bytes
 */
static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 0;

	/* make sure mask update is not reordered */
	virt_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	return hv_get_bytes_to_read(rbi);
}
/*
 * An API to support in-place processing of incoming VMBUS packets.
 */

/* Get data payload associated with descriptor */
static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
	return (void *)((unsigned long)desc + (desc->offset8 << 3));
}

/* Get data size associated with descriptor */
static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
	return (desc->len8 << 3) - (desc->offset8 << 3);
}
struct vmpacket_descriptor *
hv_pkt_iter_first(struct vmbus_channel *channel);

struct vmpacket_descriptor *
__hv_pkt_iter_next(struct vmbus_channel *channel,
		   const struct vmpacket_descriptor *pkt);

void hv_pkt_iter_close(struct vmbus_channel *channel);

/*
 * Get next packet descriptor from iterator.
 * If at end of list, return NULL and update host.
 */
static inline struct vmpacket_descriptor *
hv_pkt_iter_next(struct vmbus_channel *channel,
		 const struct vmpacket_descriptor *pkt)
{
	struct vmpacket_descriptor *nxt;

	nxt = __hv_pkt_iter_next(channel, pkt);
	if (!nxt)
		hv_pkt_iter_close(channel);

	return nxt;
}

#define foreach_vmbus_pkt(pkt, channel) \
	for (pkt = hv_pkt_iter_first(channel); pkt; \
	    pkt = hv_pkt_iter_next(channel, pkt))
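/*
 * Illustrative sketch (hypothetical handler): drain all pending packets
 * from a channel's inbound ring using the iterator above; typically run
 * from the channel callback.
 */
static inline void example_drain_channel(struct vmbus_channel *chan)
{
	const struct vmpacket_descriptor *pkt;

	foreach_vmbus_pkt(pkt, chan) {
		void *data = hv_pkt_data(pkt);
		u32 len = hv_pkt_datalen(pkt);

		/* ... process "len" bytes of payload at "data" ... */
		(void)data;
		(void)len;
	}
}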
/*
 * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
 * sends requests to read and write blocks. Each block must be 128 bytes or
 * smaller. Optionally, the VF driver can register a callback function which
 * will be invoked when the host says that one or more of the first 64 block
 * IDs is "invalid" which means that the VF driver should reread them.
 */
#define HV_CONFIG_BLOCK_SIZE_MAX 128

int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
			unsigned int block_id, unsigned int *bytes_returned);
int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
			 unsigned int block_id);
int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
				void (*block_invalidate)(void *context,
							 u64 block_mask));

struct hyperv_pci_block_ops {
	int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
			  unsigned int block_id, unsigned int *bytes_returned);
	int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
			   unsigned int block_id);
	int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
				  void (*block_invalidate)(void *context,
							   u64 block_mask));
};

extern struct hyperv_pci_block_ops hvpci_block_ops;
static inline unsigned long virt_to_hvpfn(void *addr)
{
	phys_addr_t paddr;

	if (is_vmalloc_addr(addr))
		paddr = page_to_phys(vmalloc_to_page(addr)) +
			offset_in_page(addr);
	else
		paddr = __pa(addr);

	return paddr >> HV_HYP_PAGE_SHIFT;
}

#define NR_HV_HYP_PAGES_IN_PAGE	(PAGE_SIZE / HV_HYP_PAGE_SIZE)
#define offset_in_hvpage(ptr)	((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
#define HVPFN_UP(x)	(((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
#define page_to_hvpfn(page)	(page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
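/*
 * Illustrative sketch (hypothetical helper): describe a guest buffer to
 * the host in HV_HYP_PAGE units, which stays correct even when the
 * guest's PAGE_SIZE is larger than HV_HYP_PAGE_SIZE.
 */
static inline u32 example_hvpfns_spanned(void *buf, u32 len)
{
	/* offset of the buffer within its first Hyper-V page */
	unsigned long offset = offset_in_hvpage(buf);

	/* number of Hyper-V page frames covering [buf, buf + len) */
	return HVPFN_UP(offset + len);
}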
#endif /* _HYPERV_H */