2 * Copyright (c) Microsoft Corporation.
5 * Jake Oshins <jakeo@microsoft.com>
7 * This driver acts as a paravirtual front-end for PCI Express root buses.
8 * When a PCI Express function (either an entire device or an SR-IOV
9 * Virtual Function) is being passed through to the VM, this driver exposes
10 * a new bus to the guest VM. This is modeled as a root PCI bus because
11 * no bridges are being exposed to the VM. In fact, with a "Generation 2"
12 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
13 * until a device has been exposed using this driver.
15 * Each root PCI bus has its own PCI domain, which is called "Segment" in
16 * the PCI Firmware Specifications. Thus while each device passed through
17 * to the VM using this front-end will appear at "device 0", the domain will
18 * be unique. Typically, each bus will have one PCI function on it, though
19 * this driver does support more than one.
21 * In order to map the interrupts from the device through to the guest VM,
22 * this driver also implements an IRQ Domain, which handles interrupts (either
23 * MSI or MSI-X) associated with the functions on the bus. As interrupts are
24 * set up, torn down, or reaffined, this driver communicates with the
25 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
26 * interrupt will be delivered to the correct virtual processor at the right
27 * vector. This driver does not support level-triggered (line-based)
28 * interrupts, and will report that the Interrupt Line register in the
29 * function's configuration space is zero.
31 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
32 * facilities. For instance, the configuration space of a function exposed
33 * by Hyper-V is mapped into a single page of memory space, and the
34 * read and write handlers for config space must be aware of this mechanism.
35 * Similarly, device setup and teardown involves messages sent to and from
36 * the PCI back-end driver in Hyper-V.
38 * This program is free software; you can redistribute it and/or modify it
39 * under the terms of the GNU General Public License version 2 as published
40 * by the Free Software Foundation.
42 * This program is distributed in the hope that it will be useful, but
43 * WITHOUT ANY WARRANTY; without even the implied warranty of
44 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
45 * NON INFRINGEMENT. See the GNU General Public License for more details.
50 #include <linux/kernel.h>
51 #include <linux/module.h>
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/semaphore.h>
55 #include <linux/irqdomain.h>
56 #include <linux/irq.h>
58 #include <asm/irqdomain.h>
60 #include <linux/msi.h>
61 #include <linux/hyperv.h>
62 #include <linux/refcount.h>
63 #include <asm/mshyperv.h>
66 * Protocol versions. The low word is the minor version, the high word the major version.
70 #define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
71 #define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
72 #define PCI_MINOR_VERSION(version) ((u32)(version) & 0xffff)
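/*
 * For example, PCI_PROTOCOL_VERSION_1_2 == PCI_MAKE_VERSION(1, 2) ==
 * 0x00010002, so PCI_MAJOR_VERSION() yields 1 and PCI_MINOR_VERSION()
 * yields 2.
 */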
74 enum pci_protocol_version_t {
75 PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */
76 PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */
79 #define CPU_AFFINITY_ALL -1ULL
82 * Supported protocol versions in the order of probing - highest go first.
85 static enum pci_protocol_version_t pci_protocol_versions[] = {
86 PCI_PROTOCOL_VERSION_1_2,
87 PCI_PROTOCOL_VERSION_1_1,
91 * Protocol version negotiated by hv_pci_protocol_negotiation().
93 static enum pci_protocol_version_t pci_protocol_version;
95 #define PCI_CONFIG_MMIO_LENGTH 0x2000
96 #define CFG_PAGE_OFFSET 0x1000
97 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
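/*
 * The 0x2000 bytes of config MMIO are two 4 KiB pages: writes to the
 * first page select the function, and the second page (starting at
 * CFG_PAGE_OFFSET, CFG_PAGE_SIZE == 0x1000 bytes) maps that function's
 * configuration space.  See the comment above _hv_pcifront_read_config().
 */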
99 #define MAX_SUPPORTED_MSI_MESSAGES 0x400
101 #define STATUS_REVISION_MISMATCH 0xC0000059
103 /* space for 32bit serial number as string */
104 #define SLOT_NAME_SIZE 11
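/* A u32 serial number is at most 10 decimal digits plus a NUL terminator. */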
110 enum pci_message_type {
114 PCI_MESSAGE_BASE = 0x42490000,
115 PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0,
116 PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1,
117 PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4,
118 PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
119 PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6,
120 PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7,
121 PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8,
122 PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9,
123 PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA,
124 PCI_EJECT = PCI_MESSAGE_BASE + 0xB,
125 PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC,
126 PCI_REENABLE = PCI_MESSAGE_BASE + 0xD,
127 PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE,
128 PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF,
129 PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10,
130 PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11,
131 PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12,
132 PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13,
133 PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14,
134 PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15,
135 PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16,
136 PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17,
137 PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */
142 * Structures defining the virtual PCI Express protocol.
154 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
155 * which is all this driver does. This representation is the one used in
156 * Windows, which is what is expected when sending this back and forth with
157 * the Hyper-V parent partition.
159 union win_slot_encoding {
169 * Pretty much as defined in the PCI Specifications.
171 struct pci_function_description {
172 u16 v_id; /* vendor ID */
173 u16 d_id; /* device ID */
179 union win_slot_encoding win_slot;
180 u32 ser; /* serial number */
186 * @delivery_mode: As defined in Intel's Programmer's
187 * Reference Manual, Volume 3, Chapter 8.
188 * @vector_count: Number of contiguous entries in the
189 * Interrupt Descriptor Table that are
190 * occupied by this Message-Signaled
191 * Interrupt. For "MSI", as first defined
192 * in PCI 2.2, this can be between 1 and
193 * 32. For "MSI-X," as first defined in PCI
194 * 3.0, this must be 1, as each MSI-X table
195 * entry would have its own descriptor.
196 * @reserved: Empty space
197 * @cpu_mask: All the target virtual processors.
208 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
210 * @delivery_mode: As defined in Intel's Programmer's
211 * Reference Manual, Volume 3, Chapter 8.
212 * @vector_count: Number of contiguous entries in the
213 * Interrupt Descriptor Table that are
214 * occupied by this Message-Signaled
215 * Interrupt. For "MSI", as first defined
216 * in PCI 2.2, this can be between 1 and
217 * 32. For "MSI-X," as first defined in PCI
218 * 3.0, this must be 1, as each MSI-X table
219 * entry would have its own descriptor.
220 * @processor_count: number of bits enabled in array.
221 * @processor_array: All the target virtual processors.
223 struct hv_msi_desc2 {
228 u16 processor_array[32];
232 * struct tran_int_desc
233 * @reserved: unused, padding
234 * @vector_count: same as in hv_msi_desc
235 * @data: This is the "data payload" value that is
236 * written by the device when it generates
237 * a message-signaled interrupt, either MSI or MSI-X.
239 * @address: This is the address to which the data
240 * payload is written on interrupt generation.
243 struct tran_int_desc {
251 * A generic message format for virtual PCI.
252 * Specific message formats are defined later in the file.
259 struct pci_child_message {
260 struct pci_message message_type;
261 union win_slot_encoding wslot;
264 struct pci_incoming_message {
265 struct vmpacket_descriptor hdr;
266 struct pci_message message_type;
269 struct pci_response {
270 struct vmpacket_descriptor hdr;
271 s32 status; /* negative values are failures */
275 void (*completion_func)(void *context, struct pci_response *resp,
276 int resp_packet_size);
279 struct pci_message message[0];
283 * Specific message types supporting the PCI protocol.
287 * Version negotiation message. Sent from the guest to the host.
288 * The guest is free to try different versions until the host
289 * accepts the version.
291 * pci_version: The protocol version requested.
292 * is_last_attempt: If TRUE, this is the last version guest will request.
293 * reservedz: Reserved field, set to zero.
296 struct pci_version_request {
297 struct pci_message message_type;
298 u32 protocol_version;
302 * Bus D0 Entry. This is sent from the guest to the host when the virtual
303 * bus (PCI Express port) is ready for action.
306 struct pci_bus_d0_entry {
307 struct pci_message message_type;
312 struct pci_bus_relations {
313 struct pci_incoming_message incoming;
315 struct pci_function_description func[0];
318 struct pci_q_res_req_response {
319 struct vmpacket_descriptor hdr;
320 s32 status; /* negative values are failures */
324 struct pci_set_power {
325 struct pci_message message_type;
326 union win_slot_encoding wslot;
327 u32 power_state; /* In Windows terms */
331 struct pci_set_power_response {
332 struct vmpacket_descriptor hdr;
333 s32 status; /* negative values are failures */
334 union win_slot_encoding wslot;
335 u32 resultant_state; /* In Windows terms */
339 struct pci_resources_assigned {
340 struct pci_message message_type;
341 union win_slot_encoding wslot;
342 u8 memory_range[0x14][6]; /* not used here */
347 struct pci_resources_assigned2 {
348 struct pci_message message_type;
349 union win_slot_encoding wslot;
350 u8 memory_range[0x14][6]; /* not used here */
351 u32 msi_descriptor_count;
355 struct pci_create_interrupt {
356 struct pci_message message_type;
357 union win_slot_encoding wslot;
358 struct hv_msi_desc int_desc;
361 struct pci_create_int_response {
362 struct pci_response response;
364 struct tran_int_desc int_desc;
367 struct pci_create_interrupt2 {
368 struct pci_message message_type;
369 union win_slot_encoding wslot;
370 struct hv_msi_desc2 int_desc;
373 struct pci_delete_interrupt {
374 struct pci_message message_type;
375 union win_slot_encoding wslot;
376 struct tran_int_desc int_desc;
379 struct pci_dev_incoming {
380 struct pci_incoming_message incoming;
381 union win_slot_encoding wslot;
384 struct pci_eject_response {
385 struct pci_message message_type;
386 union win_slot_encoding wslot;
390 static int pci_ring_size = (4 * PAGE_SIZE);
393 * Definitions of the interrupt steering hypercall.
395 #define HV_PARTITION_ID_SELF ((u64)-1)
396 #define HVCALL_RETARGET_INTERRUPT 0x7e
398 struct hv_interrupt_entry {
399 u32 source; /* 1 for MSI(-X) */
405 #define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */
408 u64 format; /* 0 (HvGenericSetSparse4k) */
410 u64 masks[HV_VP_SET_BANK_COUNT_MAX];
414 * flags for hv_device_interrupt_target.flags
416 #define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1
417 #define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2
419 struct hv_device_interrupt_target {
424 struct hv_vp_set vp_set;
428 struct retarget_msi_interrupt {
429 u64 partition_id; /* use "self" */
431 struct hv_interrupt_entry int_entry;
433 struct hv_device_interrupt_target int_target;
437 * Driver specific state.
440 enum hv_pcibus_state {
448 struct hv_pcibus_device {
449 struct pci_sysdata sysdata;
450 enum hv_pcibus_state state;
451 atomic_t remove_lock;
452 struct hv_device *hdev;
453 resource_size_t low_mmio_space;
454 resource_size_t high_mmio_space;
455 struct resource *mem_config;
456 struct resource *low_mmio_res;
457 struct resource *high_mmio_res;
458 struct completion *survey_event;
459 struct completion remove_event;
460 struct pci_bus *pci_bus;
461 spinlock_t config_lock; /* Avoid two threads writing index page */
462 spinlock_t device_list_lock; /* Protect lists below */
463 void __iomem *cfg_addr;
465 struct list_head resources_for_children;
467 struct list_head children;
468 struct list_head dr_list;
470 struct msi_domain_info msi_info;
471 struct msi_controller msi_chip;
472 struct irq_domain *irq_domain;
474 /* hypercall arg, must not cross page boundary */
475 struct retarget_msi_interrupt retarget_msi_interrupt_params;
477 spinlock_t retarget_msi_interrupt_lock;
479 struct workqueue_struct *wq;
483 * Tracks "Device Relations" messages from the host, which must be both
484 * processed in order and deferred so that they don't run in the context
485 * of the incoming packet callback.
488 struct work_struct wrk;
489 struct hv_pcibus_device *bus;
493 struct list_head list_entry;
495 struct pci_function_description func[0];
498 enum hv_pcichild_state {
499 hv_pcichild_init = 0,
500 hv_pcichild_requirements,
501 hv_pcichild_resourced,
502 hv_pcichild_ejecting,
506 enum hv_pcidev_ref_reason {
507 hv_pcidev_ref_invalid = 0,
508 hv_pcidev_ref_initial,
509 hv_pcidev_ref_by_slot,
510 hv_pcidev_ref_packet,
512 hv_pcidev_ref_childlist,
518 /* List protected by pci_rescan_remove_lock */
519 struct list_head list_entry;
521 enum hv_pcichild_state state;
522 struct pci_slot *pci_slot;
523 struct pci_function_description desc;
524 bool reported_missing;
525 struct hv_pcibus_device *hbus;
526 struct work_struct wrk;
529 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
530 * read it back, for each of the BAR offsets within config space.
535 struct hv_pci_compl {
536 struct completion host_event;
537 s32 completion_status;
540 static void hv_pci_onchannelcallback(void *context);
543 * hv_pci_generic_compl() - Invoked for a completion packet
544 * @context: Set up by the sender of the packet.
545 * @resp: The response packet
546 * @resp_packet_size: Size in bytes of the packet
548 * This function is used to trigger an event and report status
549 * for any message for which the completion packet contains a
550 * status and nothing else.
552 static void hv_pci_generic_compl(void *context, struct pci_response *resp,
553 int resp_packet_size)
555 struct hv_pci_compl *comp_pkt = context;
557 if (resp_packet_size >= offsetofend(struct pci_response, status))
558 comp_pkt->completion_status = resp->status;
560 comp_pkt->completion_status = -1;
562 complete(&comp_pkt->host_event);
565 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
567 static void get_pcichild(struct hv_pci_dev *hv_pcidev,
568 enum hv_pcidev_ref_reason reason);
569 static void put_pcichild(struct hv_pci_dev *hv_pcidev,
570 enum hv_pcidev_ref_reason reason);
572 static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
573 static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
576 * There is no good way to get notified from vmbus_onoffer_rescind(),
577 * so let's use polling here, since this is not a hot path.
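 * The loop below bails out if the channel has been rescinded, otherwise it
 * polls the completion in HZ/10 (100 ms) chunks.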
579 static int wait_for_response(struct hv_device *hdev,
580 struct completion *comp)
583 if (hdev->channel->rescind) {
584 dev_warn_once(&hdev->device, "The device is gone.\n");
588 if (wait_for_completion_timeout(comp, HZ / 10))
596 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
597 * @devfn: The Linux representation of PCI slot
599 * Windows uses a slightly different representation of PCI slot.
601 * Return: The Windows representation
603 static u32 devfn_to_wslot(int devfn)
605 union win_slot_encoding wslot;
608 wslot.bits.dev = PCI_SLOT(devfn);
609 wslot.bits.func = PCI_FUNC(devfn);
615 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
616 * @wslot: The Windows representation of PCI slot
618 * Windows uses a slightly different representation of PCI slot.
620 * Return: The Linux representation
622 static int wslot_to_devfn(u32 wslot)
624 union win_slot_encoding slot_no;
626 slot_no.slot = wslot;
627 return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
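/*
 * Example (assuming the usual 5-bit device / 3-bit function split in
 * union win_slot_encoding): Linux devfn 0x0a (device 1, function 2)
 * becomes wslot 0x41, i.e. dev in bits 0-4 and func in bits 5-7.
 */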
631 * PCI Configuration Space for these root PCI buses is implemented as a pair
632 * of pages in memory-mapped I/O space. Writing to the first page chooses
633 * the PCI function being written or read. Once the first page has been
634 * written to, the following page maps in the entire configuration space of the function.
639 * _hv_pcifront_read_config() - Internal PCI config read
640 * @hpdev: The PCI driver's representation of the device
641 * @where: Offset within config space
642 * @size: Size of the transfer
643 * @val: Pointer to the buffer receiving the data
645 static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
649 void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
652 * If the attempt is to read the IDs or the ROM BAR, simulate that.
654 if (where + size <= PCI_COMMAND) {
655 memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
656 } else if (where >= PCI_CLASS_REVISION && where + size <=
657 PCI_CACHE_LINE_SIZE) {
658 memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
659 PCI_CLASS_REVISION, size);
660 } else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
662 memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
663 PCI_SUBSYSTEM_VENDOR_ID, size);
664 } else if (where >= PCI_ROM_ADDRESS && where + size <=
665 PCI_CAPABILITY_LIST) {
666 /* ROM BARs are unimplemented */
668 } else if (where >= PCI_INTERRUPT_LINE && where + size <=
671 * Interrupt Line and Interrupt PIN are hard-wired to zero
672 * because this front-end only supports message-signaled interrupts.
676 } else if (where + size <= CFG_PAGE_SIZE) {
677 spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
678 /* Choose the function to be read. (See comment above) */
679 writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
680 /* Make sure the function was chosen before we start reading. */
682 /* Read from that function's config space. */
695 * Make sure the write was done before we release the spinlock
696 * allowing consecutive reads/writes.
699 spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
701 dev_err(&hpdev->hbus->hdev->device,
702 "Attempt to read beyond a function's config space.\n");
706 static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
710 void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
713 spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
715 /* Choose the function to be read. (See comment above) */
716 writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
717 /* Make sure the function was chosen before we start reading. */
719 /* Read from that function's config space. */
722 * mb() is not required here, because the spin_unlock_irqrestore() is a barrier.
726 spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
732 * _hv_pcifront_write_config() - Internal PCI config write
733 * @hpdev: The PCI driver's representation of the device
734 * @where: Offset within config space
735 * @size: Size of the transfer
736 * @val: The data being transferred
738 static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
742 void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
744 if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
745 where + size <= PCI_CAPABILITY_LIST) {
746 /* SSIDs and ROM BARs are read-only */
747 } else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
748 spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
749 /* Choose the function to be written. (See comment above) */
750 writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
751 /* Make sure the function was chosen before we start writing. */
753 /* Write to that function's config space. */
766 * Make sure the write was done before we release the spinlock
767 * allowing consecutive reads/writes.
770 spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
772 dev_err(&hpdev->hbus->hdev->device,
773 "Attempt to write beyond a function's config space.\n");
778 * hv_pcifront_read_config() - Read configuration space
779 * @bus: PCI Bus structure
780 * @devfn: Device/function
781 * @where: Offset from base
782 * @size: Byte/word/dword
783 * @val: Value to be read
785 * Return: PCIBIOS_SUCCESSFUL on success
786 * PCIBIOS_DEVICE_NOT_FOUND on failure
788 static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
789 int where, int size, u32 *val)
791 struct hv_pcibus_device *hbus =
792 container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
793 struct hv_pci_dev *hpdev;
795 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
797 return PCIBIOS_DEVICE_NOT_FOUND;
799 _hv_pcifront_read_config(hpdev, where, size, val);
801 put_pcichild(hpdev, hv_pcidev_ref_by_slot);
802 return PCIBIOS_SUCCESSFUL;
806 * hv_pcifront_write_config() - Write configuration space
807 * @bus: PCI Bus structure
808 * @devfn: Device/function
809 * @where: Offset from base
810 * @size: Byte/word/dword
811 * @val: Value to be written to device
813 * Return: PCIBIOS_SUCCESSFUL on success
814 * PCIBIOS_DEVICE_NOT_FOUND on failure
816 static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
817 int where, int size, u32 val)
819 struct hv_pcibus_device *hbus =
820 container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
821 struct hv_pci_dev *hpdev;
823 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
825 return PCIBIOS_DEVICE_NOT_FOUND;
827 _hv_pcifront_write_config(hpdev, where, size, val);
829 put_pcichild(hpdev, hv_pcidev_ref_by_slot);
830 return PCIBIOS_SUCCESSFUL;
833 /* PCIe operations */
834 static struct pci_ops hv_pcifront_ops = {
835 .read = hv_pcifront_read_config,
836 .write = hv_pcifront_write_config,
839 /* Interrupt management hooks */
840 static void hv_int_desc_free(struct hv_pci_dev *hpdev,
841 struct tran_int_desc *int_desc)
843 struct pci_delete_interrupt *int_pkt;
845 struct pci_packet pkt;
846 u8 buffer[sizeof(struct pci_delete_interrupt)];
849 if (!int_desc->vector_count) {
854 memset(&ctxt, 0, sizeof(ctxt));
855 int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
856 int_pkt->message_type.type =
857 PCI_DELETE_INTERRUPT_MESSAGE;
858 int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
859 int_pkt->int_desc = *int_desc;
860 vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
861 (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0);
866 * hv_msi_free() - Free the MSI.
867 * @domain: The interrupt domain pointer
868 * @info: Extra MSI-related context
869 * @irq: Identifies the IRQ.
871 * The Hyper-V parent partition and hypervisor are tracking the
872 * messages that are in use, keeping the interrupt redirection
873 * table up to date. This callback sends a message that frees
874 * the IRT entry and related tracking nonsense.
876 static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
879 struct hv_pcibus_device *hbus;
880 struct hv_pci_dev *hpdev;
881 struct pci_dev *pdev;
882 struct tran_int_desc *int_desc;
883 struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
884 struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
886 pdev = msi_desc_to_pci_dev(msi);
888 int_desc = irq_data_get_irq_chip_data(irq_data);
892 irq_data->chip_data = NULL;
893 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
899 hv_int_desc_free(hpdev, int_desc);
900 put_pcichild(hpdev, hv_pcidev_ref_by_slot);
903 static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
906 struct irq_data *parent = data->parent_data;
908 return parent->chip->irq_set_affinity(parent, dest, force);
911 static void hv_irq_mask(struct irq_data *data)
913 pci_msi_mask_irq(data);
916 static unsigned int hv_msi_get_int_vector(struct irq_data *data)
918 struct irq_cfg *cfg = irqd_cfg(data);
923 static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
924 int nvec, msi_alloc_info_t *info)
926 int ret = pci_msi_prepare(domain, dev, nvec, info);
929 * By using the interrupt remapper in the hypervisor IOMMU, contiguous
930 * CPU vectors are not needed for multi-MSI.
932 if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
933 info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
939 * hv_irq_unmask() - "Unmask" the IRQ by setting its current affinity.
941 * @data: Describes the IRQ
943 * Build a new destination for the MSI and make a hypercall to
944 * update the Interrupt Redirection Table. "Device Logical ID"
945 * is built out of this PCI bus's instance GUID and the function
946 * number of the device.
948 static void hv_irq_unmask(struct irq_data *data)
950 struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
951 struct irq_cfg *cfg = irqd_cfg(data);
952 struct retarget_msi_interrupt *params;
953 struct tran_int_desc *int_desc;
954 struct hv_pcibus_device *hbus;
955 struct cpumask *dest;
956 struct pci_bus *pbus;
957 struct pci_dev *pdev;
964 dest = irq_data_get_effective_affinity_mask(data);
965 pdev = msi_desc_to_pci_dev(msi_desc);
967 hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
968 int_desc = data->chip_data;
970 spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
972 params = &hbus->retarget_msi_interrupt_params;
973 memset(params, 0, sizeof(*params));
974 params->partition_id = HV_PARTITION_ID_SELF;
975 params->int_entry.source = 1; /* MSI(-X) */
976 params->int_entry.address = int_desc->address & 0xffffffff;
977 params->int_entry.data = int_desc->data;
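/*
 * Build the "Device Logical ID" below from bytes of this bus's VMBus
 * instance GUID plus the PCI function number, as described in the
 * comment above hv_irq_unmask().
 */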
978 params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
979 (hbus->hdev->dev_instance.b[4] << 16) |
980 (hbus->hdev->dev_instance.b[7] << 8) |
981 (hbus->hdev->dev_instance.b[6] & 0xf8) |
982 PCI_FUNC(pdev->devfn);
983 params->int_target.vector = cfg->vector;
986 * Honoring apic->irq_delivery_mode set to dest_Fixed by
987 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
988 * spurious interrupt storm. Not doing so does not seem to have a
989 * negative effect (yet?).
992 if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
994 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
995 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
996 * with >64 VP support.
997 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
998 * is not sufficient for this hypercall.
1000 params->int_target.flags |=
1001 HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;
1002 params->int_target.vp_set.valid_banks =
1003 (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1;
1006 * var-sized hypercall, var-size starts after vp_mask (thus
1007 * vp_set.format does not count, but vp_set.valid_banks does).
1009 var_size = 1 + HV_VP_SET_BANK_COUNT_MAX;
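/*
 * With HV_VP_SET_BANK_COUNT_MAX == 5 this is 6 qwords: valid_banks
 * plus the five mask banks.  The size is later folded into the
 * hypercall control word as (var_size << 17).
 */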
1011 for_each_cpu_and(cpu, dest, cpu_online_mask) {
1012 cpu_vmbus = hv_cpu_number_to_vp_number(cpu);
1014 if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) {
1015 dev_err(&hbus->hdev->device,
1016 "too high CPU %d", cpu_vmbus);
1021 params->int_target.vp_set.masks[cpu_vmbus / 64] |=
1022 (1ULL << (cpu_vmbus & 63));
1025 for_each_cpu_and(cpu, dest, cpu_online_mask) {
1026 params->int_target.vp_mask |=
1027 (1ULL << hv_cpu_number_to_vp_number(cpu));
1031 res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
1035 spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags);
1038 dev_err(&hbus->hdev->device,
1039 "%s() failed: %#llx", __func__, res);
1043 pci_msi_unmask_irq(data);
1046 struct compose_comp_ctxt {
1047 struct hv_pci_compl comp_pkt;
1048 struct tran_int_desc int_desc;
1051 static void hv_pci_compose_compl(void *context, struct pci_response *resp,
1052 int resp_packet_size)
1054 struct compose_comp_ctxt *comp_pkt = context;
1055 struct pci_create_int_response *int_resp =
1056 (struct pci_create_int_response *)resp;
1058 comp_pkt->comp_pkt.completion_status = resp->status;
1059 comp_pkt->int_desc = int_resp->int_desc;
1060 complete(&comp_pkt->comp_pkt.host_event);
1063 static u32 hv_compose_msi_req_v1(
1064 struct pci_create_interrupt *int_pkt, struct cpumask *affinity,
1065 u32 slot, u8 vector, u8 vector_count)
1067 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
1068 int_pkt->wslot.slot = slot;
1069 int_pkt->int_desc.vector = vector;
1070 int_pkt->int_desc.vector_count = vector_count;
1071 int_pkt->int_desc.delivery_mode =
1072 (apic->irq_delivery_mode == dest_LowestPrio) ?
1073 dest_LowestPrio : dest_Fixed;
1076 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
1079 int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
1081 return sizeof(*int_pkt);
1084 static u32 hv_compose_msi_req_v2(
1085 struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity,
1086 u32 slot, u8 vector, u8 vector_count)
1090 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
1091 int_pkt->wslot.slot = slot;
1092 int_pkt->int_desc.vector = vector;
1093 int_pkt->int_desc.vector_count = vector_count;
1094 int_pkt->int_desc.delivery_mode =
1095 (apic->irq_delivery_mode == dest_LowestPrio) ?
1096 dest_LowestPrio : dest_Fixed;
1099 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
1100 * by subsequent retarget in hv_irq_unmask().
1102 cpu = cpumask_first_and(affinity, cpu_online_mask);
1103 int_pkt->int_desc.processor_array[0] =
1104 hv_cpu_number_to_vp_number(cpu);
1105 int_pkt->int_desc.processor_count = 1;
1107 return sizeof(*int_pkt);
1111 * hv_compose_msi_msg() - Supplies a valid MSI address/data
1112 * @data: Everything about this MSI
1113 * @msg: Buffer that is filled in by this function
1115 * This function unpacks the IRQ looking for target CPU set, IDT
1116 * vector and mode and sends a message to the parent partition
1117 * asking for a mapping for that tuple in this partition. The
1118 * response supplies a data value and address to which that data
1119 * should be written to trigger that interrupt.
1121 static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1123 struct hv_pcibus_device *hbus;
1124 struct hv_pci_dev *hpdev;
1125 struct pci_bus *pbus;
1126 struct pci_dev *pdev;
1127 struct cpumask *dest;
1128 unsigned long flags;
1129 struct compose_comp_ctxt comp;
1130 struct tran_int_desc *int_desc;
1131 struct msi_desc *msi_desc;
1132 u8 vector, vector_count;
1134 struct pci_packet pci_pkt;
1136 struct pci_create_interrupt v1;
1137 struct pci_create_interrupt2 v2;
1144 /* Reuse the previous allocation */
1145 if (data->chip_data) {
1146 int_desc = data->chip_data;
1147 msg->address_hi = int_desc->address >> 32;
1148 msg->address_lo = int_desc->address & 0xffffffff;
1149 msg->data = int_desc->data;
1153 msi_desc = irq_data_get_msi_desc(data);
1154 pdev = msi_desc_to_pci_dev(msi_desc);
1155 dest = irq_data_get_effective_affinity_mask(data);
1157 hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
1158 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1160 goto return_null_message;
1162 int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
1164 goto drop_reference;
1166 if (!msi_desc->msi_attrib.is_msix && msi_desc->nvec_used > 1) {
1168 * If this is not the first MSI of Multi MSI, we already have
1169 * a mapping. Can exit early.
1171 if (msi_desc->irq != data->irq) {
1172 data->chip_data = int_desc;
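/*
 * Later MSIs in the block share the first vector's address; the data
 * value is the first vector's data plus this IRQ's offset within the
 * block.
 */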
1173 int_desc->address = msi_desc->msg.address_lo |
1174 (u64)msi_desc->msg.address_hi << 32;
1175 int_desc->data = msi_desc->msg.data +
1176 (data->irq - msi_desc->irq);
1177 msg->address_hi = msi_desc->msg.address_hi;
1178 msg->address_lo = msi_desc->msg.address_lo;
1179 msg->data = int_desc->data;
1180 put_pcichild(hpdev, hv_pcidev_ref_by_slot);
1184 * The vector we select here is a dummy value. The correct
1185 * value gets sent to the hypervisor in unmask(). This needs
1186 * to be aligned with the count, and also not zero. Multi-msi
1187 * is powers of 2 up to 32, so 32 will always work here.
1190 vector_count = msi_desc->nvec_used;
1192 vector = hv_msi_get_int_vector(data);
1196 memset(&ctxt, 0, sizeof(ctxt));
1197 init_completion(&comp.comp_pkt.host_event);
1198 ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
1199 ctxt.pci_pkt.compl_ctxt = ∁
1201 switch (pci_protocol_version) {
1202 case PCI_PROTOCOL_VERSION_1_1:
1203 size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
1205 hpdev->desc.win_slot.slot,
1210 case PCI_PROTOCOL_VERSION_1_2:
1211 size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
1213 hpdev->desc.win_slot.slot,
1219 /* As we only negotiate protocol versions known to this driver,
1220 * this path should never hit. However, this is not a hot
1221 * path so we print a message to aid future updates.
1223 dev_err(&hbus->hdev->device,
1224 "Unexpected vPCI protocol, update driver.");
1228 ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
1229 size, (unsigned long)&ctxt.pci_pkt,
1231 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1233 dev_err(&hbus->hdev->device,
1234 "Sending request for interrupt failed: 0x%x",
1235 comp.comp_pkt.completion_status);
1240 * Since this function is called with IRQ locks held, can't
1241 * do normal wait for completion; instead poll.
1243 while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
1244 /* 0xFFFF means an invalid PCI VENDOR ID. */
1245 if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
1246 dev_err_once(&hbus->hdev->device,
1247 "the device has gone\n");
1252 * When the higher level interrupt code calls us with
1253 * interrupt disabled, we must poll the channel by calling
1254 * the channel callback directly when channel->target_cpu is
1255 * the current CPU. When the higher level interrupt code
1256 * calls us with interrupt enabled, let's add the
1257 * local_irq_save()/restore() to avoid race:
1258 * hv_pci_onchannelcallback() can also run in tasklet.
1260 local_irq_save(flags);
1262 if (hbus->hdev->channel->target_cpu == smp_processor_id())
1263 hv_pci_onchannelcallback(hbus);
1265 local_irq_restore(flags);
1267 if (hpdev->state == hv_pcichild_ejecting) {
1268 dev_err_once(&hbus->hdev->device,
1269 "the device is being ejected\n");
1276 if (comp.comp_pkt.completion_status < 0) {
1277 dev_err(&hbus->hdev->device,
1278 "Request for interrupt failed: 0x%x",
1279 comp.comp_pkt.completion_status);
1284 * Record the assignment so that this can be unwound later. Using
1285 * irq_set_chip_data() here would be appropriate, but the lock it takes is already held.
1288 *int_desc = comp.int_desc;
1289 data->chip_data = int_desc;
1291 /* Pass up the result. */
1292 msg->address_hi = comp.int_desc.address >> 32;
1293 msg->address_lo = comp.int_desc.address & 0xffffffff;
1294 msg->data = comp.int_desc.data;
1296 put_pcichild(hpdev, hv_pcidev_ref_by_slot);
1302 put_pcichild(hpdev, hv_pcidev_ref_by_slot);
1303 return_null_message:
1304 msg->address_hi = 0;
1305 msg->address_lo = 0;
1309 /* HW Interrupt Chip Descriptor */
1310 static struct irq_chip hv_msi_irq_chip = {
1311 .name = "Hyper-V PCIe MSI",
1312 .irq_compose_msi_msg = hv_compose_msi_msg,
1313 .irq_set_affinity = hv_set_affinity,
1314 .irq_ack = irq_chip_ack_parent,
1315 .irq_mask = hv_irq_mask,
1316 .irq_unmask = hv_irq_unmask,
1319 static irq_hw_number_t hv_msi_domain_ops_get_hwirq(struct msi_domain_info *info,
1320 msi_alloc_info_t *arg)
1322 return arg->msi_hwirq;
1325 static struct msi_domain_ops hv_msi_ops = {
1326 .get_hwirq = hv_msi_domain_ops_get_hwirq,
1327 .msi_prepare = hv_msi_prepare,
1328 .set_desc = pci_msi_set_desc,
1329 .msi_free = hv_msi_free,
1333 * hv_pcie_init_irq_domain() - Initialize IRQ domain
1334 * @hbus: The root PCI bus
1336 * This function creates an IRQ domain which will be used for
1337 * interrupts from devices that have been passed through. These
1338 * devices only support MSI and MSI-X, not line-based interrupts
1339 * or simulations of line-based interrupts through PCIe's
1340 * fabric-layer messages. Because interrupts are remapped, we
1341 * can support multi-message MSI here.
1343 * Return: '0' on success and error value on failure
1345 static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
1347 hbus->msi_info.chip = &hv_msi_irq_chip;
1348 hbus->msi_info.ops = &hv_msi_ops;
1349 hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
1350 MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
1352 hbus->msi_info.handler = handle_edge_irq;
1353 hbus->msi_info.handler_name = "edge";
1354 hbus->msi_info.data = hbus;
1355 hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode,
1358 if (!hbus->irq_domain) {
1359 dev_err(&hbus->hdev->device,
1360 "Failed to build an MSI IRQ domain\n");
1368 * get_bar_size() - Get the address space consumed by a BAR
1369 * @bar_val: Value that a BAR returned after -1 was written
1372 * This function returns the size of the BAR, rounded up to 1
1373 * page. It has to be rounded up because the hypervisor's page
1374 * table entry that maps the BAR into the VM can't specify an
1375 * offset within a page. The invariant is that the hypervisor
1376 * must place any BARs smaller than a page at the
1377 * beginning of a page.
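 *
 * For example, a 4 KiB 32-bit BAR probes as 0xfffff000; since the
 * caller ORs in 0xffffffff00000000 (see survey_child_resources()),
 * 1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK) works out to 0x1000,
 * already a whole page.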
1379 * Return: Size in bytes of the consumed MMIO space.
1381 static u64 get_bar_size(u64 bar_val)
1383 return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
1388 * survey_child_resources() - Total all MMIO requirements
1389 * @hbus: Root PCI bus, as understood by this driver
1391 static void survey_child_resources(struct hv_pcibus_device *hbus)
1393 struct list_head *iter;
1394 struct hv_pci_dev *hpdev;
1395 resource_size_t bar_size = 0;
1396 unsigned long flags;
1397 struct completion *event;
1401 /* If nobody is waiting on the answer, don't compute it. */
1402 event = xchg(&hbus->survey_event, NULL);
1406 /* If the answer has already been computed, go with it. */
1407 if (hbus->low_mmio_space || hbus->high_mmio_space) {
1412 spin_lock_irqsave(&hbus->device_list_lock, flags);
1415 * Due to an interesting quirk of the PCI spec, all memory regions
1416 * for a child device are a power of 2 in size and aligned in memory,
1417 * so it's sufficient to just add them up without tracking alignment.
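 * (For example, 4 KiB + 64 KiB BARs need 68 KiB total; assigning the
 * larger, naturally aligned regions first, as prepopulate_bars() does,
 * always fits them in a window of that size.)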
1419 list_for_each(iter, &hbus->children) {
1420 hpdev = container_of(iter, struct hv_pci_dev, list_entry);
1421 for (i = 0; i < 6; i++) {
1422 if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
1423 dev_err(&hbus->hdev->device,
1424 "There's an I/O BAR in this list!\n");
1426 if (hpdev->probed_bar[i] != 0) {
1428 * A probed BAR has all the upper bits set that can be changed.
1432 bar_val = hpdev->probed_bar[i];
1433 if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
1435 ((u64)hpdev->probed_bar[++i] << 32);
1437 bar_val |= 0xffffffff00000000ULL;
1439 bar_size = get_bar_size(bar_val);
1441 if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
1442 hbus->high_mmio_space += bar_size;
1444 hbus->low_mmio_space += bar_size;
1449 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1454 * prepopulate_bars() - Fill in BARs with defaults
1455 * @hbus: Root PCI bus, as understood by this driver
1457 * The core PCI driver code seems much, much happier if the BARs
1458 * for a device have values upon first scan. So fill them in.
1459 * The algorithm below works down from large sizes to small,
1460 * attempting to pack the assignments optimally. The assumption,
1461 * enforced in other parts of the code, is that the beginning of
1462 * the memory-mapped I/O space will be aligned on the largest BAR size.
1465 static void prepopulate_bars(struct hv_pcibus_device *hbus)
1467 resource_size_t high_size = 0;
1468 resource_size_t low_size = 0;
1469 resource_size_t high_base = 0;
1470 resource_size_t low_base = 0;
1471 resource_size_t bar_size;
1472 struct hv_pci_dev *hpdev;
1473 struct list_head *iter;
1474 unsigned long flags;
1480 if (hbus->low_mmio_space) {
1481 low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
1482 low_base = hbus->low_mmio_res->start;
1485 if (hbus->high_mmio_space) {
1486 high_size = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
1488 high_base = hbus->high_mmio_res->start;
1491 spin_lock_irqsave(&hbus->device_list_lock, flags);
1493 /* Pick addresses for the BARs. */
1495 list_for_each(iter, &hbus->children) {
1496 hpdev = container_of(iter, struct hv_pci_dev,
1498 for (i = 0; i < 6; i++) {
1499 bar_val = hpdev->probed_bar[i];
1502 high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
1505 ((u64)hpdev->probed_bar[i + 1]
1508 bar_val |= 0xffffffffULL << 32;
1510 bar_size = get_bar_size(bar_val);
1512 if (high_size != bar_size) {
1516 _hv_pcifront_write_config(hpdev,
1517 PCI_BASE_ADDRESS_0 + (4 * i),
1519 (u32)(high_base & 0xffffff00));
1521 _hv_pcifront_write_config(hpdev,
1522 PCI_BASE_ADDRESS_0 + (4 * i),
1523 4, (u32)(high_base >> 32));
1524 high_base += bar_size;
1526 if (low_size != bar_size)
1528 _hv_pcifront_write_config(hpdev,
1529 PCI_BASE_ADDRESS_0 + (4 * i),
1531 (u32)(low_base & 0xffffff00));
1532 low_base += bar_size;
1535 if (high_size <= 1 && low_size <= 1) {
1536 /* Set the memory enable bit. */
1537 _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2,
1539 command |= PCI_COMMAND_MEMORY;
1540 _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2,
1548 } while (high_size || low_size);
1550 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1554 * Assign entries in sysfs pci slot directory.
1556 * Note that this function does not need to lock the children list
1557 * because it is called from pci_devices_present_work which
1558 * is serialized with hv_eject_device_work because they are on the
1559 * same ordered workqueue. Therefore hbus->children list will not change
1560 * even when pci_create_slot sleeps.
1562 static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
1564 struct hv_pci_dev *hpdev;
1565 char name[SLOT_NAME_SIZE];
1568 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1569 if (hpdev->pci_slot)
1572 slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
1573 snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
1574 hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
1576 if (!hpdev->pci_slot)
1577 pr_warn("pci_create slot %s failed\n", name);
1582 * Remove entries in sysfs pci slot directory.
1584 static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
1586 struct hv_pci_dev *hpdev;
1588 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1589 if (!hpdev->pci_slot)
1591 pci_destroy_slot(hpdev->pci_slot);
1592 hpdev->pci_slot = NULL;
1597 * create_root_hv_pci_bus() - Expose a new root PCI bus
1598 * @hbus: Root PCI bus, as understood by this driver
1600 * Return: 0 on success, -errno on failure
1602 static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
1604 /* Register the device */
1605 hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device,
1606 0, /* bus number is always zero */
1609 &hbus->resources_for_children);
1613 hbus->pci_bus->msi = &hbus->msi_chip;
1614 hbus->pci_bus->msi->dev = &hbus->hdev->device;
1616 pci_lock_rescan_remove();
1617 pci_scan_child_bus(hbus->pci_bus);
1618 pci_bus_assign_resources(hbus->pci_bus);
1619 hv_pci_assign_slots(hbus);
1620 pci_bus_add_devices(hbus->pci_bus);
1621 pci_unlock_rescan_remove();
1622 hbus->state = hv_pcibus_installed;
1626 struct q_res_req_compl {
1627 struct completion host_event;
1628 struct hv_pci_dev *hpdev;
1632 * q_resource_requirements() - Query Resource Requirements
1633 * @context: The completion context.
1634 * @resp: The response that came from the host.
1635 * @resp_packet_size: The size in bytes of resp.
1637 * This function is invoked on completion of a Query Resource
1638 * Requirements packet.
1640 static void q_resource_requirements(void *context, struct pci_response *resp,
1641 int resp_packet_size)
1643 struct q_res_req_compl *completion = context;
1644 struct pci_q_res_req_response *q_res_req =
1645 (struct pci_q_res_req_response *)resp;
1648 if (resp->status < 0) {
1649 dev_err(&completion->hpdev->hbus->hdev->device,
1650 "query resource requirements failed: %x\n",
1653 for (i = 0; i < 6; i++) {
1654 completion->hpdev->probed_bar[i] =
1655 q_res_req->probed_bar[i];
1659 complete(&completion->host_event);
1662 static void get_pcichild(struct hv_pci_dev *hpdev,
1663 enum hv_pcidev_ref_reason reason)
1665 refcount_inc(&hpdev->refs);
1668 static void put_pcichild(struct hv_pci_dev *hpdev,
1669 enum hv_pcidev_ref_reason reason)
1671 if (refcount_dec_and_test(&hpdev->refs))
1676 * new_pcichild_device() - Create a new child device
1677 * @hbus: The internal struct tracking this root PCI bus.
1678 * @desc: The information supplied so far from the host
1681 * This function creates the tracking structure for a new child
1682 * device and kicks off the process of figuring out what it is.
1684 * Return: Pointer to the new tracking struct
1686 static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
1687 struct pci_function_description *desc)
1689 struct hv_pci_dev *hpdev;
1690 struct pci_child_message *res_req;
1691 struct q_res_req_compl comp_pkt;
1693 struct pci_packet init_packet;
1694 u8 buffer[sizeof(struct pci_child_message)];
1696 unsigned long flags;
1699 hpdev = kzalloc(sizeof(*hpdev), GFP_ATOMIC);
1705 memset(&pkt, 0, sizeof(pkt));
1706 init_completion(&comp_pkt.host_event);
1707 comp_pkt.hpdev = hpdev;
1708 pkt.init_packet.compl_ctxt = &comp_pkt;
1709 pkt.init_packet.completion_func = q_resource_requirements;
1710 res_req = (struct pci_child_message *)&pkt.init_packet.message;
1711 res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
1712 res_req->wslot.slot = desc->win_slot.slot;
1714 ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
1715 sizeof(struct pci_child_message),
1716 (unsigned long)&pkt.init_packet,
1718 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1722 if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
1725 hpdev->desc = *desc;
1726 refcount_set(&hpdev->refs, 1);
1727 get_pcichild(hpdev, hv_pcidev_ref_childlist);
1728 spin_lock_irqsave(&hbus->device_list_lock, flags);
1730 list_add_tail(&hpdev->list_entry, &hbus->children);
1731 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1740 * get_pcichild_wslot() - Find device from slot
1741 * @hbus: Root PCI bus, as understood by this driver
1742 * @wslot: Location on the bus
1744 * This function looks up a PCI device and returns the internal
1745 * representation of it. It acquires a reference on it, so that
1746 * the device won't be deleted while somebody is using it. The
1747 * caller is responsible for calling put_pcichild() to release
1750 * Return: Internal representation of a PCI device
1752 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
1755 unsigned long flags;
1756 struct hv_pci_dev *iter, *hpdev = NULL;
1758 spin_lock_irqsave(&hbus->device_list_lock, flags);
1759 list_for_each_entry(iter, &hbus->children, list_entry) {
1760 if (iter->desc.win_slot.slot == wslot) {
1762 get_pcichild(hpdev, hv_pcidev_ref_by_slot);
1766 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1772 * pci_devices_present_work() - Handle new list of child devices
1773 * @work: Work struct embedded in struct hv_dr_work
1775 * "Bus Relations" is the Windows term for "children of this
1776 * bus." The terminology is preserved here for people trying to
1777 * debug the interaction between Hyper-V and Linux. This
1778 * function is called when the parent partition reports a list
1779 * of functions that should be observed under this PCI Express
1782 * This function updates the list, and must tolerate being
1783 * called multiple times with the same information. The typical
1784 * number of child devices is one, with very atypical cases
1785 * involving three or four, so the algorithms used here can be
1786 * simple and inefficient.
1788 * It must also treat the omission of a previously observed device as
1789 * notification that the device no longer exists.
1791 * Note that this function is serialized with hv_eject_device_work(),
1792 * because both are pushed to the ordered workqueue hbus->wq.
1794 static void pci_devices_present_work(struct work_struct *work)
1798 struct list_head *iter;
1799 struct pci_function_description *new_desc;
1800 struct hv_pci_dev *hpdev;
1801 struct hv_pcibus_device *hbus;
1802 struct list_head removed;
1803 struct hv_dr_work *dr_wrk;
1804 struct hv_dr_state *dr = NULL;
1805 unsigned long flags;
1807 dr_wrk = container_of(work, struct hv_dr_work, wrk);
1811 INIT_LIST_HEAD(&removed);
1813 /* Pull this off the queue and process it if it was the last one. */
1814 spin_lock_irqsave(&hbus->device_list_lock, flags);
1815 while (!list_empty(&hbus->dr_list)) {
1816 dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
1818 list_del(&dr->list_entry);
1820 /* Throw this away if the list still has stuff in it. */
1821 if (!list_empty(&hbus->dr_list)) {
1826 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1833 /* First, mark all existing children as reported missing. */
1834 spin_lock_irqsave(&hbus->device_list_lock, flags);
1835 list_for_each(iter, &hbus->children) {
1836 hpdev = container_of(iter, struct hv_pci_dev,
1838 hpdev->reported_missing = true;
1840 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1842 /* Next, add back any reported devices. */
1843 for (child_no = 0; child_no < dr->device_count; child_no++) {
1845 new_desc = &dr->func[child_no];
1847 spin_lock_irqsave(&hbus->device_list_lock, flags);
1848 list_for_each(iter, &hbus->children) {
1849 hpdev = container_of(iter, struct hv_pci_dev,
1851 if ((hpdev->desc.win_slot.slot ==
1852 new_desc->win_slot.slot) &&
1853 (hpdev->desc.v_id == new_desc->v_id) &&
1854 (hpdev->desc.d_id == new_desc->d_id) &&
1855 (hpdev->desc.ser == new_desc->ser)) {
1856 hpdev->reported_missing = false;
1860 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1863 hpdev = new_pcichild_device(hbus, new_desc);
1865 dev_err(&hbus->hdev->device,
1866 "couldn't record a child device.\n");
1870 /* Move missing children to a list on the stack. */
1871 spin_lock_irqsave(&hbus->device_list_lock, flags);
1874 list_for_each(iter, &hbus->children) {
1875 hpdev = container_of(iter, struct hv_pci_dev,
1877 if (hpdev->reported_missing) {
1879 put_pcichild(hpdev, hv_pcidev_ref_childlist);
1880 list_move_tail(&hpdev->list_entry, &removed);
1885 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1887 /* Delete everything that should no longer exist. */
1888 while (!list_empty(&removed)) {
1889 hpdev = list_first_entry(&removed, struct hv_pci_dev,
1891 list_del(&hpdev->list_entry);
1893 if (hpdev->pci_slot)
1894 pci_destroy_slot(hpdev->pci_slot);
1896 put_pcichild(hpdev, hv_pcidev_ref_initial);
1899 switch (hbus->state) {
1900 case hv_pcibus_installed:
1902 * Tell the core to rescan bus
1903 * because there may have been changes.
1905 pci_lock_rescan_remove();
1906 pci_scan_child_bus(hbus->pci_bus);
1907 hv_pci_assign_slots(hbus);
1908 pci_unlock_rescan_remove();
1911 case hv_pcibus_init:
1912 case hv_pcibus_probed:
1913 survey_child_resources(hbus);
1925 * hv_pci_devices_present() - Handles list of new children
1926 * @hbus: Root PCI bus, as understood by this driver
1927 * @relations: Packet from host listing children
1929 * This function is invoked whenever a new list of devices for this bus appears.
1932 static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
1933 struct pci_bus_relations *relations)
1935 struct hv_dr_state *dr;
1936 struct hv_dr_work *dr_wrk;
1937 unsigned long flags;
1939 dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
1943 dr = kzalloc(offsetof(struct hv_dr_state, func) +
1944 (sizeof(struct pci_function_description) *
1945 (relations->device_count)), GFP_NOWAIT);
1951 INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
1953 dr->device_count = relations->device_count;
1954 if (dr->device_count != 0) {
1955 memcpy(dr->func, relations->func,
1956 sizeof(struct pci_function_description) *
1960 spin_lock_irqsave(&hbus->device_list_lock, flags);
1961 list_add_tail(&dr->list_entry, &hbus->dr_list);
1962 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1965 queue_work(hbus->wq, &dr_wrk->wrk);
1969 * hv_eject_device_work() - Asynchronously handles ejection
1970 * @work: Work struct embedded in internal device struct
1972 * This function handles ejecting a device. Windows will
1973 * attempt to gracefully eject a device, waiting 60 seconds to
1974 * hear back from the guest OS that this completed successfully.
1975 * If this timer expires, the device will be forcibly removed.
1977 static void hv_eject_device_work(struct work_struct *work)
1979 struct pci_eject_response *ejct_pkt;
1980 struct hv_pcibus_device *hbus;
1981 struct hv_pci_dev *hpdev;
1982 struct pci_dev *pdev;
1983 unsigned long flags;
1986 struct pci_packet pkt;
1987 u8 buffer[sizeof(struct pci_eject_response)];
1990 hpdev = container_of(work, struct hv_pci_dev, wrk);
1993 if (hpdev->state != hv_pcichild_ejecting) {
1994 put_pcichild(hpdev, hv_pcidev_ref_pnp);
1999 * Ejection can come before or after the PCI bus has been set up, so
2000 * attempt to find it and tear down the bus state, if it exists. This
2001 * must be done without constructs like pci_domain_nr(hbus->pci_bus)
2002 * because hbus->pci_bus may not exist yet.
2004 wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
2005 pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
2007 pci_lock_rescan_remove();
2008 pci_stop_and_remove_bus_device(pdev);
2010 pci_unlock_rescan_remove();
2013 spin_lock_irqsave(&hbus->device_list_lock, flags);
2014 list_del(&hpdev->list_entry);
2015 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2017 if (hpdev->pci_slot)
2018 pci_destroy_slot(hpdev->pci_slot);
2020 memset(&ctxt, 0, sizeof(ctxt));
2021 ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
2022 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
2023 ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
2024 vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
2025 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
2026 VM_PKT_DATA_INBAND, 0);
2028 put_pcichild(hpdev, hv_pcidev_ref_childlist);
2029 put_pcichild(hpdev, hv_pcidev_ref_initial);
2030 put_pcichild(hpdev, hv_pcidev_ref_pnp);
2032 /* hpdev has been freed. Do not use it any more. */
2037 * hv_pci_eject_device() - Handles device ejection
2038 * @hpdev: Internal device tracking struct
2040 * This function is invoked when an ejection packet arrives. It
2041 * just schedules work so that we don't re-enter the packet
2042 * delivery code handling the ejection.
2044 static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
2046 hpdev->state = hv_pcichild_ejecting;
2047 get_pcichild(hpdev, hv_pcidev_ref_pnp);
2048 INIT_WORK(&hpdev->wrk, hv_eject_device_work);
2049 get_hvpcibus(hpdev->hbus);
2050 queue_work(hpdev->hbus->wq, &hpdev->wrk);
2054 * hv_pci_onchannelcallback() - Handles incoming packets
2055 * @context: Internal bus tracking struct
2057 * This function is invoked whenever the host sends a packet to
2058 * this channel (which is private to this root PCI bus).
2060 static void hv_pci_onchannelcallback(void *context)
2062 const int packet_size = 0x100;
2064 struct hv_pcibus_device *hbus = context;
2067 struct vmpacket_descriptor *desc;
2068 unsigned char *buffer;
2069 int bufferlen = packet_size;
2070 struct pci_packet *comp_packet;
2071 struct pci_response *response;
2072 struct pci_incoming_message *new_message;
2073 struct pci_bus_relations *bus_rel;
2074 struct pci_dev_incoming *dev_message;
2075 struct hv_pci_dev *hpdev;
2077 buffer = kmalloc(bufferlen, GFP_ATOMIC);
2082 ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer,
2083 bufferlen, &bytes_recvd, &req_id);
2085 if (ret == -ENOBUFS) {
2087 /* Handle large packet */
2088 bufferlen = bytes_recvd;
2089 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
2095 /* Zero length indicates there are no more packets. */
2096 if (ret || !bytes_recvd)
2100 * All incoming packets must be at least as large as a response.
2103 if (bytes_recvd <= sizeof(struct pci_response))
2105 desc = (struct vmpacket_descriptor *)buffer;
2107 switch (desc->type) {
2111 * The host is trusted, and thus it's safe to interpret
2112 * this transaction ID as a pointer.
2114 comp_packet = (struct pci_packet *)req_id;
2115 response = (struct pci_response *)buffer;
2116 comp_packet->completion_func(comp_packet->compl_ctxt,
2121 case VM_PKT_DATA_INBAND:
2123 new_message = (struct pci_incoming_message *)buffer;
2124 switch (new_message->message_type.type) {
2125 case PCI_BUS_RELATIONS:
2127 bus_rel = (struct pci_bus_relations *)buffer;
2129 offsetof(struct pci_bus_relations, func) +
2130 (sizeof(struct pci_function_description) *
2131 (bus_rel->device_count))) {
2132 dev_err(&hbus->hdev->device,
2133 "bus relations too small\n");
2137 hv_pci_devices_present(hbus, bus_rel);
2142 dev_message = (struct pci_dev_incoming *)buffer;
2143 hpdev = get_pcichild_wslot(hbus,
2144 dev_message->wslot.slot);
2146 hv_pci_eject_device(hpdev);
2148 hv_pcidev_ref_by_slot);
2153 dev_warn(&hbus->hdev->device,
2154 "Unimplemented protocol message %x\n",
2155 new_message->message_type.type);
2161 dev_err(&hbus->hdev->device,
2162 "unhandled packet type %d, tid %llx len %d\n",
2163 desc->type, req_id, bytes_recvd);
2172 * hv_pci_protocol_negotiation() - Set up protocol
2173 * @hdev: VMBus's tracking struct for this root PCI bus
2175 * This driver is intended to support running on Windows 10
2176 * (server) and later versions. It will not run on earlier
2177 * versions, as they assume that many of the operations which
2178 * Linux needs accomplished with a spinlock held were done via
2179 * asynchronous messaging via VMBus. Windows 10 increases the
2180 * surface area of PCI emulation so that these actions can take
2181 * place by suspending a virtual processor for their duration.
2183 * This function negotiates the channel protocol version,
2184 * failing if the host doesn't support the necessary protocol
2187 static int hv_pci_protocol_negotiation(struct hv_device *hdev)
2189 struct pci_version_request *version_req;
2190 struct hv_pci_compl comp_pkt;
2191 struct pci_packet *pkt;
2196 * Initiate the handshake with the host and negotiate
2197 * a version that the host can support. We start with the
2198 * highest version number and go down if the host cannot support it.
2201 pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
2205 init_completion(&comp_pkt.host_event);
2206 pkt->completion_func = hv_pci_generic_compl;
2207 pkt->compl_ctxt = &comp_pkt;
2208 version_req = (struct pci_version_request *)&pkt->message;
2209 version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
2211 for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
2212 version_req->protocol_version = pci_protocol_versions[i];
2213 ret = vmbus_sendpacket(hdev->channel, version_req,
2214 sizeof(struct pci_version_request),
2215 (unsigned long)pkt, VM_PKT_DATA_INBAND,
2216 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2217 if (!ret)
2218 ret = wait_for_response(hdev, &comp_pkt.host_event);
2220 if (ret) {
2221 dev_err(&hdev->device,
2222 "PCI Pass-through VSP failed to request version: %d",
2223 ret);
2224 goto exit;
2225 }
2227 if (comp_pkt.completion_status >= 0) {
2228 pci_protocol_version = pci_protocol_versions[i];
2229 dev_info(&hdev->device,
2230 "PCI VMBus probing: Using version %#x\n",
2231 pci_protocol_version);
2235 if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
2236 dev_err(&hdev->device,
2237 "PCI Pass-through VSP failed version request: %#x",
2238 comp_pkt.completion_status);
2239 ret = -EPROTO;
2240 goto exit;
2241 }
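/*
 * A revision mismatch only means this particular version is unsupported:
 * reset the completion and retry with the next (lower) entry in the table.
 */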
2243 reinit_completion(&comp_pkt.host_event);
2246 dev_err(&hdev->device,
2247 "PCI pass-through VSP failed to find supported version");
2256 * hv_pci_free_bridge_windows() - Release memory regions for the bus
2258 * @hbus: Root PCI bus, as understood by this driver
2260 static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
2263 * Set the resources back to the way they looked when they
2264 * were allocated by setting IORESOURCE_BUSY again.
2267 if (hbus->low_mmio_space && hbus->low_mmio_res) {
2268 hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
2269 vmbus_free_mmio(hbus->low_mmio_res->start,
2270 resource_size(hbus->low_mmio_res));
2273 if (hbus->high_mmio_space && hbus->high_mmio_res) {
2274 hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
2275 vmbus_free_mmio(hbus->high_mmio_res->start,
2276 resource_size(hbus->high_mmio_res));
2281 * hv_pci_allocate_bridge_windows() - Allocate memory regions for the bus
2283 * @hbus: Root PCI bus, as understood by this driver
2285 * This function calls vmbus_allocate_mmio(), which is itself a
2286 * bit of a compromise. Ideally, we might change the pnp layer
2287 * in the kernel such that it comprehends either PCI devices
2288 * which are "grandchildren of ACPI," with some intermediate bus
2289 * node (in this case, VMBus) or change it such that it
2290 * understands VMBus. The pnp layer, however, has been declared
2291 * deprecated, and not subject to change.
2293 * The workaround, implemented here, is to ask VMBus to allocate
2294 * MMIO space for this bus. VMBus itself knows which ranges are
2295 * appropriate by looking at its own ACPI objects. Then, after
2296 * these ranges are claimed, they're modified to look like they
2297 * would have looked if the ACPI and pnp code had allocated
2298 * bridge windows. These descriptors have to exist in this form
2299 * in order to satisfy the code which will get invoked when the
2300 * endpoint PCI function driver calls request_mem_region() or
2301 * request_mem_region_exclusive().
2303 * Return: 0 on success, -errno on failure
2305 static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
2307 resource_size_t align;
2310 if (hbus->low_mmio_space) {
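/* Align the window to the largest power of two not exceeding its size. */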
2311 align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
2312 ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
2313 (u64)(u32)0xffffffff,
2314 hbus->low_mmio_space,
2315 align, false);
2316 if (ret) {
2317 dev_err(&hbus->hdev->device,
2318 "Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
2319 hbus->low_mmio_space);
2323 /* Modify this resource to become a bridge window. */
2324 hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
2325 hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
2326 pci_add_resource(&hbus->resources_for_children,
2327 hbus->low_mmio_res);
2330 if (hbus->high_mmio_space) {
2331 align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
2332 ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
2333 0x100000000, -1,
2334 hbus->high_mmio_space, align,
2335 false);
2336 if (ret) {
2337 dev_err(&hbus->hdev->device,
2338 "Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
2339 hbus->high_mmio_space);
2340 goto release_low_mmio;
2343 /* Modify this resource to become a bridge window. */
2344 hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
2345 hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
2346 pci_add_resource(&hbus->resources_for_children,
2347 hbus->high_mmio_res);
2352 release_low_mmio:
2353 if (hbus->low_mmio_res) {
2354 vmbus_free_mmio(hbus->low_mmio_res->start,
2355 resource_size(hbus->low_mmio_res));
2362 * hv_allocate_config_window() - Find MMIO space for PCI Config
2363 * @hbus: Root PCI bus, as understood by this driver
2365 * This function claims memory-mapped I/O space for accessing
2366 * configuration space for the functions on this bus.
2368 * Return: 0 on success, -errno on failure
2370 static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
2375 * Set up a region of MMIO space to use for accessing configuration space.
2378 ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
2379 PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
2384 * vmbus_allocate_mmio() gets used for allocating both device endpoint
2385 * resource claims (those which cannot be overlapped) and the ranges
2386 * which are valid for the children of this bus, which are intended
2387 * to be overlapped by those children. Set the flag on this claim
2388 * meaning that this region can't be overlapped.
2391 hbus->mem_config->flags |= IORESOURCE_BUSY;
2396 static void hv_free_config_window(struct hv_pcibus_device *hbus)
2398 vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
2402 * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
2403 * @hdev: VMBus's tracking struct for this root PCI bus
2405 * Return: 0 on success, -errno on failure
2407 static int hv_pci_enter_d0(struct hv_device *hdev)
2409 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
2410 struct pci_bus_d0_entry *d0_entry;
2411 struct hv_pci_compl comp_pkt;
2412 struct pci_packet *pkt;
2416 * Tell the host that the bus is ready to use, and moved into the
2417 * powered-on state. This includes telling the host which region
2418 * of memory-mapped I/O space has been chosen for configuration space access.
2421 pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
2425 init_completion(&comp_pkt.host_event);
2426 pkt->completion_func = hv_pci_generic_compl;
2427 pkt->compl_ctxt = &comp_pkt;
2428 d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
2429 d0_entry->message_type.type = PCI_BUS_D0ENTRY;
2430 d0_entry->mmio_base = hbus->mem_config->start;
2432 ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
2433 (unsigned long)pkt, VM_PKT_DATA_INBAND,
2434 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2435 if (!ret)
2436 ret = wait_for_response(hdev, &comp_pkt.host_event);
2441 if (comp_pkt.completion_status < 0) {
2442 dev_err(&hdev->device,
2443 "PCI Pass-through VSP failed D0 Entry with status %x\n",
2444 comp_pkt.completion_status);
2457 * hv_pci_query_relations() - Ask host to send list of child devices
2459 * @hdev: VMBus's tracking struct for this root PCI bus
2461 * Return: 0 on success, -errno on failure
2463 static int hv_pci_query_relations(struct hv_device *hdev)
2465 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
2466 struct pci_message message;
2467 struct completion comp;
2470 /* Ask the host to send along the list of child devices */
2471 init_completion(&comp);
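/*
 * Only one survey may be outstanding at a time: cmpxchg() publishes this
 * completion atomically and fails if another survey is already pending.
 */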
2472 if (cmpxchg(&hbus->survey_event, NULL, &comp))
2473 return -ENOTEMPTY;
2475 memset(&message, 0, sizeof(message));
2476 message.type = PCI_QUERY_BUS_RELATIONS;
2478 ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
2479 0, VM_PKT_DATA_INBAND, 0);
2480 if (!ret)
2481 ret = wait_for_response(hdev, &comp);
2487 * hv_send_resources_allocated() - Report local resource choices
2488 * @hdev: VMBus's tracking struct for this root PCI bus
2490 * The host OS is expecting to be sent a request as a message
2491 * which contains all the resources that the device will use.
2492 * The response contains those same resources, "translated",
2493 * which is to say, the values which should be used by the
2494 * hardware when it delivers an interrupt. (MMIO resources are
2495 * used in local terms.) This is nice for Windows, and lines up
2496 * with the FDO/PDO split, which doesn't exist in Linux. Linux
2497 * is deeply expecting to scan an emulated PCI configuration
2498 * space. So this message is sent here only to drive the state
2499 * machine on the host forward.
2501 * Return: 0 on success, -errno on failure
2503 static int hv_send_resources_allocated(struct hv_device *hdev)
2505 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
2506 struct pci_resources_assigned *res_assigned;
2507 struct pci_resources_assigned2 *res_assigned2;
2508 struct hv_pci_compl comp_pkt;
2509 struct hv_pci_dev *hpdev;
2510 struct pci_packet *pkt;
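/*
 * Protocol 1.2 introduced the PCI_RESOURCES_ASSIGNED2 message; size the
 * request to match whichever message the negotiated version uses.
 */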
2515 size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
2516 ? sizeof(*res_assigned) : sizeof(*res_assigned2);
2518 pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
2524 for (wslot = 0; wslot < 256; wslot++) {
2525 hpdev = get_pcichild_wslot(hbus, wslot);
2526 if (!hpdev)
2527 continue;
2529 memset(pkt, 0, sizeof(*pkt) + size_res);
2530 init_completion(&comp_pkt.host_event);
2531 pkt->completion_func = hv_pci_generic_compl;
2532 pkt->compl_ctxt = &comp_pkt;
2534 if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
2535 res_assigned =
2536 (struct pci_resources_assigned *)&pkt->message;
2537 res_assigned->message_type.type =
2538 PCI_RESOURCES_ASSIGNED;
2539 res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
2540 } else {
2541 res_assigned2 =
2542 (struct pci_resources_assigned2 *)&pkt->message;
2543 res_assigned2->message_type.type =
2544 PCI_RESOURCES_ASSIGNED2;
2545 res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
2546 }
2547 put_pcichild(hpdev, hv_pcidev_ref_by_slot);
2549 ret = vmbus_sendpacket(hdev->channel, &pkt->message,
2550 size_res, (unsigned long)pkt,
2552 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2553 if (!ret)
2554 ret = wait_for_response(hdev, &comp_pkt.host_event);
2558 if (comp_pkt.completion_status < 0) {
2560 dev_err(&hdev->device,
2561 "resource allocated returned 0x%x",
2562 comp_pkt.completion_status);
2572 * hv_send_resources_released() - Report local resources released
2574 * @hdev: VMBus's tracking struct for this root PCI bus
2576 * Return: 0 on success, -errno on failure
2578 static int hv_send_resources_released(struct hv_device *hdev)
2580 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
2581 struct pci_child_message pkt;
2582 struct hv_pci_dev *hpdev;
2586 for (wslot = 0; wslot < 256; wslot++) {
2587 hpdev = get_pcichild_wslot(hbus, wslot);
2588 if (!hpdev)
2589 continue;
2591 memset(&pkt, 0, sizeof(pkt));
2592 pkt.message_type.type = PCI_RESOURCES_RELEASED;
2593 pkt.wslot.slot = hpdev->desc.win_slot.slot;
2595 put_pcichild(hpdev, hv_pcidev_ref_by_slot);
2597 ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
2598 VM_PKT_DATA_INBAND, 0);
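/*
 * get_hvpcibus()/put_hvpcibus() track outstanding references to the bus.
 * The final put completes remove_event, which hv_pci_remove() waits on
 * before tearing down and freeing the bus.
 */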
2606 static void get_hvpcibus(struct hv_pcibus_device *hbus)
2608 atomic_inc(&hbus->remove_lock);
2611 static void put_hvpcibus(struct hv_pcibus_device *hbus)
2613 if (atomic_dec_and_test(&hbus->remove_lock))
2614 complete(&hbus->remove_event);
2618 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
2619 * @hdev: VMBus's tracking struct for this root PCI bus
2620 * @dev_id: Identifies the device itself
2622 * Return: 0 on success, -errno on failure
2624 static int hv_pci_probe(struct hv_device *hdev,
2625 const struct hv_vmbus_device_id *dev_id)
2627 struct hv_pcibus_device *hbus;
2631 * hv_pcibus_device contains the hypercall arguments for retargeting in
2632 * hv_irq_unmask(). Those must not cross a page boundary.
2634 BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE);
2636 hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
2639 hbus->state = hv_pcibus_init;
2642 * The PCI bus "domain" is what is called "segment" in ACPI and
2643 * other specs. Pull it from the instance ID, to get something
2644 * unique. Bytes 8 and 9 are what is used in Windows guests, so
2645 * do the same thing for consistency. Note that, since this code
2646 * only runs in a Hyper-V VM, Hyper-V can (and does) guarantee
2647 * that (1) the only domain in use for something that looks like
2648 * a physical PCI bus (which is actually emulated by the
2649 * hypervisor) is domain 0 and (2) there will be no overlap
2650 * between domains derived from these instance IDs in the same VM.
2653 hbus->sysdata.domain = hdev->dev_instance.b[9] |
2654 hdev->dev_instance.b[8] << 8;
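/* For example, instance ID bytes b[8] = 0x12 and b[9] = 0x34 yield PCI domain 0x1234. */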
2657 atomic_inc(&hbus->remove_lock);
2658 INIT_LIST_HEAD(&hbus->children);
2659 INIT_LIST_HEAD(&hbus->dr_list);
2660 INIT_LIST_HEAD(&hbus->resources_for_children);
2661 spin_lock_init(&hbus->config_lock);
2662 spin_lock_init(&hbus->device_list_lock);
2663 spin_lock_init(&hbus->retarget_msi_interrupt_lock);
2664 init_completion(&hbus->remove_event);
2665 hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
2666 hbus->sysdata.domain);
2672 ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
2673 hv_pci_onchannelcallback, hbus);
2677 hv_set_drvdata(hdev, hbus);
2679 ret = hv_pci_protocol_negotiation(hdev);
2683 ret = hv_allocate_config_window(hbus);
2687 hbus->cfg_addr = ioremap(hbus->mem_config->start,
2688 PCI_CONFIG_MMIO_LENGTH);
2689 if (!hbus->cfg_addr) {
2690 dev_err(&hdev->device,
2691 "Unable to map a virtual address for config space\n");
2696 hbus->sysdata.fwnode = irq_domain_alloc_fwnode(hbus);
2697 if (!hbus->sysdata.fwnode) {
2698 ret = -ENOMEM;
2699 goto unmap;
2700 }
2702 ret = hv_pcie_init_irq_domain(hbus);
2703 if (ret)
2704 goto free_fwnode;
2706 ret = hv_pci_query_relations(hdev);
2707 if (ret)
2708 goto free_irq_domain;
2710 ret = hv_pci_enter_d0(hdev);
2711 if (ret)
2712 goto free_irq_domain;
2714 ret = hv_pci_allocate_bridge_windows(hbus);
2715 if (ret)
2716 goto free_irq_domain;
2718 ret = hv_send_resources_allocated(hdev);
2719 if (ret)
2720 goto free_windows;
2722 prepopulate_bars(hbus);
2724 hbus->state = hv_pcibus_probed;
2726 ret = create_root_hv_pci_bus(hbus);
2727 if (ret)
2728 goto free_windows;
2730 return 0;
2732 free_windows:
2733 hv_pci_free_bridge_windows(hbus);
2734 free_irq_domain:
2735 irq_domain_remove(hbus->irq_domain);
2736 free_fwnode:
2737 irq_domain_free_fwnode(hbus->sysdata.fwnode);
2738 unmap:
2739 iounmap(hbus->cfg_addr);
2740 free_config:
2741 hv_free_config_window(hbus);
2742 close:
2743 vmbus_close(hdev->channel);
2744 destroy_wq:
2745 destroy_workqueue(hbus->wq);
2746 free_bus:
2747 free_page((unsigned long)hbus);
2748 return ret;
2751 static void hv_pci_bus_exit(struct hv_device *hdev)
2753 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
2754 struct {
2755 struct pci_packet teardown_packet;
2756 u8 buffer[sizeof(struct pci_message)];
2757 } pkt;
2758 struct pci_bus_relations relations;
2759 struct hv_pci_compl comp_pkt;
2763 * After the host sends the RESCIND_CHANNEL message, it doesn't
2764 * access the per-channel ringbuffer any longer.
2766 if (hdev->channel->rescind)
2767 return;
2769 /* Delete any children which might still exist. */
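/*
 * Presenting an empty relations list (device_count == 0) makes
 * hv_pci_devices_present() tear down every child still on the bus.
 */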
2770 memset(&relations, 0, sizeof(relations));
2771 hv_pci_devices_present(hbus, &relations);
2773 ret = hv_send_resources_released(hdev);
2775 dev_err(&hdev->device,
2776 "Couldn't send resources released packet(s)\n");
2778 memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
2779 init_completion(&comp_pkt.host_event);
2780 pkt.teardown_packet.completion_func = hv_pci_generic_compl;
2781 pkt.teardown_packet.compl_ctxt = &comp_pkt;
2782 pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;
2784 ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message,
2785 sizeof(struct pci_message),
2786 (unsigned long)&pkt.teardown_packet,
2788 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
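/*
 * Give the host up to ten seconds to acknowledge the D0 exit; teardown
 * proceeds even if it never responds.
 */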
2790 wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
2794 * hv_pci_remove() - Remove routine for this VMBus channel
2795 * @hdev: VMBus's tracking struct for this root PCI bus
2797 * Return: 0 on success, -errno on failure
2799 static int hv_pci_remove(struct hv_device *hdev)
2801 struct hv_pcibus_device *hbus;
2803 hbus = hv_get_drvdata(hdev);
2804 if (hbus->state == hv_pcibus_installed) {
2805 /* Remove the bus from PCI's point of view. */
2806 pci_lock_rescan_remove();
2807 pci_stop_root_bus(hbus->pci_bus);
2808 hv_pci_remove_slots(hbus);
2809 pci_remove_root_bus(hbus->pci_bus);
2810 pci_unlock_rescan_remove();
2811 hbus->state = hv_pcibus_removed;
2814 hv_pci_bus_exit(hdev);
2816 vmbus_close(hdev->channel);
2818 iounmap(hbus->cfg_addr);
2819 hv_free_config_window(hbus);
2820 pci_free_resource_list(&hbus->resources_for_children);
2821 hv_pci_free_bridge_windows(hbus);
2822 irq_domain_remove(hbus->irq_domain);
2823 irq_domain_free_fwnode(hbus->sysdata.fwnode);
2825 wait_for_completion(&hbus->remove_event);
2826 destroy_workqueue(hbus->wq);
2827 free_page((unsigned long)hbus);
2831 static const struct hv_vmbus_device_id hv_pci_id_table[] = {
2832 /* PCI Pass-through Class ID */
2833 /* 44C4F61D-4444-4400-9D52-802E27EDE19F */
2834 { HV_PCIE_GUID, },
2835 { },
2836 };
2838 MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);
2840 static struct hv_driver hv_pci_drv = {
2841 .name = "hv_pci",
2842 .id_table = hv_pci_id_table,
2843 .probe = hv_pci_probe,
2844 .remove = hv_pci_remove,
2845 };
2847 static void __exit exit_hv_pci_drv(void)
2849 vmbus_driver_unregister(&hv_pci_drv);
2852 static int __init init_hv_pci_drv(void)
2854 return vmbus_driver_register(&hv_pci_drv);
2857 module_init(init_hv_pci_drv);
2858 module_exit(exit_hv_pci_drv);
2860 MODULE_DESCRIPTION("Hyper-V PCI");
2861 MODULE_LICENSE("GPL v2");