/*
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 */
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"
#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define POLLJIFFIES_CONTROLVMCHANNEL_FAST 1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET 0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261
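/*
 * Example decoding of the little-endian signature bytes above:
 *   0x73696e55 -> 'U','n','i','s'   0x70537379 -> 'y','s','S','p'
 *   0x34367261 -> 'a','r','6','4'   => "UnisysSpar64"
 */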
static int visorchipset_major;
static int
visorchipset_open(struct inode *inode, struct file *file)
{
	unsigned int minor_number = iminor(inode);

	if (minor_number)
		return -ENODEV;
	file->private_data = NULL;
	return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
	return 0;
}
/*
 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode. As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* when we got our last controlvm message */
static unsigned long most_recent_message_jiffies;
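/*
 * A parser_context holds a private copy of the parameter payload of one
 * controlvm message; 'data' is the raw byte stream, while 'curr' and
 * 'bytes_remaining' form the cursor used by parser_param_start() and
 * parser_string_get().
 */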
struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	char data[0];
};
static struct delayed_work periodic_controlvm_work;

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;

static struct visorchannel *controlvm_channel;
/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
	u8 *ptr;		/* pointer to base address of payload pool */
	u64 offset;		/*
				 * offset from beginning of controlvm
				 * channel to beginning of payload pool
				 */
	u32 bytes;		/* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;
static unsigned long controlvm_payload_bytes_buffered;
/*
 * The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements. In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;
/*
 * This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
	/* a payload from a controlvm message, containing a file data buffer */
	struct parser_context *parser_ctx;
	/* points within data area of parser_ctx to next byte of data */
	size_t bytes_remaining;
};
#define PUTFILE_REQUEST_SIG 0x0906101302281211
/*
 * This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation. Structs of this type are dynamically linked into
 * <Putfile_request_list>.
 */
struct putfile_request {
	u64 sig;	/* PUTFILE_REQUEST_SIG */

	/* header from original TransmitFile request */
	struct controlvm_message_header controlvm_header;

	/* link to next struct putfile_request */
	struct list_head next_putfile_request;

	/*
	 * head of putfile_buffer_entry list, which describes the data to be
	 * supplied as putfile data;
	 * - this list is added to when controlvm messages come in that supply
	 *   file data
	 * - this list is removed from via the hotplug program that is actually
	 *   consuming these buffers to write as file data
	 */
	struct list_head input_buffer_list;
	spinlock_t req_list_lock;	/* lock for input_buffer_list */

	/* waiters for input_buffer_list to go non-empty */
	wait_queue_head_t input_buffer_wq;

	/* data not yet read within current putfile_buffer_entry */
	struct putfile_active_buffer active_buf;

	/*
	 * <0 = failed, 0 = in-progress, >0 = successful;
	 * note that this must be set with req_list_lock, and if you set <0,
	 * it is your responsibility to also free up all of the other objects
	 * in this struct (like input_buffer_list, active_buf.parser_ctx)
	 * before releasing the lock
	 */
	int completion_status;
};
struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};
/* info for /dev/visorchipset */
static dev_t major_dev = -1; /* indicates major num for device */
/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action = 0;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   tool_action), &tool_action, sizeof(u8));
	return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}
static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int ret;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;

	ret = visorchannel_write
		(controlvm_channel,
		 offsetof(struct spar_controlvm_channel_protocol,
			  tool_action),
		 &tool_action, sizeof(u8));

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(toolaction);
static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_spar_indication efi_spar_indication;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   efi_spar_ind), &efi_spar_indication,
			  sizeof(struct efi_spar_indication));
	return scnprintf(buf, PAGE_SIZE, "%u\n",
			 efi_spar_indication.boot_to_tool);
}
static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, ret;
	struct efi_spar_indication efi_spar_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	efi_spar_indication.boot_to_tool = val;
	ret = visorchannel_write
		(controlvm_channel,
		 offsetof(struct spar_controlvm_channel_protocol,
			  efi_spar_ind), &(efi_spar_indication),
		 sizeof(struct efi_spar_indication));

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(boottotool);
static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error = 0;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_error),
			  &error, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int ret;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;

	ret = visorchannel_write
		(controlvm_channel,
		 offsetof(struct spar_controlvm_channel_protocol,
			  installation_error),
		 &error, sizeof(u32));

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(error);
static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id = 0;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_text_id),
			  &text_id, sizeof(u32));
	return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int ret;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;

	ret = visorchannel_write
		(controlvm_channel,
		 offsetof(struct spar_controlvm_channel_protocol,
			  installation_text_id),
		 &text_id, sizeof(u32));

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(textid);
static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps = 0;

	visorchannel_read(controlvm_channel,
			  offsetof(struct spar_controlvm_channel_protocol,
				   installation_remaining_steps),
			  &remaining_steps, sizeof(u16));
	return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int ret;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;

	ret = visorchannel_write
		(controlvm_channel,
		 offsetof(struct spar_controlvm_channel_protocol,
			  installation_remaining_steps),
		 &remaining_steps, sizeof(u16));

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(remaining_steps);
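/*
 * The *_show/*_store attributes above are exposed through the "install"
 * attribute group of /sys/devices/platform/visorchipset; each is a thin
 * read/write wrapper around one field of the controlvm channel header.
 */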
static uuid_le
parser_id_get(struct parser_context *ctx)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (!ctx)
		return NULL_UUID_LE;
	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	return phdr->id;
}
/*
 * Identifies which string field within the controlvm parameters header a
 * parser operation should select.
 */
enum PARSER_WHICH_STRING {
	PARSERSTRING_INITIATOR,
	PARSERSTRING_TARGET,
	PARSERSTRING_CONNECTION,
	PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};
static void
parser_param_start(struct parser_context *ctx,
		   enum PARSER_WHICH_STRING which_string)
{
	struct spar_controlvm_parameters_header *phdr = NULL;

	if (!ctx)
		return;

	phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
	switch (which_string) {
	case PARSERSTRING_INITIATOR:
		ctx->curr = ctx->data + phdr->initiator_offset;
		ctx->bytes_remaining = phdr->initiator_length;
		break;
	case PARSERSTRING_TARGET:
		ctx->curr = ctx->data + phdr->target_offset;
		ctx->bytes_remaining = phdr->target_length;
		break;
	case PARSERSTRING_CONNECTION:
		ctx->curr = ctx->data + phdr->connection_offset;
		ctx->bytes_remaining = phdr->connection_length;
		break;
	case PARSERSTRING_NAME:
		ctx->curr = ctx->data + phdr->name_offset;
		ctx->bytes_remaining = phdr->name_length;
		break;
	default:
		break;
	}
}
static void parser_done(struct parser_context *ctx)
{
	if (!ctx)
		return;
	controlvm_payload_bytes_buffered -= ctx->param_bytes;
	kfree(ctx);
}
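/*
 * parser_string_get() - return a kmalloc'd copy of the string selected by
 * parser_param_start(), scanning at most bytes_remaining bytes for a '\0'
 * terminator; the caller owns (and must kfree) the result.
 */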
static void *
parser_string_get(struct parser_context *ctx)
{
	u8 *pscan;
	unsigned long nscan;
	int value_length = -1;
	void *value = NULL;
	int i;

	if (!ctx)
		return NULL;
	pscan = ctx->curr;
	nscan = ctx->bytes_remaining;
	if (nscan == 0)
		return NULL;
	if (!pscan)
		return NULL;
	for (i = 0, value_length = -1; i < nscan; i++)
		if (pscan[i] == '\0') {
			value_length = i;
			break;
		}
	if (value_length < 0)	/* '\0' was not included in the length */
		value_length = nscan;
	value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
	if (!value)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	((u8 *)(value))[value_length] = '\0';
	return value;
}
struct visor_busdev {
	u32 bus_no;
	u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
	struct visor_device *vdev = to_visor_device(dev);
	struct visor_busdev *id = data;
	u32 bus_no = id->bus_no;
	u32 dev_no = id->dev_no;

	if ((vdev->chipset_bus_no == bus_no) &&
	    (vdev->chipset_dev_no == dev_no))
		return 1;

	return 0;
}
struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
					       struct visor_device *from)
{
	struct device *dev;
	struct device *dev_start = NULL;
	struct visor_device *vdev = NULL;
	struct visor_busdev id = {
		.bus_no = bus_no,
		.dev_no = dev_no
	};

	if (from)
		dev_start = &from->device;
	dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
			      match_visorbus_dev_by_id);
	if (dev)
		vdev = to_visor_device(dev);
	return vdev;
}
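/*
 * controlvm_init_response() - build a reply that echoes the original
 * message header; a negative 'response' marks the reply as failed and
 * stores the positive status code in completion_status.
 */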
static void
controlvm_init_response(struct controlvm_message *msg,
			struct controlvm_message_header *msg_hdr, int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32)(-response);
	}
}
static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
			       int response,
			       enum ultra_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	if (visorchannel_signalinsert(controlvm_channel,
				      CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}
static void
chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum ultra_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;

	POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto out_respond;
	}
	chipset_inited = 1;
	POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

	/*
	 * Set features to indicate we support parahotplug (if Command
	 * also supports it).
	 */
	features = inmsg->cmd.init_chipset.
		features & ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

	/*
	 * Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features |= ULTRA_CHIPSET_FEATURE_REPLY;

out_respond:
	if (inmsg->hdr.flags.response_expected)
		controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}
static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	if (outmsg.hdr.flags.test_message == 1)
		return;

	if (visorchannel_signalinsert(controlvm_channel,
				      CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}
static void controlvm_respond_physdev_changestate(
		struct controlvm_message_header *msg_hdr, int response,
		struct spar_segment_state state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.device_change_state.state = state;
	outmsg.cmd.device_change_state.flags.phys_device = 1;
	if (visorchannel_signalinsert(controlvm_channel,
				      CONTROLVM_QUEUE_REQUEST, &outmsg)) {
		return;
	}
}
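/*
 * save_crash_message() - stash a bus-create or device-create message in
 * the channel's saved-crash-message area so that a subsequent kdump
 * kernel can replay it (see setup_crash_devices_work_queue()).
 */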
enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};

static void
save_crash_message(struct controlvm_message *msg, enum crash_obj_type typ)
{
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (typ == CRASH_BUS) {
		if (visorchannel_write(controlvm_channel,
				       local_crash_msg_offset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_BUS_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	} else {
		local_crash_msg_offset += sizeof(struct controlvm_message);
		if (visorchannel_write(controlvm_channel,
				       local_crash_msg_offset,
				       msg,
				       sizeof(struct controlvm_message)) < 0) {
			POSTCODE_LINUX_2(SAVE_MSG_DEV_FAILURE_PC,
					 POSTCODE_SEVERITY_ERR);
			return;
		}
	}
}
static void
bus_responder(enum controlvm_id cmd_id,
	      struct controlvm_message_header *pending_msg_hdr,
	      int response)
{
	if (!pending_msg_hdr)
		return;		/* no controlvm response needed */

	if (pending_msg_hdr->id != (u32)cmd_id)
		return;

	controlvm_respond(pending_msg_hdr, response);
}
static void
device_changestate_responder(enum controlvm_id cmd_id,
			     struct visor_device *p, int response,
			     struct spar_segment_state response_state)
{
	struct controlvm_message outmsg;
	u32 bus_no = p->chipset_bus_no;
	u32 dev_no = p->chipset_dev_no;

	if (!p->pending_msg_hdr)
		return;		/* no controlvm response needed */
	if (p->pending_msg_hdr->id != cmd_id)
		return;

	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);

	outmsg.cmd.device_change_state.bus_no = bus_no;
	outmsg.cmd.device_change_state.dev_no = dev_no;
	outmsg.cmd.device_change_state.state = response_state;

	if (visorchannel_signalinsert(controlvm_channel,
				      CONTROLVM_QUEUE_REQUEST, &outmsg))
		return;
}
static void
device_responder(enum controlvm_id cmd_id,
		 struct controlvm_message_header *pending_msg_hdr,
		 int response)
{
	if (!pending_msg_hdr)
		return;		/* no controlvm response needed */

	if (pending_msg_hdr->id != (u32)cmd_id)
		return;

	controlvm_respond(pending_msg_hdr, response);
}
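/*
 * bus_epilog()/device_epilog() finish handling of a bus/device message:
 * when a response is expected they save a copy of the message header,
 * then either hand off to the visorbus chipset_*() callbacks on success
 * or respond immediately with the error.
 */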
static void
bus_epilog(struct visor_device *bus_info,
	   u32 cmd, struct controlvm_message_header *msg_hdr,
	   int response, bool need_response)
{
	struct controlvm_message_header *pmsg_hdr = NULL;

	if (!bus_info) {
		/*
		 * relying on a valid passed in response code
		 * be lazy and re-use msg_hdr for this failure, is this ok??
		 */
		pmsg_hdr = msg_hdr;
		goto out_respond;
	}

	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = bus_info->pending_msg_hdr;
		goto out_respond;
	}

	if (need_response) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
					 bus_info->chipset_bus_no,
					 POSTCODE_SEVERITY_ERR);
			return;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}

	if (response == CONTROLVM_RESP_SUCCESS) {
		switch (cmd) {
		case CONTROLVM_BUS_CREATE:
			chipset_bus_create(bus_info);
			break;
		case CONTROLVM_BUS_DESTROY:
			chipset_bus_destroy(bus_info);
			break;
		}
	} else {
out_respond:
		bus_responder(cmd, pmsg_hdr, response);
	}
}
static void
device_epilog(struct visor_device *dev_info,
	      struct spar_segment_state state, u32 cmd,
	      struct controlvm_message_header *msg_hdr, int response,
	      bool need_response, bool for_visorbus)
{
	struct controlvm_message_header *pmsg_hdr = NULL;

	if (!dev_info) {
		/*
		 * relying on a valid passed in response code
		 * be lazy and re-use msg_hdr for this failure, is this ok??
		 */
		pmsg_hdr = msg_hdr;
		goto out_respond;
	}

	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
		pmsg_hdr = dev_info->pending_msg_hdr;
		goto out_respond;
	}

	if (need_response) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
			goto out_respond;
		}

		memcpy(pmsg_hdr, msg_hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}

	if (response >= 0) {
		switch (cmd) {
		case CONTROLVM_DEVICE_CREATE:
			chipset_device_create(dev_info);
			break;
		case CONTROLVM_DEVICE_CHANGESTATE:
			/* ServerReady / ServerRunning / SegmentStateRunning */
			if (state.alive == segment_state_running.alive &&
			    state.operating ==
				segment_state_running.operating) {
				chipset_device_resume(dev_info);
			}
			/* ServerNotReady / ServerLost / SegmentStateStandby */
			else if (state.alive == segment_state_standby.alive &&
				 state.operating ==
				 segment_state_standby.operating) {
				/*
				 * technically this is standby case
				 * where server is lost
				 */
				chipset_device_pause(dev_info);
			}
			break;
		case CONTROLVM_DEVICE_DESTROY:
			chipset_device_destroy(dev_info);
			break;
		}
	} else {
out_respond:
		device_responder(cmd, pmsg_hdr, response);
	}
}
static void
bus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_bus.bus_no;
	int rc = CONTROLVM_RESP_SUCCESS;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && (bus_info->state.created == 1)) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto out_bus_epilog;
	}

	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto out_bus_epilog;
	}

	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

	POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   cmd->create_bus.channel_bytes,
					   GFP_KERNEL,
					   cmd->create_bus.bus_data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(bus_info);
		bus_info = NULL;
		goto out_bus_epilog;
	}
	bus_info->visorchannel = visorchannel;
	if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0)
		save_crash_message(inmsg, CRASH_BUS);

	POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

out_bus_epilog:
	bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
static void
bus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_bus.bus_no;
	struct visor_device *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info)
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	else if (bus_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);

	/* bus_info is freed as part of the busdevice_release function */
}
static void
bus_configure(struct controlvm_message *inmsg,
	      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_no = cmd->configure_bus.bus_no;
	POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
			 POSTCODE_SEVERITY_INFO);

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->state.created == 0) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
	} else if (bus_info->pending_msg_hdr) {
		POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
	} else {
		visorchannel_set_clientpartition
			(bus_info->visorchannel,
			 cmd->configure_bus.guest_handle);
		bus_info->partition_uuid = parser_id_get(parser_ctx);
		parser_param_start(parser_ctx, PARSERSTRING_NAME);
		bus_info->name = parser_string_get(parser_ctx);

		POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
				 POSTCODE_SEVERITY_INFO);
	}
	bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
		   rc, inmsg->hdr.flags.response_expected == 1);
}
static void
my_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info = NULL;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int rc = CONTROLVM_RESP_SUCCESS;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto out_respond;
	}

	if (bus_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
		goto out_respond;
	}

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && (dev_info->state.created == 1)) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
		goto out_respond;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		goto out_respond;
	}

	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	dev_info->inst = cmd->create_device.dev_inst_uuid;

	/* not sure where the best place to set the 'parent' */
	dev_info->device.parent = &bus_info->device;

	POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);

	visorchannel =
	       visorchannel_create_with_lock(cmd->create_device.channel_addr,
					     cmd->create_device.channel_bytes,
					     GFP_KERNEL,
					     cmd->create_device.data_type_uuid);

	if (!visorchannel) {
		POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
		kfree(dev_info);
		dev_info = NULL;
		goto out_respond;
	}
	dev_info->visorchannel = visorchannel;
	dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
	if (uuid_le_cmp(cmd->create_device.data_type_uuid,
			spar_vhba_channel_protocol_uuid) == 0)
		save_crash_message(inmsg, CRASH_DEV);

	POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
			 POSTCODE_SEVERITY_INFO);
out_respond:
	device_epilog(dev_info, segment_state_running,
		      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
		      inmsg->hdr.flags.response_expected == 1, 1);
}
static void
my_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct spar_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	} else if (dev_info->state.created == 0) {
		POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
				 POSTCODE_SEVERITY_ERR);
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	}
	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, state,
			      CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}
static void
my_device_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no = cmd->destroy_device.bus_no;
	u32 dev_no = cmd->destroy_device.dev_no;
	struct visor_device *dev_info;
	int rc = CONTROLVM_RESP_SUCCESS;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info)
		rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
	else if (dev_info->state.created == 0)
		rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

	if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
		device_epilog(dev_info, segment_state_running,
			      CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
			      inmsg->hdr.flags.response_expected == 1, 1);
}
/**
 * initialize_controlvm_payload_info() - init controlvm_payload_info struct
 * @phys_addr: the physical address of controlvm channel
 * @offset:    the offset to payload
 * @bytes:     the size of the payload in bytes
 * @info:      the returning valid struct
 *
 * When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.
 *
 * Return: CONTROLVM_RESP_SUCCESS for success or a negative for failure
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
				  struct visor_controlvm_payload_info *info)
{
	u8 *payload = NULL;

	if (!info)
		return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;

	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
	if ((offset == 0) || (bytes == 0))
		return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;

	payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
	if (!payload)
		return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;

	info->offset = offset;
	info->bytes = bytes;
	info->ptr = payload;

	return CONTROLVM_RESP_SUCCESS;
}
static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
	if (info->ptr) {
		memunmap(info->ptr);
		info->ptr = NULL;
	}
	memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}
static void
initialize_controlvm_payload(void)
{
	u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
	u64 payload_offset = 0;
	u32 payload_bytes = 0;

	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_offset),
			      &payload_offset, sizeof(payload_offset)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       request_payload_bytes),
			      &payload_bytes, sizeof(payload_bytes)) < 0) {
		POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	initialize_controlvm_payload_info(phys_addr,
					  payload_offset, payload_bytes,
					  &controlvm_payload_info);
}
/*
 * The general parahotplug flow works as follows. The visorchipset
 * driver receives a DEVICE_CHANGESTATE message from Command
 * specifying a physical device to enable or disable. The CONTROLVM
 * message handler calls parahotplug_process_message, which then adds
 * the message to a global list and kicks off a udev event which
 * causes a user level script to enable or disable the specified
 * device. The udev script then writes to
 * /sys/devices/platform/visorchipset/parahotplug/deviceenabled (or
 * devicedisabled), which causes parahotplug_request_complete to get
 * called, at which point the appropriate CONTROLVM message is
 * retrieved from the list and responded to.
 */

#define PARAHOTPLUG_TIMEOUT_MS 2000
/**
 * parahotplug_next_id() - generate unique int to match an outstanding
 *                         CONTROLVM message with a udev script response
 *
 * Return: a unique integer value
 */
static int
parahotplug_next_id(void)
{
	static atomic_t id = ATOMIC_INIT(0);

	return atomic_inc_return(&id);
}
/**
 * parahotplug_next_expiration() - returns the time (in jiffies) when a
 *                                 CONTROLVM message on the list should expire
 *                                 -- PARAHOTPLUG_TIMEOUT_MS in the future
 *
 * Return: expected expiration time (in jiffies)
 */
static unsigned long
parahotplug_next_expiration(void)
{
	return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
}
/**
 * parahotplug_request_create() - create a parahotplug_request, which is
 *                                basically a wrapper for a CONTROLVM_MESSAGE
 *                                that we can stick on a list
 * @msg: the message to insert in the request
 *
 * Return: the request containing the provided message
 */
static struct parahotplug_request *
parahotplug_request_create(struct controlvm_message *msg)
{
	struct parahotplug_request *req;

	req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
	if (!req)
		return NULL;

	req->id = parahotplug_next_id();
	req->expiration = parahotplug_next_expiration();
	req->msg = *msg;

	return req;
}
/**
 * parahotplug_request_destroy() - free a parahotplug_request
 * @req: the request to deallocate
 */
static void
parahotplug_request_destroy(struct parahotplug_request *req)
{
	kfree(req);
}
static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock); /* lock for above */
/**
 * parahotplug_request_complete() - mark request as complete
 * @id:     the id of the request
 * @active: indicates whether the request is assigned to active partition
 *
 * Called from the sysfs store handlers, which means the user script has
 * finished the enable/disable. Find the matching identifier, and
 * respond to the CONTROLVM message with success.
 *
 * Return: 0 on success or -EINVAL on failure
 */
static int
parahotplug_request_complete(int id, u16 active)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	/* Look for a request matching "id". */
	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);
		if (req->id == id) {
			/*
			 * Found a match. Remove it from the list and
			 * respond.
			 */
			list_del(pos);
			spin_unlock(&parahotplug_request_list_lock);
			req->msg.cmd.device_change_state.state.active = active;
			if (req->msg.hdr.flags.response_expected)
				controlvm_respond_physdev_changestate(
					&req->msg.hdr, CONTROLVM_RESP_SUCCESS,
					req->msg.cmd.device_change_state.state);
			parahotplug_request_destroy(req);
			return 0;
		}
	}

	spin_unlock(&parahotplug_request_list_lock);
	return -EINVAL;
}
/**
 * devicedisabled_store() - disables the hotplug device
 * @dev:   sysfs interface variable not utilized in this function
 * @attr:  sysfs interface variable not utilized in this function
 * @buf:   buffer containing the device id
 * @count: the size of the buffer
 *
 * The parahotplug/devicedisabled interface gets called by our support script
 * when an SR-IOV device has been shut down. The ID is passed to the script
 * and then passed back when the device has been removed.
 *
 * Return: the size of the buffer for success or negative for error
 */
static ssize_t devicedisabled_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned int id;
	int err;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	err = parahotplug_request_complete(id, 0);
	if (err < 0)
		return err;
	return count;
}
static DEVICE_ATTR_WO(devicedisabled);
/**
 * deviceenabled_store() - enables the hotplug device
 * @dev:   sysfs interface variable not utilized in this function
 * @attr:  sysfs interface variable not utilized in this function
 * @buf:   buffer containing the device id
 * @count: the size of the buffer
 *
 * The parahotplug/deviceenabled interface gets called by our support script
 * when an SR-IOV device has been recovered. The ID is passed to the script
 * and then passed back when the device has been brought back up.
 *
 * Return: the size of the buffer for success or negative for error
 */
static ssize_t deviceenabled_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned int id;

	if (kstrtouint(buf, 10, &id))
		return -EINVAL;

	parahotplug_request_complete(id, 1);
	return count;
}
static DEVICE_ATTR_WO(deviceenabled);
static struct attribute *visorchipset_install_attrs[] = {
	&dev_attr_toolaction.attr,
	&dev_attr_boottotool.attr,
	&dev_attr_error.attr,
	&dev_attr_textid.attr,
	&dev_attr_remaining_steps.attr,
	NULL
};

static struct attribute_group visorchipset_install_group = {
	.name = "install",
	.attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
	&dev_attr_devicedisabled.attr,
	&dev_attr_deviceenabled.attr,
	NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
	.name = "parahotplug",
	.attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
	&visorchipset_install_group,
	&visorchipset_parahotplug_group,
	NULL
};
static void visorchipset_dev_release(struct device *dev)
{
}

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
	.name = "visorchipset",
	.id = -1,
	.dev.groups = visorchipset_dev_groups,
	.dev.release = visorchipset_dev_release,
};
/**
 * parahotplug_request_kickoff() - initiate parahotplug request
 * @req: the request to initiate
 *
 * Cause uevent to run the user level script to do the disable/enable specified
 * in the parahotplug_request.
 */
static void
parahotplug_request_kickoff(struct parahotplug_request *req)
{
	struct controlvm_message_packet *cmd = &req->msg.cmd;
	char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
	     env_func[40];
	char *envp[] = {
		env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
	};

	sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
	sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
	sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
		cmd->device_change_state.state.active);
	sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
		cmd->device_change_state.bus_no);
	sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
		cmd->device_change_state.dev_no >> 3);
	sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
		cmd->device_change_state.dev_no & 0x7);

	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
}
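/*
 * Example environment for bus 1, device 0, function 2 being disabled
 * (the id below is illustrative; dev_no packs device and function as
 * (device << 3) | function):
 *   SPAR_PARAHOTPLUG=1 SPAR_PARAHOTPLUG_ID=7 SPAR_PARAHOTPLUG_STATE=0
 *   SPAR_PARAHOTPLUG_BUS=1 SPAR_PARAHOTPLUG_DEVICE=0
 *   SPAR_PARAHOTPLUG_FUNCTION=2
 */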
/**
 * parahotplug_process_message() - enables or disables a PCI device by kicking
 *                                 off a udev script
 * @inmsg: the message indicating whether to enable or disable
 */
static void
parahotplug_process_message(struct controlvm_message *inmsg)
{
	struct parahotplug_request *req;

	req = parahotplug_request_create(inmsg);

	if (!req)
		return;

	if (inmsg->cmd.device_change_state.state.active) {
		/*
		 * For enable messages, just respond with success
		 * right away. This is a bit of a hack, but there are
		 * issues with the early enable messages we get (with
		 * either the udev script not detecting that the device
		 * is up, or not getting called at all). Fortunately
		 * the messages that get lost don't matter anyway, as
		 * devices are automatically enabled at
		 * initialization.
		 */
		parahotplug_request_kickoff(req);
		controlvm_respond_physdev_changestate
			(&inmsg->hdr,
			 CONTROLVM_RESP_SUCCESS,
			 inmsg->cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	} else {
		/*
		 * For disable messages, add the request to the
		 * request list before kicking off the udev script. It
		 * won't get responded to until the script has
		 * indicated it's done.
		 */
		spin_lock(&parahotplug_request_list_lock);
		list_add_tail(&req->list, &parahotplug_request_list);
		spin_unlock(&parahotplug_request_list_lock);

		parahotplug_request_kickoff(req);
	}
}
/**
 * visorchipset_chipset_ready() - sends chipset_ready action
 *
 * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: CONTROLVM_RESP_SUCCESS
 */
static int
visorchipset_chipset_ready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
	return CONTROLVM_RESP_SUCCESS;
}

static int
visorchipset_chipset_selftest(void)
{
	char env_selftest[20];
	char *envp[] = { env_selftest, NULL };

	sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
	kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
			   envp);
	return CONTROLVM_RESP_SUCCESS;
}

/**
 * visorchipset_chipset_notready() - sends chipset_notready action
 *
 * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 *
 * Return: CONTROLVM_RESP_SUCCESS
 */
static int
visorchipset_chipset_notready(void)
{
	kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
	return CONTROLVM_RESP_SUCCESS;
}
static void
chipset_ready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_ready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;

	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

static void
chipset_selftest(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_selftest();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;

	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}

static void
chipset_notready(struct controlvm_message_header *msg_hdr)
{
	int rc = visorchipset_chipset_notready();

	if (rc != CONTROLVM_RESP_SUCCESS)
		rc = -rc;

	if (msg_hdr->flags.response_expected)
		controlvm_respond(msg_hdr, rc);
}
static inline unsigned int
issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
{
	struct vmcall_io_controlvm_addr_params params;
	int result = VMCALL_SUCCESS;
	u64 physaddr;

	physaddr = virt_to_phys(&params);
	ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
	if (VMCALL_SUCCESSFUL(result)) {
		*control_addr = params.address;
		*control_bytes = params.channel_bytes;
	}
	return result;
}
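/*
 * The IO vmcall passes the physical address of a parameter block to the
 * hypervisor, which fills in the controlvm channel address and size on
 * success.
 */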
static u64 controlvm_get_channel_address(void)
{
	u64 addr = 0;
	u32 size = 0;

	if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
		return 0;

	return addr;
}
static void
setup_crash_devices_work_queue(struct work_struct *work)
{
	struct controlvm_message local_crash_bus_msg;
	struct controlvm_message local_crash_dev_msg;
	struct controlvm_message msg;
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;

	POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);

	/* send init chipset msg */
	msg.hdr.id = CONTROLVM_CHIPSET_INIT;
	msg.cmd.init_chipset.bus_count = 23;
	msg.cmd.init_chipset.switch_count = 0;

	chipset_init(&msg);

	/* get saved message count */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_count),
			      &local_crash_msg_count, sizeof(u16)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
				 local_crash_msg_count,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* get saved crash message offset */
	if (visorchannel_read(controlvm_channel,
			      offsetof(struct spar_controlvm_channel_protocol,
				       saved_crash_message_offset),
			      &local_crash_msg_offset, sizeof(u32)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage bus offset */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset,
			      &local_crash_bus_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* read create device message for storage device */
	if (visorchannel_read(controlvm_channel,
			      local_crash_msg_offset +
			      sizeof(struct controlvm_message),
			      &local_crash_dev_msg,
			      sizeof(struct controlvm_message)) < 0) {
		POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse IOVM create bus message */
	if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
		bus_create(&local_crash_bus_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}

	/* reuse create device message for storage device */
	if (local_crash_dev_msg.cmd.create_device.channel_addr) {
		my_device_create(&local_crash_dev_msg);
	} else {
		POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
				 POSTCODE_SEVERITY_ERR);
		return;
	}
	POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
}
void
bus_create_response(struct visor_device *bus_info, int response)
{
	if (response >= 0)
		bus_info->state.created = 1;

	bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}

void
bus_destroy_response(struct visor_device *bus_info, int response)
{
	bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
		      response);

	kfree(bus_info->pending_msg_hdr);
	bus_info->pending_msg_hdr = NULL;
}

void
device_create_response(struct visor_device *dev_info, int response)
{
	if (response >= 0)
		dev_info->state.created = 1;

	device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

void
device_destroy_response(struct visor_device *dev_info, int response)
{
	device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
			 response);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

void
device_pause_response(struct visor_device *dev_info,
		      int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_standby);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}

void
device_resume_response(struct visor_device *dev_info, int response)
{
	device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
				     dev_info, response,
				     segment_state_running);

	kfree(dev_info->pending_msg_hdr);
	dev_info->pending_msg_hdr = NULL;
}
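/*
 * The *_response() functions above are called back by visorbus once a
 * chipset_*() operation completes; each sends the deferred controlvm
 * reply and frees the pending message header saved by the epilog code.
 */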
static int
visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long physaddr = 0;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	u64 addr = 0;

	/* sv_enable_dfp(); */
	if (offset & (PAGE_SIZE - 1))
		return -ENXIO;	/* need aligned offsets */

	switch (offset) {
	case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
		vma->vm_flags |= VM_IO;
		if (!*file_controlvm_channel)
			return -ENXIO;

		visorchannel_read
			(*file_controlvm_channel,
			 offsetof(struct spar_controlvm_channel_protocol,
				  gp_control_channel),
			 &addr, sizeof(addr));
		if (!addr)
			return -ENXIO;

		physaddr = (unsigned long)addr;
		if (remap_pfn_range(vma, vma->vm_start,
				    physaddr >> PAGE_SHIFT,
				    vma->vm_end - vma->vm_start,
				    /*pgprot_noncached */
				    (vma->vm_page_prot))) {
			return -EAGAIN;
		}
		break;
	default:
		return -ENXIO;
	}
	return 0;
}
static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
{
	u64 result = VMCALL_SUCCESS;
	u64 physaddr = 0;

	ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
			result);
	return result;
}

static inline int issue_vmcall_update_physical_time(u64 adjustment)
{
	int result = VMCALL_SUCCESS;

	ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
	return result;
}
static long visorchipset_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	u64 adjustment;
	s64 vrtc_offset;

	switch (cmd) {
	case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
		/* get the physical rtc offset */
		vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
		if (copy_to_user((void __user *)arg, &vrtc_offset,
				 sizeof(vrtc_offset))) {
			return -EFAULT;
		}
		return 0;
	case VMCALL_UPDATE_PHYSICAL_TIME:
		if (copy_from_user(&adjustment, (void __user *)arg,
				   sizeof(adjustment))) {
			return -EFAULT;
		}
		return issue_vmcall_update_physical_time(adjustment);
	default:
		return -EFAULT;
	}
}
static const struct file_operations visorchipset_fops = {
	.owner = THIS_MODULE,
	.open = visorchipset_open,
	.read = NULL,
	.write = NULL,
	.unlocked_ioctl = visorchipset_ioctl,
	.release = visorchipset_release,
	.mmap = visorchipset_mmap,
};
static int
visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
{
	int rc = 0;

	file_controlvm_channel = controlvm_channel;
	cdev_init(&file_cdev, &visorchipset_fops);
	file_cdev.owner = THIS_MODULE;
	if (MAJOR(major_dev) == 0) {
		rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
		/* dynamic major device number registration required */
		if (rc < 0)
			return rc;
	} else {
		/* static major device number registration required */
		rc = register_chrdev_region(major_dev, 1, "visorchipset");
		if (rc < 0)
			return rc;
	}
	rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
	if (rc < 0) {
		unregister_chrdev_region(major_dev, 1);
		return rc;
	}
	return 0;
}
static void
visorchipset_file_cleanup(dev_t major_dev)
{
	if (file_cdev.ops)
		cdev_del(&file_cdev);
	file_cdev.ops = NULL;
	unregister_chrdev_region(major_dev, 1);
}
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
	int allocbytes = sizeof(struct parser_context) + bytes;
	struct parser_context *ctx;

	if (retry)
		*retry = false;

	/*
	 * allocate an extra byte so the payload is always
	 * '\0'-terminated
	 */
	allocbytes++;
	if ((controlvm_payload_bytes_buffered + bytes)
	    > MAX_CONTROLVM_PAYLOAD_BYTES) {
		if (retry)
			*retry = true;
		return NULL;
	}
	ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
	if (!ctx) {
		if (retry)
			*retry = true;
		return NULL;
	}

	ctx->allocbytes = allocbytes;
	ctx->param_bytes = bytes;
	ctx->curr = NULL;
	ctx->bytes_remaining = 0;
	ctx->byte_stream = false;
	if (local) {
		void *p;

		if (addr > virt_to_phys(high_memory - 1))
			goto err_finish_ctx;
		p = __va((unsigned long)(addr));
		memcpy(ctx->data, p, bytes);
	} else {
		void *mapping = memremap(addr, bytes, MEMREMAP_WB);

		if (!mapping)
			goto err_finish_ctx;
		memcpy(ctx->data, mapping, bytes);
		memunmap(mapping);
	}

	ctx->byte_stream = true;
	controlvm_payload_bytes_buffered += ctx->param_bytes;

	return ctx;

err_finish_ctx:
	parser_done(ctx);
	return NULL;
}
/**
 * handle_command() - process a controlvm message
 * @inmsg:        the message to process
 * @channel_addr: address of the controlvm channel
 *
 * Return:
 *    false - this function will return false only in the case where the
 *            controlvm message was NOT processed, but processing must be
 *            retried before reading the next controlvm message; a
 *            scenario where this can occur is when we need to throttle
 *            the allocation of memory in which to copy out controlvm
 *            payload data
 *    true  - processing of the controlvm message completed,
 *            either successfully or with an error
 */
static bool
handle_command(struct controlvm_message inmsg, u64 channel_addr)
{
	struct controlvm_message_packet *cmd = &inmsg.cmd;
	u64 parm_addr;
	u32 parm_bytes;
	struct parser_context *parser_ctx = NULL;
	bool local_addr;
	struct controlvm_message ackmsg;

	/* create parsing context if necessary */
	local_addr = (inmsg.hdr.flags.test_message == 1);
	if (channel_addr == 0)
		return true;
	parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
	parm_bytes = inmsg.hdr.payload_bytes;

	/*
	 * Parameter and channel addresses within test messages actually lie
	 * within our OS-controlled memory. We need to know that, because it
	 * makes a difference in how we compute the virtual address.
	 */
	if (parm_addr && parm_bytes) {
		bool retry = false;

		parser_ctx =
		    parser_init_byte_stream(parm_addr, parm_bytes,
					    local_addr, &retry);
		if (!parser_ctx && retry)
			return false;
	}

	if (!local_addr) {
		controlvm_init_response(&ackmsg, &inmsg.hdr,
					CONTROLVM_RESP_SUCCESS);
		if (controlvm_channel)
			visorchannel_signalinsert(controlvm_channel,
						  CONTROLVM_QUEUE_ACK,
						  &ackmsg);
	}
	switch (inmsg.hdr.id) {
	case CONTROLVM_CHIPSET_INIT:
		chipset_init(&inmsg);
		break;
	case CONTROLVM_BUS_CREATE:
		bus_create(&inmsg);
		break;
	case CONTROLVM_BUS_DESTROY:
		bus_destroy(&inmsg);
		break;
	case CONTROLVM_BUS_CONFIGURE:
		bus_configure(&inmsg, parser_ctx);
		break;
	case CONTROLVM_DEVICE_CREATE:
		my_device_create(&inmsg);
		break;
	case CONTROLVM_DEVICE_CHANGESTATE:
		if (cmd->device_change_state.flags.phys_device) {
			parahotplug_process_message(&inmsg);
		} else {
			/*
			 * save the hdr and cmd structures for later use
			 * when sending back the response to Command
			 */
			my_device_changestate(&inmsg);
		}
		break;
	case CONTROLVM_DEVICE_DESTROY:
		my_device_destroy(&inmsg);
		break;
	case CONTROLVM_DEVICE_CONFIGURE:
		/* no op for now, just send a respond that we passed */
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
		break;
	case CONTROLVM_CHIPSET_READY:
		chipset_ready(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_SELFTEST:
		chipset_selftest(&inmsg.hdr);
		break;
	case CONTROLVM_CHIPSET_STOP:
		chipset_notready(&inmsg.hdr);
		break;
	default:
		if (inmsg.hdr.flags.response_expected)
			controlvm_respond
				(&inmsg.hdr,
				 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
		break;
	}

	if (parser_ctx) {
		parser_done(parser_ctx);
		parser_ctx = NULL;
	}
	return true;
}
/**
 * read_controlvm_event() - retrieves the next message from the
 *                          CONTROLVM_QUEUE_EVENT queue in the controlvm
 *                          channel
 * @msg: pointer to the retrieved message
 *
 * Return: true if a valid message was retrieved or false otherwise
 */
static bool
read_controlvm_event(struct controlvm_message *msg)
{
	if (!visorchannel_signalremove(controlvm_channel,
				       CONTROLVM_QUEUE_EVENT, msg)) {
		/* got a message */
		if (msg->hdr.flags.test_message == 1)
			return false;
		return true;
	}
	return false;
}
/**
 * parahotplug_process_list() - remove any request from the list that's been on
 *                              there too long and respond with an error
 */
static void
parahotplug_process_list(void)
{
	struct list_head *pos;
	struct list_head *tmp;

	spin_lock(&parahotplug_request_list_lock);

	list_for_each_safe(pos, tmp, &parahotplug_request_list) {
		struct parahotplug_request *req =
		    list_entry(pos, struct parahotplug_request, list);

		if (!time_after_eq(jiffies, req->expiration))
			continue;

		list_del(pos);
		if (req->msg.hdr.flags.response_expected)
			controlvm_respond_physdev_changestate(
				&req->msg.hdr,
				CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
				req->msg.cmd.device_change_state.state);
		parahotplug_request_destroy(req);
	}

	spin_unlock(&parahotplug_request_list_lock);
}
static void
controlvm_periodic_work(struct work_struct *work)
{
	struct controlvm_message inmsg;
	bool got_command = false;
	bool handle_command_failed = false;

	while (!visorchannel_signalremove(controlvm_channel,
					  CONTROLVM_QUEUE_RESPONSE,
					  &inmsg))
		;
	if (!got_command) {
		if (controlvm_pending_msg_valid) {
			/*
			 * we throttled processing of a prior
			 * msg, so try to process it again
			 * rather than reading a new one
			 */
			inmsg = controlvm_pending_msg;
			controlvm_pending_msg_valid = false;
			got_command = true;
		} else {
			got_command = read_controlvm_event(&inmsg);
		}
	}

	handle_command_failed = false;
	while (got_command && (!handle_command_failed)) {
		most_recent_message_jiffies = jiffies;
		if (handle_command(inmsg,
				   visorchannel_get_physaddr
				   (controlvm_channel)))
			got_command = read_controlvm_event(&inmsg);
		else {
			/*
			 * this is a scenario where throttling
			 * is required, but probably NOT an
			 * error...; we stash the current
			 * controlvm msg so we will attempt to
			 * reprocess it on our next loop
			 */
			handle_command_failed = true;
			controlvm_pending_msg = inmsg;
			controlvm_pending_msg_valid = true;
		}
	}

	/* parahotplug_worker */
	parahotplug_process_list();

	if (time_after(jiffies,
		       most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
		/*
		 * it's been longer than MIN_IDLE_SECONDS since we
		 * processed our last controlvm message; slow down the
		 * polling
		 */
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
	} else {
		if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
			poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	}

	schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
}
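/*
 * Example cadence (assuming HZ=250, which is config-dependent): fast
 * polling re-arms the work every jiffy (~4 ms), slow polling every
 * 100 jiffies (~400 ms).
 */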
static int
visorchipset_init(struct acpi_device *acpi_device)
{
	int err = -ENODEV;
	u64 addr;
	uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;

	addr = controlvm_get_channel_address();
	if (!addr)
		goto error;

	memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));

	controlvm_channel = visorchannel_create_with_lock(addr, 0,
							  GFP_KERNEL, uuid);
	if (!controlvm_channel)
		goto error;

	if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
			visorchannel_get_header(controlvm_channel))) {
		initialize_controlvm_payload();
	} else {
		goto error_destroy_channel;
	}

	major_dev = MKDEV(visorchipset_major, 0);
	err = visorchipset_file_init(major_dev, &controlvm_channel);
	if (err < 0)
		goto error_destroy_payload;

	/* if booting in a crash kernel */
	if (is_kdump_kernel())
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  setup_crash_devices_work_queue);
	else
		INIT_DELAYED_WORK(&periodic_controlvm_work,
				  controlvm_periodic_work);

	most_recent_message_jiffies = jiffies;
	poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
	schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);

	visorchipset_platform_device.dev.devt = major_dev;
	if (platform_device_register(&visorchipset_platform_device) < 0) {
		POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
		err = -ENODEV;
		goto error_cancel_work;
	}
	POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);

	err = visorbus_init();
	if (err < 0)
		goto error_unregister;

	return 0;

error_unregister:
	platform_device_unregister(&visorchipset_platform_device);

error_cancel_work:
	cancel_delayed_work_sync(&periodic_controlvm_work);
	visorchipset_file_cleanup(major_dev);

error_destroy_payload:
	destroy_controlvm_payload_info(&controlvm_payload_info);

error_destroy_channel:
	visorchannel_destroy(controlvm_channel);

error:
	POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
	return err;
}
static int
visorchipset_exit(struct acpi_device *acpi_device)
{
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	visorbus_exit();

	cancel_delayed_work_sync(&periodic_controlvm_work);
	destroy_controlvm_payload_info(&controlvm_payload_info);

	visorchannel_destroy(controlvm_channel);

	visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
	platform_device_unregister(&visorchipset_platform_device);
	POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);

	return 0;
}
static const struct acpi_device_id unisys_device_ids[] = {
	{"PNP0A07", 0},
	{"", 0},
};

static struct acpi_driver unisys_acpi_driver = {
	.name = "unisys_acpi",
	.class = "unisys_acpi_class",
	.owner = THIS_MODULE,
	.ids = unisys_device_ids,
	.ops = {
		.add = visorchipset_init,
		.remove = visorchipset_exit,
	},
};

MODULE_DEVICE_TABLE(acpi, unisys_device_ids);
static __init uint32_t visorutil_spar_detect(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		/* check the ID */
		cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
		return (ebx == UNISYS_SPAR_ID_EBX) &&
		       (ecx == UNISYS_SPAR_ID_ECX) &&
		       (edx == UNISYS_SPAR_ID_EDX);
	}
	return 0;
}
static int init_unisys(void)
{
	int result;

	if (!visorutil_spar_detect())
		return -ENODEV;

	result = acpi_bus_register_driver(&unisys_acpi_driver);
	if (result)
		return -ENODEV;

	pr_info("Unisys Visorchipset Driver Loaded.\n");
	return 0;
}

static void exit_unisys(void)
{
	acpi_bus_unregister_driver(&unisys_acpi_driver);
}
module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
		 "major device number to use for the device node");

module_init(init_unisys);
module_exit(exit_unisys);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses");