// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/security.h>
#include <linux/debugfs.h>
#include <linux/mutex.h>
#include <cxlmem.h>
#include <cxl.h>

#include "core.h"
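/*
 * Opt-in knob permitting all RAW opcodes; toggled via the "raw_allow_all"
 * debugfs attribute created in cxl_mbox_init() below.
 */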
static bool cxl_raw_allow_all;
/*
 * Core implementation of the CXL 2.0 Type-3 Memory Device Mailbox. The
 * implementation is used by the cxl_pci driver to initialize the device
 * and implement the cxl_mem.h IOCTL UAPI. It also implements the
 * backend of the cxl_pmem_ctl() transport for LIBNVDIMM.
 */
#define cxl_for_each_cmd(cmd)                                                  \
	for ((cmd) = &cxl_mem_commands[0];                                     \
	     ((cmd) - cxl_mem_commands) < ARRAY_SIZE(cxl_mem_commands); (cmd)++)
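/* Populate one cxl_mem_commands[] entry, indexed by its UAPI command id */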
#define CXL_CMD(_id, sin, sout, _flags)                                        \
	[CXL_MEM_COMMAND_ID_##_id] = {                                         \
	.info = {                                                              \
			.id = CXL_MEM_COMMAND_ID_##_id,                        \
			.size_in = sin,                                        \
			.size_out = sout,                                      \
		},                                                             \
	.opcode = CXL_MBOX_OP_##_id,                                           \
	.flags = _flags,                                                       \
	}

/* Sentinel for commands whose input or output payload length is variable */
#define CXL_VARIABLE_PAYLOAD	~0U
/*
 * This table defines the supported mailbox commands for the driver. This table
 * is made up of a UAPI structure. Non-negative values as parameters in the
 * table will be validated against the user's input. For example, if size_in is
 * 0, and the user passed in 1, it is an error.
 */
static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
	CXL_CMD(IDENTIFY, 0, 0x43, CXL_CMD_FLAG_FORCE_ENABLE),
#ifdef CONFIG_CXL_MEM_RAW_COMMANDS
	CXL_CMD(RAW, CXL_VARIABLE_PAYLOAD, CXL_VARIABLE_PAYLOAD, 0),
#endif
	CXL_CMD(GET_SUPPORTED_LOGS, 0, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(GET_FW_INFO, 0, 0x50, 0),
	CXL_CMD(GET_PARTITION_INFO, 0, 0x20, 0),
	CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0),
	CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE),
	CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0),
	CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0),
	CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0),
	CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0),
	CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
	CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
	CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0),
	CXL_CMD(INJECT_POISON, 0x8, 0, 0),
	CXL_CMD(CLEAR_POISON, 0x48, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
	CXL_CMD(SCAN_MEDIA, 0x11, 0, 0),
	CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0),
};
/*
 * Commands that RAW doesn't permit. The rationale for each:
 *
 * CXL_MBOX_OP_ACTIVATE_FW: Firmware activation requires adjustment /
 * coordination of transaction timeout values at the root bridge level.
 *
 * CXL_MBOX_OP_SET_PARTITION_INFO: The device memory map may change live
 * and needs to be coordinated with HDM updates.
 *
 * CXL_MBOX_OP_SET_LSA: The label storage area may be cached by the
 * driver and any writes from userspace invalidate those contents.
 *
 * CXL_MBOX_OP_SET_SHUTDOWN_STATE: Set shutdown state assumes no writes
 * to the device after it is marked clean; userspace cannot make that
 * assertion.
 *
 * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that
 * is kept up to date with patrol notifications and error management.
 */
static u16 cxl_disabled_raw_commands[] = {
	CXL_MBOX_OP_ACTIVATE_FW,
	CXL_MBOX_OP_SET_PARTITION_INFO,
	CXL_MBOX_OP_SET_LSA,
	CXL_MBOX_OP_SET_SHUTDOWN_STATE,
	CXL_MBOX_OP_SCAN_MEDIA,
	CXL_MBOX_OP_GET_SCAN_MEDIA,
};
/*
 * Command sets that RAW doesn't permit. All opcodes in this set are
 * disabled because they pass plain text security payloads over the
 * user/kernel boundary. This functionality is intended to be wrapped
 * behind the keys ABI which allows for encrypted payloads in the UAPI.
 */
static u8 security_command_sets[] = {
	0x44, /* Sanitize */
	0x45, /* Persistent Memory Data-at-rest Security */
	0x46, /* Security Passthrough */
};
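/* The command set is encoded in the upper byte of a 16-bit mailbox opcode */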
static bool cxl_is_security_command(u16 opcode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(security_command_sets); i++)
		if (security_command_sets[i] == (opcode >> 8))
			return true;
	return false;
}
static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
{
	struct cxl_mem_command *c;

	cxl_for_each_cmd(c)
		if (c->opcode == opcode)
			return c;

	return NULL;
}
static const char *cxl_mem_opcode_to_name(u16 opcode)
{
	struct cxl_mem_command *c;

	c = cxl_mem_find_command(opcode);
	if (!c)
		return NULL;

	return cxl_command_names[c->info.id].name;
}
/**
 * cxl_mbox_send_cmd() - Send a mailbox command to a device.
 * @cxlds: The device data for the operation
 * @opcode: Opcode for the mailbox command.
 * @in: The input payload for the mailbox command.
 * @in_size: The length of the input payload
 * @out: Caller allocated buffer for the output.
 * @out_size: Expected size of output.
 *
 * Context: Any context.
 * Return:
 *  * %>=0	- Number of bytes returned in @out.
 *  * %-E2BIG	- Payload is too large for hardware.
 *  * %-EBUSY	- Couldn't acquire exclusive mailbox access.
 *  * %-EFAULT	- Hardware error occurred.
 *  * %-ENXIO	- Command completed, but device reported an error.
 *  * %-EIO	- Unexpected output size.
 *
 * Mailbox commands may execute successfully even though the device itself
 * reported an error. While this distinction can be useful for commands from
 * userspace, the kernel will only be able to use results when both are
 * successful.
 */
int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
		      size_t in_size, void *out, size_t out_size)
{
	const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
	struct cxl_mbox_cmd mbox_cmd = {
		.opcode = opcode,
		.payload_in = in,
		.size_in = in_size,
		.size_out = out_size,
		.payload_out = out,
	};
	int rc;

	if (out_size > cxlds->payload_size)
		return -E2BIG;

	rc = cxlds->mbox_send(cxlds, &mbox_cmd);
	if (rc)
		return rc;

	if (mbox_cmd.return_code != CXL_MBOX_CMD_RC_SUCCESS)
		return cxl_mbox_cmd_rc2errno(&mbox_cmd);

	/*
	 * Variable sized commands can't be validated and so it's up to the
	 * caller to do that if they wish.
	 */
	if (cmd->info.size_out != CXL_VARIABLE_PAYLOAD) {
		if (mbox_cmd.size_out != out_size)
			return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL);
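/*
 * Example (an illustrative sketch, mirroring how cxl_dev_state_identify()
 * below uses this helper with a fixed-size output payload):
 *
 *	struct cxl_mbox_identify id;
 *	int rc;
 *
 *	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0,
 *			       &id, sizeof(id));
 *	if (rc < 0)
 *		return rc;
 */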
static bool cxl_mem_raw_command_allowed(u16 opcode)
{
	int i;

	if (!IS_ENABLED(CONFIG_CXL_MEM_RAW_COMMANDS))
		return false;

	if (security_locked_down(LOCKDOWN_PCI_ACCESS))
		return false;

	if (cxl_raw_allow_all)
		return true;

	if (cxl_is_security_command(opcode))
		return false;

	for (i = 0; i < ARRAY_SIZE(cxl_disabled_raw_commands); i++)
		if (cxl_disabled_raw_commands[i] == opcode)
			return false;

	return true;
}
/**
 * cxl_payload_from_user_allowed() - Check contents of in_payload.
 * @opcode: The mailbox command opcode.
 * @payload_in: Pointer to the input payload passed in from user space.
 *
 * Return:
 *  * true	- payload_in passes check for @opcode.
 *  * false	- payload_in contains invalid or unsupported values.
 *
 * The driver may inspect payload contents before sending a mailbox
 * command from user space to the device. The intent is to reject
 * commands with input payloads that are known to be unsafe. This
 * check is not intended to replace the user's careful selection of
 * mailbox command parameters and makes no guarantee that the user
 * command will succeed, nor that it is appropriate.
 *
 * The specific checks are determined by the opcode.
 */
static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in)
{
	switch (opcode) {
	case CXL_MBOX_OP_SET_PARTITION_INFO: {
		struct cxl_mbox_set_partition_info *pi = payload_in;

		/*
		 * Immediate partition changes are disallowed; a live
		 * memory map change needs kernel coordination.
		 */
		if (pi->flags & CXL_SET_PARTITION_IMMEDIATE_FLAG)
			return false;
		break;
	}
	default:
		break;
	}

	return true;
}
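/*
 * Build a cxl_mbox_cmd from validated user parameters: copy in the user's
 * input payload and allocate a kernel bounce buffer for the output.
 */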
static int cxl_mbox_cmd_ctor(struct cxl_mbox_cmd *mbox,
			     struct cxl_dev_state *cxlds, u16 opcode,
			     size_t in_size, size_t out_size, u64 in_payload)
{
	*mbox = (struct cxl_mbox_cmd) {
		.opcode = opcode,
		.size_in = in_size,
	};

	if (in_size) {
		mbox->payload_in = vmemdup_user(u64_to_user_ptr(in_payload),
						in_size);
		if (IS_ERR(mbox->payload_in))
			return PTR_ERR(mbox->payload_in);

		if (!cxl_payload_from_user_allowed(opcode, mbox->payload_in)) {
			dev_dbg(cxlds->dev, "%s: input payload not allowed\n",
				cxl_mem_opcode_to_name(opcode));
			kvfree(mbox->payload_in);
			return -EBUSY;
		}
	}

	/* Prepare to handle a full payload for variable sized output */
	if (out_size == CXL_VARIABLE_PAYLOAD)
		mbox->size_out = cxlds->payload_size;
	else
		mbox->size_out = out_size;

	if (mbox->size_out) {
		mbox->payload_out = kvzalloc(mbox->size_out, GFP_KERNEL);
		if (!mbox->payload_out) {
			kvfree(mbox->payload_in);
			return -ENOMEM;
		}
	}

	return 0;
}
static void cxl_mbox_cmd_dtor(struct cxl_mbox_cmd *mbox)
{
	kvfree(mbox->payload_in);
	kvfree(mbox->payload_out);
}
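/*
 * Validate a RAW passthrough request and construct the equivalent
 * cxl_mem_command without consulting the driver's command table.
 */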
static int cxl_to_mem_cmd_raw(struct cxl_mem_command *mem_cmd,
			      const struct cxl_send_command *send_cmd,
			      struct cxl_dev_state *cxlds)
{
	if (send_cmd->raw.rsvd)
		return -EINVAL;

	/*
	 * Unlike supported commands, the output size of RAW commands
	 * gets passed along without further checking, so it must be
	 * validated here.
	 */
	if (send_cmd->out.size > cxlds->payload_size)
		return -EINVAL;

	if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
		return -EPERM;

	dev_WARN_ONCE(cxlds->dev, true, "raw command path used\n");

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = CXL_MEM_COMMAND_ID_RAW,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = send_cmd->raw.opcode,
	};

	return 0;
}
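/*
 * Validate a table-backed command against its cxl_mem_commands[] entry
 * and construct the cxl_mem_command to dispatch.
 */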
static int cxl_to_mem_cmd(struct cxl_mem_command *mem_cmd,
			  const struct cxl_send_command *send_cmd,
			  struct cxl_dev_state *cxlds)
{
	struct cxl_mem_command *c = &cxl_mem_commands[send_cmd->id];
	const struct cxl_command_info *info = &c->info;

	if (send_cmd->flags & ~CXL_MEM_COMMAND_FLAG_MASK)
		return -EINVAL;

	if (send_cmd->rsvd)
		return -EINVAL;

	if (send_cmd->in.rsvd || send_cmd->out.rsvd)
		return -EINVAL;

	/* Check that the command is enabled for hardware */
	if (!test_bit(info->id, cxlds->enabled_cmds))
		return -ENOTTY;

	/* Check that the command is not claimed for exclusive kernel use */
	if (test_bit(info->id, cxlds->exclusive_cmds))
		return -EBUSY;

	/* Check the input buffer is the expected size */
	if ((info->size_in != CXL_VARIABLE_PAYLOAD) &&
	    (info->size_in != send_cmd->in.size))
		return -ENOMEM;

	/* Check the output buffer is at least large enough */
	if ((info->size_out != CXL_VARIABLE_PAYLOAD) &&
	    (send_cmd->out.size < info->size_out))
		return -ENOMEM;

	*mem_cmd = (struct cxl_mem_command) {
		.info = {
			.id = info->id,
			.flags = info->flags,
			.size_in = send_cmd->in.size,
			.size_out = send_cmd->out.size,
		},
		.opcode = c->opcode
	};

	return 0;
}
/**
 * cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
 * @mbox_cmd: Sanitized and populated &struct cxl_mbox_cmd.
 * @cxlds: The device data for the operation
 * @send_cmd: &struct cxl_send_command copied in from userspace.
 *
 * Return:
 *  * %0	- @mbox_cmd is ready to send.
 *  * %-ENOTTY	- Invalid command specified.
 *  * %-EINVAL	- Reserved fields or invalid values were used.
 *  * %-ENOMEM	- Input or output buffer wasn't sized properly.
 *  * %-EPERM	- Attempted to use a protected command.
 *  * %-EBUSY	- Kernel has claimed exclusive access to this opcode.
 *
 * The result of this command is a fully validated command in @mbox_cmd that is
 * safe to send to the hardware.
 */
static int cxl_validate_cmd_from_user(struct cxl_mbox_cmd *mbox_cmd,
				      struct cxl_dev_state *cxlds,
				      const struct cxl_send_command *send_cmd)
{
	struct cxl_mem_command mem_cmd;
	int rc;

	if (send_cmd->id == 0 || send_cmd->id >= CXL_MEM_COMMAND_ID_MAX)
		return -ENOTTY;

	/*
	 * The user can never specify an input payload larger than what hardware
	 * supports, but output can be arbitrarily large (simply write out as
	 * much data as the hardware provides).
	 */
	if (send_cmd->in.size > cxlds->payload_size)
		return -EINVAL;

	/* Sanitize and construct a cxl_mem_command */
	if (send_cmd->id == CXL_MEM_COMMAND_ID_RAW)
		rc = cxl_to_mem_cmd_raw(&mem_cmd, send_cmd, cxlds);
	else
		rc = cxl_to_mem_cmd(&mem_cmd, send_cmd, cxlds);
	if (rc)
		return rc;

	/* Sanitize and construct a cxl_mbox_cmd */
	return cxl_mbox_cmd_ctor(mbox_cmd, cxlds, mem_cmd.opcode,
				 mem_cmd.info.size_in, mem_cmd.info.size_out,
				 send_cmd->in.payload);
}
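/*
 * Illustrative userspace usage of the query ioctl (a sketch, not code from
 * this file; struct layout and ioctl number per include/uapi/linux/cxl_mem.h):
 *
 *	struct cxl_mem_query_commands *q;
 *	__u32 n;
 *
 *	q = calloc(1, sizeof(*q));
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// n_commands == 0: get count
 *	n = q->n_commands;
 *	q = realloc(q, sizeof(*q) + n * sizeof(q->commands[0]));
 *	q->n_commands = n;
 *	ioctl(fd, CXL_MEM_QUERY_COMMANDS, q);	// fills q->commands[]
 */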
int cxl_query_cmd(struct cxl_memdev *cxlmd,
		  struct cxl_mem_query_commands __user *q)
{
	struct device *dev = &cxlmd->dev;
	struct cxl_mem_command *cmd;
	u32 n_commands;
	int j = 0;

	dev_dbg(dev, "Query IOCTL\n");

	if (get_user(n_commands, &q->n_commands))
		return -EFAULT;

	/* returns the total number if 0 elements are requested. */
	if (n_commands == 0)
		return put_user(ARRAY_SIZE(cxl_mem_commands), &q->n_commands);

	/*
	 * otherwise, return min(n_commands, total commands) cxl_command_info
	 * structures.
	 */
	cxl_for_each_cmd(cmd) {
		const struct cxl_command_info *info = &cmd->info;

		if (copy_to_user(&q->commands[j++], info, sizeof(*info)))
			return -EFAULT;

		if (j == n_commands)
			break;
	}

	return 0;
}
/**
 * handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
 * @cxlds: The device data for the operation
 * @mbox_cmd: The validated mailbox command.
 * @out_payload: Pointer to userspace's output payload.
 * @size_out: (Input) Max payload size to copy out.
 *            (Output) Payload size hardware generated.
 * @retval: Hardware generated return code from the operation.
 *
 * Return:
 *  * %0	- Mailbox transaction succeeded. This implies the mailbox
 *		  protocol completed successfully, not that the operation itself
 *		  was successful.
 *  * %-ENOMEM	- Couldn't allocate a bounce buffer.
 *  * %-EFAULT	- Something happened with copy_to/from_user.
 *  * %-EINTR	- Mailbox acquisition interrupted.
 *  * %-EXXX	- Transaction level failures.
 *
 * Dispatches a mailbox command on behalf of a userspace request.
 * The output payload is copied to userspace.
 *
 * See cxl_send_cmd().
 */
static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
					struct cxl_mbox_cmd *mbox_cmd,
					u64 out_payload, s32 *size_out,
					u32 *retval)
{
	struct device *dev = cxlds->dev;
	int rc;

	dev_dbg(dev,
		"Submitting %s command for user\n"
		"\topcode: 0x%04x\n"
		"\tsize: %zu\n",
		cxl_mem_opcode_to_name(mbox_cmd->opcode),
		mbox_cmd->opcode, mbox_cmd->size_in);

	rc = cxlds->mbox_send(cxlds, mbox_cmd);
	if (rc)
		goto out;

	/*
	 * @size_out contains the max size that's allowed to be written back out
	 * to userspace. While the payload may have written more output than
	 * this it will have to be ignored.
	 */
	if (mbox_cmd->size_out) {
		dev_WARN_ONCE(dev, mbox_cmd->size_out > *size_out,
			      "Invalid return size\n");
		if (copy_to_user(u64_to_user_ptr(out_payload),
				 mbox_cmd->payload_out, mbox_cmd->size_out)) {
			rc = -EFAULT;
			goto out;
		}
	}

	*size_out = mbox_cmd->size_out;
	*retval = mbox_cmd->return_code;

out:
	cxl_mbox_cmd_dtor(mbox_cmd);
	return rc;
}
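/*
 * Illustrative userspace invocation of the send ioctl (a sketch, not code
 * from this file; "buf" is a caller-supplied buffer and the field layout
 * follows include/uapi/linux/cxl_mem.h):
 *
 *	struct cxl_send_command cmd = {
 *		.id = CXL_MEM_COMMAND_ID_IDENTIFY,
 *		.out.size = sizeof(buf),
 *		.out.payload = (__u64)(uintptr_t)buf,
 *	};
 *
 *	if (ioctl(fd, CXL_MEM_SEND_COMMAND, &cmd) == 0)
 *		... cmd.retval holds the device's mailbox return code ...
 */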
int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct device *dev = &cxlmd->dev;
	struct cxl_send_command send;
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	dev_dbg(dev, "Send IOCTL\n");

	if (copy_from_user(&send, s, sizeof(send)))
		return -EFAULT;

	rc = cxl_validate_cmd_from_user(&mbox_cmd, cxlmd->cxlds, &send);
	if (rc)
		return rc;

	rc = handle_mailbox_cmd_from_user(cxlds, &mbox_cmd, send.out.payload,
					  &send.out.size, &send.retval);
	if (rc)
		return rc;

	if (copy_to_user(s, &send, sizeof(send)))
		return -EFAULT;

	return 0;
}
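/*
 * Transfer a log in payload_size-sized chunks using the Get Log command's
 * offset/length fields until @size bytes have been read into @out.
 */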
static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
{
	u32 remaining = size;
	u32 offset = 0;

	while (remaining) {
		u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
		struct cxl_mbox_get_log log = {
			.uuid = *uuid,
			.offset = cpu_to_le32(offset),
			.length = cpu_to_le32(xfer_size)
		};
		int rc;

		rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LOG, &log, sizeof(log),
				       out, xfer_size);
		if (rc < 0)
			return rc;

		out += xfer_size;
		remaining -= xfer_size;
		offset += xfer_size;
	}

	return 0;
}
/**
 * cxl_walk_cel() - Walk through the Command Effects Log.
 * @cxlds: The device data for the operation
 * @size: Length of the Command Effects Log.
 * @cel: Buffer holding the Command Effects Log entries.
 *
 * Iterate over each entry in the CEL and determine if the driver supports the
 * command. If so, the command is enabled for the device and can be used later.
 */
static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
{
	struct cxl_cel_entry *cel_entry;
	const int cel_entries = size / sizeof(*cel_entry);
	int i;

	cel_entry = (struct cxl_cel_entry *) cel;

	for (i = 0; i < cel_entries; i++) {
		u16 opcode = le16_to_cpu(cel_entry[i].opcode);
		struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);

		if (!cmd) {
			dev_dbg(cxlds->dev,
				"Opcode 0x%04x unsupported by driver", opcode);
			continue;
		}

		set_bit(cmd->info.id, cxlds->enabled_cmds);
	}
}
static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_supported_logs *ret;
	int rc;

	ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 0, ret,
			       cxlds->payload_size);
	if (rc < 0) {
		kvfree(ret);
		return ERR_PTR(rc);
	}

	return ret;
}
enum {
	CEL_UUID,
	VENDOR_DEBUG_UUID,
};

/* See CXL 2.0 Table 170. Get Log Input Payload */
static const uuid_t log_uuid[] = {
	[CEL_UUID] = DEFINE_CXL_CEL_UUID,
	[VENDOR_DEBUG_UUID] = DEFINE_CXL_VENDOR_DEBUG_UUID,
};
/**
 * cxl_enumerate_cmds() - Enumerate commands for a device.
 * @cxlds: The device data for the operation
 *
 * Returns 0 if enumerate completed successfully.
 *
 * CXL devices have optional support for certain commands. This function will
 * determine the set of supported commands for the hardware and update the
 * enabled_cmds bitmap in the @cxlds.
 */
int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_supported_logs *gsl;
	struct device *dev = cxlds->dev;
	struct cxl_mem_command *cmd;
	int i, rc;

	gsl = cxl_get_gsl(cxlds);
	if (IS_ERR(gsl))
		return PTR_ERR(gsl);

	rc = -ENOENT;
	for (i = 0; i < le16_to_cpu(gsl->entries); i++) {
		u32 size = le32_to_cpu(gsl->entry[i].size);
		uuid_t uuid = gsl->entry[i].uuid;
		u8 *log;

		dev_dbg(dev, "Found LOG type %pU of size %d", &uuid, size);

		if (!uuid_equal(&uuid, &log_uuid[CEL_UUID]))
			continue;

		log = kvmalloc(size, GFP_KERNEL);
		if (!log) {
			rc = -ENOMEM;
			goto out;
		}

		rc = cxl_xfer_log(cxlds, &uuid, size, log);
		if (rc) {
			kvfree(log);
			goto out;
		}

		cxl_walk_cel(cxlds, size, log);
		kvfree(log);

		/* In case CEL was bogus, enable some default commands. */
		cxl_for_each_cmd(cmd)
			if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
				set_bit(cmd->info.id, cxlds->enabled_cmds);

		/* Found the required CEL */
		rc = 0;
	}
out:
	kvfree(gsl);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
/**
 * cxl_mem_get_partition_info - Get partition info
 * @cxlds: The device data for the operation
 *
 * Retrieve the current partition info for the device specified. The active
 * values are the current capacity in bytes. If not 0, the 'next' values are
 * the pending values, in bytes, which take effect on next cold reset.
 *
 * Return: 0 if no error; otherwise the result of the mailbox command.
 *
 * See CXL 2.0 8.2.9.5.2.1 Get Partition Info
 */
static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_get_partition_info {
		__le64 active_volatile_cap;
		__le64 active_persistent_cap;
		__le64 next_volatile_cap;
		__le64 next_persistent_cap;
	} __packed pi;
	int rc;

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
			       &pi, sizeof(pi));
	if (rc)
		return rc;

	cxlds->active_volatile_bytes =
		le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->active_persistent_bytes =
		le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->next_volatile_bytes =
		le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
	cxlds->next_persistent_bytes =
		le64_to_cpu(pi.next_persistent_cap) * CXL_CAPACITY_MULTIPLIER;

	return 0;
}
/**
 * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
 * @cxlds: The device data for the operation
 *
 * Return: 0 if identify was executed successfully.
 *
 * This will dispatch the identify command to the device and on success populate
 * structures to be exported to sysfs.
 */
int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
{
	/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
	struct cxl_mbox_identify id;
	int rc;

	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
			       sizeof(id));
	if (rc < 0)
		return rc;

	cxlds->total_bytes =
		le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->volatile_only_bytes =
		le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->persistent_only_bytes =
		le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
	cxlds->partition_align_bytes =
		le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;

	dev_dbg(cxlds->dev,
		"Identify Memory Device\n"
		" total_bytes = %#llx\n"
		" volatile_only_bytes = %#llx\n"
		" persistent_only_bytes = %#llx\n"
		" partition_align_bytes = %#llx\n",
		cxlds->total_bytes, cxlds->volatile_only_bytes,
		cxlds->persistent_only_bytes, cxlds->partition_align_bytes);

	cxlds->lsa_size = le32_to_cpu(id.lsa_size);
	memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
{
	int rc;

	/* A zero partition alignment means the volatile/pmem split is fixed */
	if (cxlds->partition_align_bytes == 0) {
		cxlds->ram_range.start = 0;
		cxlds->ram_range.end = cxlds->volatile_only_bytes - 1;
		cxlds->pmem_range.start = cxlds->volatile_only_bytes;
		cxlds->pmem_range.end = cxlds->volatile_only_bytes +
					cxlds->persistent_only_bytes - 1;
		return 0;
	}

	rc = cxl_mem_get_partition_info(cxlds);
	if (rc) {
		dev_err(cxlds->dev, "Failed to query partition information\n");
		return rc;
	}

	dev_dbg(cxlds->dev,
		"Get Partition Info\n"
		" active_volatile_bytes = %#llx\n"
		" active_persistent_bytes = %#llx\n"
		" next_volatile_bytes = %#llx\n"
		" next_persistent_bytes = %#llx\n",
		cxlds->active_volatile_bytes, cxlds->active_persistent_bytes,
		cxlds->next_volatile_bytes, cxlds->next_persistent_bytes);

	cxlds->ram_range.start = 0;
	cxlds->ram_range.end = cxlds->active_volatile_bytes - 1;

	cxlds->pmem_range.start = cxlds->active_volatile_bytes;
	cxlds->pmem_range.end =
		cxlds->active_volatile_bytes + cxlds->active_persistent_bytes - 1;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
{
	struct cxl_dev_state *cxlds;

	/* devm-allocated: freed automatically when @dev is released */
	cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
	if (!cxlds) {
		dev_err(dev, "No memory available\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_init(&cxlds->mbox_mutex);
	cxlds->dev = dev;

	return cxlds;
}
EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);
static struct dentry *cxl_debugfs;

void __init cxl_mbox_init(void)
{
	struct dentry *mbox_debugfs;

	cxl_debugfs = debugfs_create_dir("cxl", NULL);
	mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs);
	debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs,
			    &cxl_raw_allow_all);
}

void cxl_mbox_exit(void)
{
	debugfs_remove_recursive(cxl_debugfs);
}