// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "cxlpci.h"
#include "cxl.h"
#include "pmu.h"
/**
 * DOC: cxl pci
 *
 * This implements the PCI exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even if it isn't CXL enabled. While this driver is
 * focused around the PCI specific aspects of a CXL device, it binds to the
 * specific CXL memory device class code, and therefore the implementation of
 * cxl_pci is focused around CXL memory devices.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register on the CXL bus.
 *  - Enumerate the device's register interface and map it.
 *  - Register an nvdimm bridge device with cxl_core.
 *  - Register a CXL mailbox with cxl_core.
 */
#define cxl_doorbell_busy(cxlds)                                                \
	(readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                 \
	 CXLDEV_MBOX_CTRL_DOORBELL)
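/*
 * A set doorbell means the mailbox is busy: per the CXL 2.0 mailbox protocol
 * (8.2.8.4) the caller sets CXLDEV_MBOX_CTRL_DOORBELL to submit a command and
 * the device clears it on completion.
 */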
/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)
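/*
 * Note: despite the _MS suffix, the value above is in jiffies (2 * HZ is two
 * seconds); it is only consumed as a jiffies offset in
 * cxl_pci_mbox_wait_for_doorbell() below.
 */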
/*
 * CXL 2.0 ECN "Add Mailbox Ready Time" defines a capability field to
 * dictate how long to wait for the mailbox to become ready. The new
 * field allows the device to tell software the amount of time to wait
 * before mailbox ready. This field per the spec theoretically allows
 * for up to 255 seconds. 255 seconds is unreasonably long, it's longer
 * than the maximum SATA port link recovery wait. Default to 60 seconds
 * until someone builds a CXL device that needs more time in practice.
 */
static unsigned short mbox_ready_timeout = 60;
module_param(mbox_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(mbox_ready_timeout, "seconds to wait for mailbox ready");
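/*
 * For example, a hypothetical invocation raising the wait to two minutes:
 * modprobe cxl_pci mbox_ready_timeout=120. With 0644 permissions the value
 * is also writable via /sys/module/cxl_pci/parameters/mbox_ready_timeout,
 * though it is only consulted at probe time.
 */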
static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
{
	const unsigned long start = jiffies;
	unsigned long end = start;

	while (cxl_doorbell_busy(cxlds)) {
		end = jiffies;

		if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
			/* Check again in case preempted before timeout test */
			if (!cxl_doorbell_busy(cxlds))
				break;
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	dev_dbg(cxlds->dev, "Doorbell wait took %dms",
		jiffies_to_msecs(end) - jiffies_to_msecs(start));
	return 0;
}
#define cxl_err(dev, status, msg)                                           \
	dev_err_ratelimited(dev, msg ", device state %s%s\n",              \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",     \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")

#define cxl_cmd_err(dev, cmd, status, msg)                                  \
	dev_err_ratelimited(dev, msg " (opcode: %#x), device state %s%s\n", \
			    (cmd)->opcode,                                  \
			    status & CXLMDEV_DEV_FATAL ? " fatal" : "",     \
			    status & CXLMDEV_FW_HALT ? " firmware-halt" : "")
/*
 * Threaded irq dev_id's must be globally unique. cxl_dev_id provides a unique
 * wrapper object for each irq within the same cxlds.
 */
struct cxl_dev_id {
	struct cxl_dev_state *cxlds;
};
static int cxl_request_irq(struct cxl_dev_state *cxlds, int irq,
			   irq_handler_t thread_fn)
{
	struct device *dev = cxlds->dev;
	struct cxl_dev_id *dev_id;

	/* dev_id must be globally unique and must contain the cxlds */
	dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
	if (!dev_id)
		return -ENOMEM;
	dev_id->cxlds = cxlds;

	return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
					 IRQF_SHARED | IRQF_ONESHOT, NULL,
					 dev_id);
}
static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
{
	u64 reg;

	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
	return FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg) == 100;
}
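/*
 * The Background Command Status Register packs the opcode, a 0-100 percent
 * complete field (tested above), and the final return code into one 64-bit
 * read; the opcode is decoded in cxl_pci_mbox_irq() below and the return
 * code in __cxl_pci_mbox_send_cmd().
 */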
static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
{
	u64 reg;
	u16 opcode;
	struct cxl_dev_id *dev_id = id;
	struct cxl_dev_state *cxlds = dev_id->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);

	if (!cxl_mbox_background_complete(cxlds))
		return IRQ_NONE;

	reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
	opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
	if (opcode == CXL_MBOX_OP_SANITIZE) {
		mutex_lock(&mds->mbox_mutex);
		if (mds->security.sanitize_node)
			mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
		mutex_unlock(&mds->mbox_mutex);
	} else {
		/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
		rcuwait_wake_up(&mds->mbox_wait);
	}

	return IRQ_HANDLED;
}
/*
 * Sanitization operation polling mode.
 */
static void cxl_mbox_sanitize_work(struct work_struct *work)
{
	struct cxl_memdev_state *mds =
		container_of(work, typeof(*mds), security.poll_dwork.work);
	struct cxl_dev_state *cxlds = &mds->cxlds;

	mutex_lock(&mds->mbox_mutex);
	if (cxl_mbox_background_complete(cxlds)) {
		mds->security.poll_tmo_secs = 0;
		if (mds->security.sanitize_node)
			sysfs_notify_dirent(mds->security.sanitize_node);
		mds->security.sanitize_active = false;

		dev_dbg(cxlds->dev, "Sanitization operation ended\n");
	} else {
		int timeout = mds->security.poll_tmo_secs + 10;

		mds->security.poll_tmo_secs = min(15 * 60, timeout);
		schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
	}
	mutex_unlock(&mds->mbox_mutex);
}
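/*
 * The re-poll interval above backs off linearly: the first poll is scheduled
 * one second after submission (see __cxl_pci_mbox_send_cmd()), then 11s, 21s,
 * and so on, capped at 15 minutes between polls.
 */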
/**
 * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
 * @mds: The memory device driver data
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command thus only using the
 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
 * devices, and perhaps other types of CXL devices may have further information
 * available upon error conditions. Driver facilities wishing to send mailbox
 * commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the primary
 * mailbox to be OS controlled and the secondary mailbox to be used by system
 * firmware. This allows the OS and firmware to communicate with the device and
 * not need to coordinate with each other. The driver only uses the primary
 * mailbox.
 */
static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlds->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&mds->mbox_mutex);
	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */
	/* #1 */
	if (cxl_doorbell_busy(cxlds)) {
		u64 md_status =
			readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status,
			    "mailbox queue busy");
		return -EBUSY;
	}
234 * With sanitize polling, hardware might be done and the poller still
235 * not be in sync. Ensure no new command comes in until so. Keep the
236 * hardware semantics and only allow device health status.
238 if (mds->security.poll_tmo_secs > 0) {
239 if (mbox_cmd->opcode != CXL_MBOX_OP_GET_HEALTH_INFO)
	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command: 0x%04x\n", mbox_cmd->opcode);
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
	/* #5 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
	if (rc == -ETIMEDOUT) {
		u64 md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);

		cxl_cmd_err(cxlds->dev, mbox_cmd, md_status, "mailbox timeout");
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
	/*
	 * Handle the background command in a synchronous manner.
	 *
	 * All other mailbox commands will serialize/queue on the mbox_mutex,
	 * which we currently hold. Furthermore this also guarantees that
	 * cxl_mbox_background_complete() checks are safe amongst each other,
	 * in that no new bg operation can occur in between.
	 *
	 * Background operations are timesliced in accordance with the nature
	 * of the command. In the event of timeout, the mailbox state is
	 * indeterminate until the next successful command submission and the
	 * driver can get back in sync with the hardware state.
	 */
	if (mbox_cmd->return_code == CXL_MBOX_CMD_RC_BACKGROUND) {
		u64 bg_status_reg;
		int i, timeout;

		/*
		 * Sanitization is a special case which monopolizes the device
		 * and cannot be timesliced. Handle asynchronously instead,
		 * and allow userspace to poll(2) for completion.
		 */
		if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
			if (mds->security.sanitize_active)
				return -EBUSY;

			/* give first timeout a second */
			timeout = 1;
			mds->security.poll_tmo_secs = timeout;
			mds->security.sanitize_active = true;
			schedule_delayed_work(&mds->security.poll_dwork,
					      timeout * HZ);
			dev_dbg(dev, "Sanitization operation started\n");
			goto success;
		}
		dev_dbg(dev, "Mailbox background operation (0x%04x) started\n",
			mbox_cmd->opcode);

		timeout = mbox_cmd->poll_interval_ms;
		for (i = 0; i < mbox_cmd->poll_count; i++) {
			if (rcuwait_wait_event_timeout(&mds->mbox_wait,
				       cxl_mbox_background_complete(cxlds),
				       TASK_UNINTERRUPTIBLE,
				       msecs_to_jiffies(timeout)) > 0)
				break;
		}

		if (!cxl_mbox_background_complete(cxlds)) {
			dev_err(dev, "timeout waiting for background (%d ms)\n",
				timeout * mbox_cmd->poll_count);
			return -ETIMEDOUT;
		}

		bg_status_reg = readq(cxlds->regs.mbox +
				      CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
		mbox_cmd->return_code =
			FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_RC_MASK,
				  bg_status_reg);
		dev_dbg(dev,
			"Mailbox background operation (0x%04x) completed\n",
			mbox_cmd->opcode);
	}
	if (mbox_cmd->return_code != CXL_MBOX_CMD_RC_SUCCESS) {
		dev_dbg(dev, "Mailbox operation had an error: %s\n",
			cxl_mbox_cmd_rc2str(mbox_cmd));
		return 0; /* completed but caller must check return_code */
	}

success:
	/* #7 */
	cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);
	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n;

		n = min3(mbox_cmd->size_out, mds->payload_size, out_len);
		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}
static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
			     struct cxl_mbox_cmd *cmd)
{
	int rc;

	mutex_lock_io(&mds->mbox_mutex);
	rc = __cxl_pci_mbox_send_cmd(mds, cmd);
	mutex_unlock(&mds->mbox_mutex);

	return rc;
}
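/*
 * A minimal caller sketch: drivers do not invoke mds->mbox_send directly,
 * they build a struct cxl_mbox_cmd and go through cxl_internal_send_cmd()
 * in cxl_core, which validates payload sizes against mds->payload_size
 * before dispatching here, e.g.:
 *
 *	struct cxl_mbox_cmd cmd = {
 *		.opcode = CXL_MBOX_OP_GET_HEALTH_INFO,
 *		.size_out = sizeof(health),
 *		.payload_out = &health,
 *	};
 *	rc = cxl_internal_send_cmd(mds, &cmd);
 */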
static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
	struct device *dev = cxlds->dev;
	unsigned long timeout;
	int irq, msgnum;
	u64 md_status;
	u32 ctrl;

	timeout = jiffies + mbox_ready_timeout * HZ;
	do {
		md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
		if (md_status & CXLMDEV_MBOX_IF_READY)
			break;
		if (msleep_interruptible(100))
			break;
	} while (!time_after(jiffies, timeout));
	if (!(md_status & CXLMDEV_MBOX_IF_READY)) {
		cxl_err(dev, md_status, "timeout awaiting mailbox ready");
		return -ETIMEDOUT;
	}

	/*
	 * A command may be in flight from a previous driver instance,
	 * think kexec, do one doorbell wait so that
	 * __cxl_pci_mbox_send_cmd() can assume that it is the only
	 * source for future doorbell busy events.
	 */
	if (cxl_pci_mbox_wait_for_doorbell(cxlds) != 0) {
		cxl_err(dev, md_status, "timeout awaiting mailbox idle");
		return -ETIMEDOUT;
	}
	mds->mbox_send = cxl_pci_mbox_send;
	mds->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
	 */
	mds->payload_size = min_t(size_t, mds->payload_size, SZ_1M);
	if (mds->payload_size < 256) {
		dev_err(dev, "Mailbox is too small (%zub)",
			mds->payload_size);
		return -ENXIO;
	}

	dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
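	/*
	 * Worked example: the capability field encodes the payload size as a
	 * power-of-2 exponent, so a raw value of 8 yields 1 << 8 = 256 bytes
	 * (the minimum accepted above) and 20 yields 1 MiB (the SZ_1M cap).
	 */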
	rcuwait_init(&mds->mbox_wait);
	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);

	/* background command interrupts are optional */
	if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
		return 0;

	msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
	irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
	if (irq < 0)
		return 0;

	if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq))
		return 0;
	dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
	/* enable background command mbox irq support */
	ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
	ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
	writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	return 0;
}
/*
 * Assume that any RCIEP that emits the CXL memory expander class code
 * is an RCD
 */
static bool is_cxl_restricted(struct pci_dev *pdev)
{
	return pci_pcie_type(pdev) == PCI_EXP_TYPE_RC_END;
}
static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
				  struct cxl_register_map *map)
{
	struct cxl_port *port;
	struct cxl_dport *dport;
	resource_size_t component_reg_phys;

	*map = (struct cxl_register_map) {
		.host = &pdev->dev,
		.resource = CXL_RESOURCE_NONE,
	};

	port = cxl_pci_find_port(pdev, &dport);
	if (!port)
		return -EPROBE_DEFER;

	component_reg_phys = cxl_rcd_component_reg_phys(&pdev->dev, dport);

	put_device(&port->dev);

	if (component_reg_phys == CXL_RESOURCE_NONE)
		return -ENXIO;

	map->resource = component_reg_phys;
	map->reg_type = CXL_REGLOC_RBI_COMPONENT;
	map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;

	return 0;
}
static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
			      struct cxl_register_map *map)
{
	int rc;

	rc = cxl_find_regblock(pdev, type, map);

	/*
	 * If the Register Locator DVSEC does not exist, check if it
	 * is an RCH and try to extract the Component Registers from
	 * an RCRB.
	 */
	if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev))
		rc = cxl_rcrb_get_comp_regs(pdev, map);

	if (rc)
		return rc;

	return cxl_setup_regs(map);
}
static int cxl_pci_ras_unmask(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	void __iomem *addr;
	u32 orig_val, val, mask;
	u16 cap;
	int rc;

	if (!cxlds->regs.ras) {
		dev_dbg(&pdev->dev, "No RAS registers.\n");
		return 0;
	}

	/* BIOS has PCIe AER error control */
	if (!pcie_aer_is_native(pdev))
		return 0;

	rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
	if (rc)
		return rc;

	if (cap & PCI_EXP_DEVCTL_URRE) {
		addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_MASK_OFFSET;
		orig_val = readl(addr);

		mask = CXL_RAS_UNCORRECTABLE_MASK_MASK |
		       CXL_RAS_UNCORRECTABLE_MASK_F256B_MASK;
		val = orig_val & ~mask;
		writel(val, addr);
		dev_dbg(&pdev->dev,
			"Uncorrectable RAS Errors Mask: %#x -> %#x\n",
			orig_val, val);
	}

	if (cap & PCI_EXP_DEVCTL_CERE) {
		addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_MASK_OFFSET;
		orig_val = readl(addr);
		val = orig_val & ~CXL_RAS_CORRECTABLE_MASK_MASK;
		writel(val, addr);
		dev_dbg(&pdev->dev, "Correctable RAS Errors Mask: %#x -> %#x\n",
			orig_val, val);
	}

	return 0;
}
static void free_event_buf(void *buf)
{
	kvfree(buf);
}
/*
 * There is a single buffer for reading event logs from the mailbox. All logs
 * share this buffer protected by the mds->event_log_lock.
 */
static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
{
	struct cxl_get_event_payload *buf;

	buf = kvmalloc(mds->payload_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mds->event.buf = buf;
	return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
}
static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
{
	int nvecs;

	/*
	 * Per CXL 3.0 3.1.1 CXL.io Endpoint a function on a CXL device must
	 * not generate INTx messages if that function participates in
	 * CXL.cache or CXL.mem.
	 *
	 * Additionally pci_alloc_irq_vectors() handles calling
	 * pci_free_irq_vectors() automatically despite not being called
	 * pcim_*. See pci_setup_msi_context().
	 */
	nvecs = pci_alloc_irq_vectors(pdev, 1, CXL_PCI_DEFAULT_MAX_VECTORS,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 1) {
		dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
		return -ENXIO;
	}
	return 0;
}
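/*
 * Note: with PCI_IRQ_MSIX | PCI_IRQ_MSI the core tries MSI-X first and falls
 * back to MSI, and may grant fewer than CXL_PCI_DEFAULT_MAX_VECTORS vectors;
 * consumers therefore check pci_irq_vector() for failure before requesting
 * an irq.
 */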
static irqreturn_t cxl_event_thread(int irq, void *id)
{
	struct cxl_dev_id *dev_id = id;
	struct cxl_dev_state *cxlds = dev_id->cxlds;
	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
	u32 status;

	do {
		/*
		 * CXL 3.0 8.2.8.3.1: The lower 32 bits are the status;
		 * ignore the reserved upper 32 bits
		 */
		status = readl(cxlds->regs.status + CXLDEV_DEV_EVENT_STATUS_OFFSET);
		/* Ignore logs unknown to the driver */
		status &= CXLDEV_EVENT_STATUS_ALL;
		if (!status)
			break;
		cxl_mem_get_event_records(mds, status);
		cond_resched();
	} while (status);

	return IRQ_HANDLED;
}
static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
{
	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
	int irq;

	if (FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting) != CXL_INT_MSI_MSIX)
		return -ENXIO;

	irq = pci_irq_vector(pdev,
			     FIELD_GET(CXLDEV_EVENT_INT_MSGNUM_MASK, setting));
	if (irq < 0)
		return irq;

	return cxl_request_irq(cxlds, irq, cxl_event_thread);
}
static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
				    struct cxl_event_interrupt_policy *policy)
{
	struct cxl_mbox_cmd mbox_cmd = {
		.opcode = CXL_MBOX_OP_GET_EVT_INT_POLICY,
		.payload_out = policy,
		.size_out = sizeof(*policy),
	};
	int rc;

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0)
		dev_err(mds->cxlds.dev,
			"Failed to get event interrupt policy : %d", rc);

	return rc;
}
static int cxl_event_config_msgnums(struct cxl_memdev_state *mds,
				    struct cxl_event_interrupt_policy *policy)
{
	struct cxl_mbox_cmd mbox_cmd;
	int rc;

	*policy = (struct cxl_event_interrupt_policy) {
		.info_settings = CXL_INT_MSI_MSIX,
		.warn_settings = CXL_INT_MSI_MSIX,
		.failure_settings = CXL_INT_MSI_MSIX,
		.fatal_settings = CXL_INT_MSI_MSIX,
	};

	mbox_cmd = (struct cxl_mbox_cmd) {
		.opcode = CXL_MBOX_OP_SET_EVT_INT_POLICY,
		.payload_in = policy,
		.size_in = sizeof(*policy),
	};

	rc = cxl_internal_send_cmd(mds, &mbox_cmd);
	if (rc < 0) {
		dev_err(mds->cxlds.dev, "Failed to set event interrupt policy : %d",
			rc);
		return rc;
	}

	/* Retrieve final interrupt settings */
	return cxl_event_get_int_policy(mds, policy);
}
static int cxl_event_irqsetup(struct cxl_memdev_state *mds)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct cxl_event_interrupt_policy policy;
	int rc;

	rc = cxl_event_config_msgnums(mds, &policy);
	if (rc)
		return rc;

	rc = cxl_event_req_irq(cxlds, policy.info_settings);
	if (rc) {
		dev_err(cxlds->dev, "Failed to get interrupt for event Info log\n");
		return rc;
	}

	rc = cxl_event_req_irq(cxlds, policy.warn_settings);
	if (rc) {
		dev_err(cxlds->dev, "Failed to get interrupt for event Warn log\n");
		return rc;
	}

	rc = cxl_event_req_irq(cxlds, policy.failure_settings);
	if (rc) {
		dev_err(cxlds->dev, "Failed to get interrupt for event Failure log\n");
		return rc;
	}

	rc = cxl_event_req_irq(cxlds, policy.fatal_settings);
	if (rc) {
		dev_err(cxlds->dev, "Failed to get interrupt for event Fatal log\n");
		return rc;
	}

	return 0;
}
static bool cxl_event_int_is_fw(u8 setting)
{
	u8 mode = FIELD_GET(CXLDEV_EVENT_INT_MODE_MASK, setting);

	return mode == CXL_INT_FW;
}
static int cxl_event_config(struct pci_host_bridge *host_bridge,
			    struct cxl_memdev_state *mds)
{
	struct cxl_event_interrupt_policy policy;
	int rc;

	/*
	 * When BIOS maintains CXL error reporting control, it will process
	 * event records. Only one agent can do so.
	 */
	if (!host_bridge->native_cxl_error)
		return 0;

	rc = cxl_mem_alloc_event_buf(mds);
	if (rc)
		return rc;

	rc = cxl_event_get_int_policy(mds, &policy);
	if (rc)
		return rc;

	if (cxl_event_int_is_fw(policy.info_settings) ||
	    cxl_event_int_is_fw(policy.warn_settings) ||
	    cxl_event_int_is_fw(policy.failure_settings) ||
	    cxl_event_int_is_fw(policy.fatal_settings)) {
		dev_err(mds->cxlds.dev,
			"FW still in control of Event Logs despite _OSC settings\n");
		return -EBUSY;
	}

	rc = cxl_event_irqsetup(mds);
	if (rc)
		return rc;

	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);

	return 0;
}
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
	struct cxl_memdev_state *mds;
	struct cxl_dev_state *cxlds;
	struct cxl_register_map map;
	struct cxl_memdev *cxlmd;
	int i, rc, pmu_count;

	/*
	 * Double check the anonymous union trickery in struct cxl_regs
	 * FIXME switch to struct_group()
	 */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;
	pci_set_master(pdev);

	mds = cxl_memdev_state_create(&pdev->dev);
	if (IS_ERR(mds))
		return PTR_ERR(mds);
	cxlds = &mds->cxlds;
	pci_set_drvdata(pdev, cxlds);
	cxlds->rcd = is_cxl_restricted(pdev);
	cxlds->serial = pci_get_dsn(pdev);
	cxlds->cxl_dvsec = pci_find_dvsec_capability(
		pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
	if (!cxlds->cxl_dvsec)
		dev_warn(&pdev->dev,
			 "Device DVSEC not present, skip CXL.mem init\n");

	rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
	if (rc)
		return rc;

	rc = cxl_map_device_regs(&map, &cxlds->regs.device_regs);
	if (rc)
		return rc;
	/*
	 * If the component registers can't be found, the cxl_pci driver may
	 * still be useful for management functions so don't return an error.
	 */
	rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT,
				&cxlds->reg_map);
	if (rc)
		dev_warn(&pdev->dev, "No component registers (%d)\n", rc);
	else if (!cxlds->reg_map.component_map.ras.valid)
		dev_dbg(&pdev->dev, "RAS registers not found\n");

	rc = cxl_map_component_regs(&cxlds->reg_map, &cxlds->regs.component,
				    BIT(CXL_CM_CAP_CAP_ID_RAS));
	if (rc)
		dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
	rc = cxl_await_media_ready(cxlds);
	if (rc == 0)
		cxlds->media_ready = true;
	else
		dev_warn(&pdev->dev, "Media not active (%d)\n", rc);

	rc = cxl_alloc_irq_vectors(pdev);
	if (rc)
		return rc;

	rc = cxl_pci_setup_mailbox(mds);
	if (rc)
		return rc;

	rc = cxl_enumerate_cmds(mds);
	if (rc)
		return rc;

	rc = cxl_set_timestamp(mds);
	if (rc)
		return rc;

	rc = cxl_poison_state_init(mds);
	if (rc)
		return rc;

	rc = cxl_dev_state_identify(mds);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(mds);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
	if (rc)
		return rc;

	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
	if (rc)
		return rc;
	pmu_count = cxl_count_regblock(pdev, CXL_REGLOC_RBI_PMU);
	for (i = 0; i < pmu_count; i++) {
		struct cxl_pmu_regs pmu_regs;

		rc = cxl_find_regblock_instance(pdev, CXL_REGLOC_RBI_PMU, &map, i);
		if (rc) {
			dev_dbg(&pdev->dev, "Could not find PMU regblock\n");
			break;
		}

		rc = cxl_map_pmu_regs(&map, &pmu_regs);
		if (rc) {
			dev_dbg(&pdev->dev, "Could not map PMU regs\n");
			break;
		}

		rc = devm_cxl_pmu_add(cxlds->dev, &pmu_regs, cxlmd->id, i, CXL_PMU_MEMDEV);
		if (rc) {
			dev_dbg(&pdev->dev, "Could not add PMU instance\n");
			break;
		}
	}
	rc = cxl_event_config(host_bridge, mds);
	if (rc)
		return rc;

	rc = cxl_pci_ras_unmask(pdev);
	if (rc)
		dev_dbg(&pdev->dev, "No RAS reporting unmasked\n");

	pci_save_state(pdev);

	return 0;
}
static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
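	/* e.g. 0x0502 << 8 | 0x10 yields class code 0x050210 (values per pci_ids.h/cxlpci.h) */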
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);
static pci_ers_result_t cxl_slot_reset(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;

	dev_info(&pdev->dev, "%s: restart CXL.mem after slot reset\n",
		 dev_name(dev));
	pci_restore_state(pdev);
	if (device_attach(dev) <= 0)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void cxl_error_resume(struct pci_dev *pdev)
{
	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
	struct cxl_memdev *cxlmd = cxlds->cxlmd;
	struct device *dev = &cxlmd->dev;

	dev_info(&pdev->dev, "%s: error resume %s\n", dev_name(dev),
		 dev->driver ? "successful" : "failed");
}
static const struct pci_error_handlers cxl_error_handlers = {
	.error_detected	= cxl_error_detected,
	.slot_reset	= cxl_slot_reset,
	.resume		= cxl_error_resume,
	.cor_error_detected	= cxl_cor_error_detected,
};
static struct pci_driver cxl_pci_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_pci_probe,
	.err_handler		= &cxl_error_handlers,
	.driver	= {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};
MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
MODULE_IMPORT_NS(CXL);