// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt)	"papr-scm: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/seq_buf.h>
#include <linux/nd.h>

#include <asm/plpar_wrappers.h>
#include <asm/papr_pdsm.h>
#include <asm/mce.h>
#include <asm/unaligned.h>
#include <linux/perf_event.h>

#define BIND_ANY_ADDR (~0ul)

#define PAPR_SCM_DIMM_CMD_MASK \
	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
	 (1ul << ND_CMD_GET_CONFIG_DATA) | \
	 (1ul << ND_CMD_SET_CONFIG_DATA) | \
	 (1ul << ND_CMD_CALL))

/* DIMM health bitmap indicators */
/* SCM device is unable to persist memory contents */
#define PAPR_PMEM_UNARMED		(1ULL << (63 - 0))
/* SCM device failed to persist memory contents */
#define PAPR_PMEM_SHUTDOWN_DIRTY	(1ULL << (63 - 1))
/* SCM device contents are persisted from previous IPL */
#define PAPR_PMEM_SHUTDOWN_CLEAN	(1ULL << (63 - 2))
/* SCM device contents are not persisted from previous IPL */
#define PAPR_PMEM_EMPTY			(1ULL << (63 - 3))
/* SCM device memory life remaining is critically low */
#define PAPR_PMEM_HEALTH_CRITICAL	(1ULL << (63 - 4))
/* SCM device will be garded off next IPL due to failure */
#define PAPR_PMEM_HEALTH_FATAL		(1ULL << (63 - 5))
/* SCM contents cannot persist due to current platform health status */
#define PAPR_PMEM_HEALTH_UNHEALTHY	(1ULL << (63 - 6))
/* SCM device is unable to persist memory contents in certain conditions */
#define PAPR_PMEM_HEALTH_NON_CRITICAL	(1ULL << (63 - 7))
/* SCM device is encrypted */
#define PAPR_PMEM_ENCRYPTED		(1ULL << (63 - 8))
/* SCM device has been scrubbed and locked */
#define PAPR_PMEM_SCRUBBED_AND_LOCKED	(1ULL << (63 - 9))
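
/*
 * Note: the health bitmap uses PAPR/big-endian bit numbering, where bit 0 is
 * the most significant bit; hence the (63 - n) shifts above. For example,
 * PAPR_PMEM_UNARMED (bit 0) is 1ULL << 63, i.e. 0x8000000000000000.
 */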

/* Bit status indicators for health bitmap indicating unarmed dimm */
#define PAPR_PMEM_UNARMED_MASK		(PAPR_PMEM_UNARMED |		\
					PAPR_PMEM_HEALTH_UNHEALTHY)

/* Bit status indicators for health bitmap indicating unflushed dimm */
#define PAPR_PMEM_BAD_SHUTDOWN_MASK	(PAPR_PMEM_SHUTDOWN_DIRTY)

/* Bit status indicators for health bitmap indicating unrestored dimm */
#define PAPR_PMEM_BAD_RESTORE_MASK	(PAPR_PMEM_EMPTY)

/* Bit status indicators for smart event notification */
#define PAPR_PMEM_SMART_EVENT_MASK	(PAPR_PMEM_HEALTH_CRITICAL |	\
					PAPR_PMEM_HEALTH_FATAL |	\
					PAPR_PMEM_HEALTH_UNHEALTHY)

#define PAPR_SCM_PERF_STATS_EYECATCHER		__stringify(SCMSTATS)
#define PAPR_SCM_PERF_STATS_VERSION		0x1

/* Struct holding a single performance metric */
struct papr_scm_perf_stat {
	u8 stat_id[8];
	__be64 stat_val;
} __packed;

/* Struct exchanged between kernel and PHYP for fetching drc perf stats */
struct papr_scm_perf_stats {
	u8 eye_catcher[8];
	/* Should be PAPR_SCM_PERF_STATS_VERSION */
	__be32 stats_version;
	/* Number of stats following */
	__be32 num_statistics;
	/* zero or more performance metrics */
	struct papr_scm_perf_stat scm_statistic[];
} __packed;
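
/*
 * On-wire layout of the buffer exchanged with PHYP via
 * H_SCM_PERFORMANCE_STATS, as implied by the packed structs above:
 *
 *	Offset	Size	Field
 *	0	8	eye_catcher ("SCMSTATS")
 *	8	4	stats_version (0x1)
 *	12	4	num_statistics
 *	16	16*n	scm_statistic[n] (8-byte stat_id + 8-byte stat_val)
 */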

/* private struct associated with each region */
struct papr_scm_priv {
	struct platform_device *pdev;
	struct device_node *dn;
	uint32_t drc_index;
	uint64_t blocks;
	uint64_t block_size;
	int metadata_size;
	bool is_volatile;
	bool hcall_flush_required;

	uint64_t bound_addr;

	struct nvdimm_bus_descriptor bus_desc;
	struct nvdimm_bus *bus;
	struct nvdimm *nvdimm;
	struct resource res;
	struct nd_region *region;
	struct nd_interleave_set nd_set;
	struct list_head region_list;

	/* Protect dimm health data from concurrent read/writes */
	struct mutex health_mutex;

	/* Last time the health information of the dimm was updated */
	unsigned long lasthealth_jiffies;

	/* Health information for the dimm */
	u64 health_bitmap;

	/* Holds the last known dirty shutdown counter value */
	u64 dirty_shutdown_counter;

	/* length of the stat buffer as expected by phyp */
	size_t stat_buffer_len;

	/* The bits which need to be overridden */
	u64 health_bitmap_inject_mask;

	/* array to have event_code and stat_id mappings */
	u8 *nvdimm_events_map;
};

static int papr_scm_pmem_flush(struct nd_region *nd_region,
			       struct bio *bio __maybe_unused)
{
	struct papr_scm_priv *p = nd_region_provider_data(nd_region);
	unsigned long ret_buf[PLPAR_HCALL_BUFSIZE], token = 0;
	long rc;

	dev_dbg(&p->pdev->dev, "flush drc 0x%x", p->drc_index);

	do {
		rc = plpar_hcall(H_SCM_FLUSH, ret_buf, p->drc_index, token);
		token = ret_buf[0];

		/* Check if we are stalled for some time */
		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		} else if (rc == H_BUSY) {
			cond_resched();
		}
	} while (rc == H_BUSY);

	if (rc) {
		dev_err(&p->pdev->dev, "flush error: %ld", rc);
		rc = -EIO;
	} else {
		dev_dbg(&p->pdev->dev, "flush drc 0x%x complete", p->drc_index);
	}

	return rc;
}

static LIST_HEAD(papr_nd_regions);
static DEFINE_MUTEX(papr_ndr_lock);

static int drc_pmem_bind(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	uint64_t saved = 0;
	uint64_t token;
	int64_t rc;

	/*
	 * When the hypervisor cannot map all the requested memory in a single
	 * hcall it returns H_BUSY and we call again with the token until
	 * we get H_SUCCESS. Aborting the retry loop before getting H_SUCCESS
	 * leaves the system in an undefined state, so we wait.
	 */
	token = 0;

	do {
		rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
				p->blocks, BIND_ANY_ADDR, token);
		token = ret[0];
		if (!saved)
			saved = ret[1];
		cond_resched();
	} while (rc == H_BUSY);

	if (rc)
		return rc;

	p->bound_addr = saved;
	dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n",
		p->drc_index, (unsigned long)saved);
	return rc;
}

static void drc_pmem_unbind(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	uint64_t token = 0;
	int64_t rc;

	dev_dbg(&p->pdev->dev, "unbind drc 0x%x\n", p->drc_index);

	/* NB: unbind has the same retry requirements as drc_pmem_bind() */
	do {
		/* Unbind of all SCM resources associated with drcIndex */
		rc = plpar_hcall(H_SCM_UNBIND_ALL, ret, H_UNBIND_SCOPE_DRC,
				 p->drc_index, token);
		token = ret[0];

		/* Check if we are stalled for some time */
		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		} else if (rc == H_BUSY) {
			cond_resched();
		}
	} while (rc == H_BUSY);

	if (rc)
		dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
	else
		dev_dbg(&p->pdev->dev, "unbind drc 0x%x complete\n",
			p->drc_index);
}

static int drc_pmem_query_n_bind(struct papr_scm_priv *p)
{
	unsigned long start_addr;
	unsigned long end_addr;
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	int64_t rc;

	rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, ret,
			 p->drc_index, 0);
	if (rc)
		goto err_out;
	start_addr = ret[0];

	/* Make sure the full region is bound. */
	rc = plpar_hcall(H_SCM_QUERY_BLOCK_MEM_BINDING, ret,
			 p->drc_index, p->blocks - 1);
	if (rc)
		goto err_out;
	end_addr = ret[0];

	if ((end_addr - start_addr) != ((p->blocks - 1) * p->block_size))
		goto err_out;

	p->bound_addr = start_addr;
	dev_dbg(&p->pdev->dev, "bound drc 0x%x to 0x%lx\n", p->drc_index, start_addr);
	return rc;

err_out:
	dev_info(&p->pdev->dev,
		 "Failed to query, trying an unbind followed by bind");
	drc_pmem_unbind(p);
	return drc_pmem_bind(p);
}

/*
 * Query the Dimm performance stats from PHYP and copy them (if returned) to
 * the provided struct papr_scm_perf_stats instance 'buff_stats' that can hold
 * at least 'num_stats' stats plus the header.
 * - If buff_stats == NULL the return value is the size in bytes of the buffer
 * needed to hold all supported performance-statistics.
 * - If buff_stats != NULL and num_stats == 0 then we copy all known
 * performance-statistics to 'buff_stats', which is expected to be large
 * enough to hold them.
 * - If buff_stats != NULL and num_stats > 0 then copy the requested
 * performance-statistics to buff_stats.
 */
static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
				    struct papr_scm_perf_stats *buff_stats,
				    unsigned int num_stats)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	size_t size;
	s64 rc;

	/* Setup the out buffer */
	if (buff_stats) {
		memcpy(buff_stats->eye_catcher,
		       PAPR_SCM_PERF_STATS_EYECATCHER, 8);
		buff_stats->stats_version =
			cpu_to_be32(PAPR_SCM_PERF_STATS_VERSION);
		buff_stats->num_statistics =
			cpu_to_be32(num_stats);

		/*
		 * Calculate the buffer size based on num-stats provided
		 * or use the prefetched max buffer length.
		 */
		if (num_stats)
			/* Calculate size from the num_stats */
			size = sizeof(struct papr_scm_perf_stats) +
				num_stats * sizeof(struct papr_scm_perf_stat);
		else
			size = p->stat_buffer_len;
	} else {
		/* If there is no out buffer, ignore the size */
		size = 0;
	}

	/* Do the HCALL asking PHYP for info */
	rc = plpar_hcall(H_SCM_PERFORMANCE_STATS, ret, p->drc_index,
			 buff_stats ? virt_to_phys(buff_stats) : 0,
			 size);

	/* Check if the error was due to an unknown stat-id */
	if (rc == H_PARTIAL) {
		dev_err(&p->pdev->dev,
			"Unknown performance stats, Err:0x%016lX\n", ret[0]);
		return -ENOENT;
	} else if (rc == H_AUTHORITY) {
		dev_info(&p->pdev->dev,
			 "Permission denied while accessing performance stats");
		return -EPERM;
	} else if (rc == H_UNSUPPORTED) {
		dev_dbg(&p->pdev->dev, "Performance stats unsupported\n");
		return -EOPNOTSUPP;
	} else if (rc != H_SUCCESS) {
		dev_err(&p->pdev->dev,
			"Failed to query performance stats, Err:%lld\n", rc);
		return -EIO;
	}

	/* Handle case where stat buffer size was requested */
	if (buff_stats == NULL) {
		dev_dbg(&p->pdev->dev,
			"Performance stats size %ld\n", ret[0]);
		return ret[0];
	}

	/* Successfully fetched the requested stats from phyp */
	dev_dbg(&p->pdev->dev,
		"Performance stats returned %d stats\n",
		be32_to_cpu(buff_stats->num_statistics));
	return 0;
}
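
/*
 * Typical calling sequence for the helper above (illustrative; this is how
 * probe and the 'perf_stats' sysfs attribute use it):
 *
 *	ssize_t len = drc_pmem_query_stats(p, NULL, 0);	// probe buffer size
 *	if (len > 0) {
 *		stats = kzalloc(len, GFP_KERNEL);
 *		rc = drc_pmem_query_stats(p, stats, 0);	// fetch all stats
 *	}
 */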

#ifdef CONFIG_PERF_EVENTS
#define to_nvdimm_pmu(_pmu)	container_of(_pmu, struct nvdimm_pmu, pmu)

static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, u64 *count)
{
	struct papr_scm_perf_stat *stat;
	struct papr_scm_perf_stats *stats;
	struct papr_scm_priv *p = (struct papr_scm_priv *)dev->driver_data;
	int rc, size;

	/* Allocate a request buffer large enough to hold a single performance stat */
	size = sizeof(struct papr_scm_perf_stats) +
		sizeof(struct papr_scm_perf_stat);

	if (!p || !p->nvdimm_events_map)
		return -EINVAL;

	stats = kzalloc(size, GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	stat = &stats->scm_statistic[0];
	memcpy(&stat->stat_id,
	       &p->nvdimm_events_map[event->attr.config * sizeof(stat->stat_id)],
	       sizeof(stat->stat_id));

	rc = drc_pmem_query_stats(p, stats, 1);
	if (rc < 0) {
		kfree(stats);
		return rc;
	}

	*count = be64_to_cpu(stat->stat_val);
	kfree(stats);
	return 0;
}
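
/*
 * The perf event code maps to a stat-id via nvdimm_events_map:
 * event->attr.config selects the 8-byte stat_id stored at byte offset
 * (config * 8) in the map, which is then passed to PHYP via
 * drc_pmem_query_stats() to read a single counter value.
 */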

static int papr_scm_pmu_event_init(struct perf_event *event)
{
	struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
	struct papr_scm_priv *p;

	if (!nd_pmu)
		return -EINVAL;

	/* test the event attr type for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* it does not support event sampling mode */
	if (is_sampling_event(event))
		return -EOPNOTSUPP;

	/* no branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	p = (struct papr_scm_priv *)nd_pmu->dev->driver_data;
	if (!p)
		return -EINVAL;

	/* Invalid event code */
	if (event->attr.config == 0 || event->attr.config > 16)
		return -EINVAL;

	return 0;
}

static int papr_scm_pmu_add(struct perf_event *event, int flags)
{
	u64 count;
	int rc;
	struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);

	if (!nd_pmu)
		return -EINVAL;

	if (flags & PERF_EF_START) {
		rc = papr_scm_pmu_get_value(event, nd_pmu->dev, &count);
		if (rc)
			return rc;

		local64_set(&event->hw.prev_count, count);
	}

	return 0;
}

static void papr_scm_pmu_read(struct perf_event *event)
{
	u64 prev, now;
	int rc;
	struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);

	if (!nd_pmu)
		return;

	rc = papr_scm_pmu_get_value(event, nd_pmu->dev, &now);
	if (rc)
		return;

	prev = local64_xchg(&event->hw.prev_count, now);
	local64_add(now - prev, &event->count);
}

static void papr_scm_pmu_del(struct perf_event *event, int flags)
{
	papr_scm_pmu_read(event);
}
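
/*
 * PMU lifecycle summary: add() primes hw.prev_count with the current PHYP
 * counter value, read() accumulates the delta since the last read into
 * event->count, and del() performs a final read() so no delta is lost when
 * the event is removed.
 */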

static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu *nd_pmu)
{
	struct papr_scm_perf_stat *stat;
	struct papr_scm_perf_stats *stats;
	u32 available_events;
	int index, rc = 0;

	if (!p->stat_buffer_len)
		return -ENOENT;

	available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats))
		/ sizeof(struct papr_scm_perf_stat);
	if (available_events == 0)
		return -EOPNOTSUPP;

	/* Allocate the buffer for phyp where stats are written */
	stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	/* Called to get the list of events supported */
	rc = drc_pmem_query_stats(p, stats, 0);
	if (rc)
		goto out;

	/*
	 * Allocate memory and populate nvdimm_events_map.
	 * Allocate an extra element for the NULL entry.
	 */
	p->nvdimm_events_map = kcalloc(available_events + 1,
				       sizeof(stat->stat_id),
				       GFP_KERNEL);
	if (!p->nvdimm_events_map) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy all stat_ids to event map */
	for (index = 0, stat = stats->scm_statistic;
	     index < available_events; index++, ++stat) {
		memcpy(&p->nvdimm_events_map[index * sizeof(stat->stat_id)],
		       &stat->stat_id, sizeof(stat->stat_id));
	}
out:
	kfree(stats);
	return rc;
}

static void papr_scm_pmu_register(struct papr_scm_priv *p)
{
	struct nvdimm_pmu *nd_pmu;
	int rc, nodeid;

	nd_pmu = kzalloc(sizeof(*nd_pmu), GFP_KERNEL);
	if (!nd_pmu) {
		rc = -ENOMEM;
		goto pmu_err_print;
	}

	rc = papr_scm_pmu_check_events(p, nd_pmu);
	if (rc)
		goto pmu_check_events_err;

	nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
	nd_pmu->pmu.name = nvdimm_name(p->nvdimm);
	nd_pmu->pmu.event_init = papr_scm_pmu_event_init;
	nd_pmu->pmu.read = papr_scm_pmu_read;
	nd_pmu->pmu.add = papr_scm_pmu_add;
	nd_pmu->pmu.del = papr_scm_pmu_del;

	nd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_INTERRUPT |
				PERF_PMU_CAP_NO_EXCLUDE;

	/* Update the cpumask variable */
	nodeid = numa_map_to_online_node(dev_to_node(&p->pdev->dev));
	nd_pmu->arch_cpumask = *cpumask_of_node(nodeid);

	rc = register_nvdimm_pmu(nd_pmu, p->pdev);
	if (rc)
		goto pmu_register_err;

	/*
	 * Set archdata.priv value to the nvdimm_pmu structure, to handle the
	 * unregistering of the pmu device.
	 */
	p->pdev->archdata.priv = nd_pmu;
	return;

pmu_register_err:
	kfree(p->nvdimm_events_map);
pmu_check_events_err:
	kfree(nd_pmu);
pmu_err_print:
	dev_info(&p->pdev->dev, "nvdimm pmu didn't register rc=%d\n", rc);
}

#else
static void papr_scm_pmu_register(struct papr_scm_priv *p) { }
#endif

/*
 * Issue hcall to retrieve dimm health info and populate papr_scm_priv with the
 * health information.
 */
static int __drc_pmem_query_health(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	u64 bitmap = 0;
	long rc;

	/* issue the hcall */
	rc = plpar_hcall(H_SCM_HEALTH, ret, p->drc_index);
	if (rc == H_SUCCESS)
		bitmap = ret[0] & ret[1];
	else if (rc == H_FUNCTION)
		dev_info_once(&p->pdev->dev,
			      "Hcall H_SCM_HEALTH not implemented, assuming empty health bitmap");
	else {
		dev_err(&p->pdev->dev,
			"Failed to query health information, Err:%ld\n", rc);
		return -ENXIO;
	}

	p->lasthealth_jiffies = jiffies;
	/* Allow injecting specific health bits via inject mask. */
	if (p->health_bitmap_inject_mask)
		bitmap = (bitmap & ~p->health_bitmap_inject_mask) |
			p->health_bitmap_inject_mask;
	WRITE_ONCE(p->health_bitmap, bitmap);
	dev_dbg(&p->pdev->dev,
		"Queried dimm health info. Bitmap:0x%016lx Mask:0x%016lx\n",
		ret[0], ret[1]);

	return 0;
}

/* Min interval in seconds for assuming stable dimm health */
#define MIN_HEALTH_QUERY_INTERVAL 60

/* Query cached health info and if needed call __drc_pmem_query_health() */
static int drc_pmem_query_health(struct papr_scm_priv *p)
{
	unsigned long cache_timeout;
	int rc;

	/* Protect concurrent modifications to papr_scm_priv */
	rc = mutex_lock_interruptible(&p->health_mutex);
	if (rc)
		return rc;

	/* Jiffies offset for which the health data is assumed to be the same */
	cache_timeout = p->lasthealth_jiffies +
		msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000);

	/* Fetch new health info if it's older than MIN_HEALTH_QUERY_INTERVAL */
	if (time_after(jiffies, cache_timeout))
		rc = __drc_pmem_query_health(p);
	else
		/* Assume cached health data is valid */
		rc = 0;

	mutex_unlock(&p->health_mutex);
	return rc;
}

static int papr_scm_meta_get(struct papr_scm_priv *p,
			     struct nd_cmd_get_config_data_hdr *hdr)
{
	unsigned long data[PLPAR_HCALL_BUFSIZE];
	unsigned long offset, data_offset;
	int len, read;
	int64_t ret;

	if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
		return -EINVAL;

	for (len = hdr->in_length; len; len -= read) {

		data_offset = hdr->in_length - len;
		offset = hdr->in_offset + data_offset;

		if (len >= 8)
			read = 8;
		else if (len >= 4)
			read = 4;
		else if (len >= 2)
			read = 2;
		else
			read = 1;

		ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
				  offset, read);

		if (ret == H_PARAMETER) /* bad DRC index */
			return -ENODEV;
		if (ret)
			return -EINVAL; /* other invalid parameter */

		switch (read) {
		case 8:
			*(uint64_t *)(hdr->out_buf + data_offset) = be64_to_cpu(data[0]);
			break;
		case 4:
			*(uint32_t *)(hdr->out_buf + data_offset) = be32_to_cpu(data[0] & 0xffffffff);
			break;
		case 2:
			*(uint16_t *)(hdr->out_buf + data_offset) = be16_to_cpu(data[0] & 0xffff);
			break;
		case 1:
			*(uint8_t *)(hdr->out_buf + data_offset) = (data[0] & 0xff);
			break;
		}
	}
	return 0;
}
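
/*
 * Reads above are chunked in 8/4/2/1-byte units, matching the transfer sizes
 * used with H_SCM_READ_METADATA here and the max_xfer value of 8 reported to
 * libnvdimm in papr_scm_ndctl(ND_CMD_GET_CONFIG_SIZE).
 */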

static int papr_scm_meta_set(struct papr_scm_priv *p,
			     struct nd_cmd_set_config_hdr *hdr)
{
	unsigned long offset, data_offset;
	int len, wrote;
	unsigned long data;
	__be64 data_be;
	int64_t ret;

	if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
		return -EINVAL;

	for (len = hdr->in_length; len; len -= wrote) {

		data_offset = hdr->in_length - len;
		offset = hdr->in_offset + data_offset;

		if (len >= 8) {
			data = *(uint64_t *)(hdr->in_buf + data_offset);
			data_be = cpu_to_be64(data);
			wrote = 8;
		} else if (len >= 4) {
			data = *(uint32_t *)(hdr->in_buf + data_offset);
			data &= 0xffffffff;
			data_be = cpu_to_be32(data);
			wrote = 4;
		} else if (len >= 2) {
			data = *(uint16_t *)(hdr->in_buf + data_offset);
			data &= 0xffff;
			data_be = cpu_to_be16(data);
			wrote = 2;
		} else {
			data_be = *(uint8_t *)(hdr->in_buf + data_offset);
			data_be &= 0xff;
			wrote = 1;
		}

		ret = plpar_hcall_norets(H_SCM_WRITE_METADATA, p->drc_index,
					 offset, data_be, wrote);
		if (ret == H_PARAMETER) /* bad DRC index */
			return -ENODEV;
		if (ret)
			return -EINVAL; /* other invalid parameter */
	}

	return 0;
}

/*
 * Do sanity checks on the input args to the dimm-control function and return
 * '0' if valid. Validation of PDSM payloads happens later in
 * papr_scm_service_pdsm.
 */
static int is_cmd_valid(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			unsigned int buf_len)
{
	unsigned long cmd_mask = PAPR_SCM_DIMM_CMD_MASK;
	struct nd_cmd_pkg *nd_cmd;
	struct papr_scm_priv *p;
	enum papr_pdsm pdsm;

	/* Only dimm-specific calls are supported atm */
	if (!nvdimm)
		return -EINVAL;

	/* get the provider data from struct nvdimm */
	p = nvdimm_provider_data(nvdimm);

	if (!test_bit(cmd, &cmd_mask)) {
		dev_dbg(&p->pdev->dev, "Unsupported cmd=%u\n", cmd);
		return -EINVAL;
	}

	/* For CMD_CALL verify pdsm request */
	if (cmd == ND_CMD_CALL) {
		/* Verify the envelope and envelope size */
		if (!buf ||
		    buf_len < (sizeof(struct nd_cmd_pkg) + ND_PDSM_HDR_SIZE)) {
			dev_dbg(&p->pdev->dev, "Invalid pkg size=%u\n",
				buf_len);
			return -EINVAL;
		}

		/* Verify that the nd_cmd_pkg.nd_family is correct */
		nd_cmd = (struct nd_cmd_pkg *)buf;

		if (nd_cmd->nd_family != NVDIMM_FAMILY_PAPR) {
			dev_dbg(&p->pdev->dev, "Invalid pkg family=0x%llx\n",
				nd_cmd->nd_family);
			return -EINVAL;
		}

		pdsm = (enum papr_pdsm)nd_cmd->nd_command;

		/* Verify if the pdsm command is valid */
		if (pdsm <= PAPR_PDSM_MIN || pdsm >= PAPR_PDSM_MAX) {
			dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid PDSM\n",
				pdsm);
			return -EINVAL;
		}

		/* Have enough space to hold returned 'nd_pkg_pdsm' header */
		if (nd_cmd->nd_size_out < ND_PDSM_HDR_SIZE) {
			dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid payload\n",
				pdsm);
			return -EINVAL;
		}
	}

	/* Let the command be further processed */
	return 0;
}

static int papr_pdsm_fuel_gauge(struct papr_scm_priv *p,
				union nd_pdsm_payload *payload)
{
	int rc, size;
	u64 statval;
	struct papr_scm_perf_stat *stat;
	struct papr_scm_perf_stats *stats;

	/* Silently fail if fetching performance metrics isn't supported */
	if (!p->stat_buffer_len)
		return 0;

	/* Allocate a request buffer large enough to hold a single performance stat */
	size = sizeof(struct papr_scm_perf_stats) +
		sizeof(struct papr_scm_perf_stat);

	stats = kzalloc(size, GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	stat = &stats->scm_statistic[0];
	memcpy(&stat->stat_id, "MemLife ", sizeof(stat->stat_id));

	/* Fetch the fuel gauge and populate it in payload */
	rc = drc_pmem_query_stats(p, stats, 1);
	if (rc < 0) {
		dev_dbg(&p->pdev->dev, "Err(%d) fetching fuel gauge\n", rc);
		goto free_stats;
	}

	statval = be64_to_cpu(stat->stat_val);
	dev_dbg(&p->pdev->dev,
		"Fetched fuel-gauge %llu", statval);
	payload->health.extension_flags |=
		PDSM_DIMM_HEALTH_RUN_GAUGE_VALID;
	payload->health.dimm_fuel_gauge = statval;

	rc = sizeof(struct nd_papr_pdsm_health);

free_stats:
	kfree(stats);
	return rc;
}

/* Add the dirty-shutdown-counter value to the pdsm */
static int papr_pdsm_dsc(struct papr_scm_priv *p,
			 union nd_pdsm_payload *payload)
{
	payload->health.extension_flags |= PDSM_DIMM_DSC_VALID;
	payload->health.dimm_dsc = p->dirty_shutdown_counter;

	return sizeof(struct nd_papr_pdsm_health);
}

/* Fetch the DIMM health info and populate it in the provided package. */
static int papr_pdsm_health(struct papr_scm_priv *p,
			    union nd_pdsm_payload *payload)
{
	int rc;

	/* Ensure dimm health mutex is taken, preventing concurrent access */
	rc = mutex_lock_interruptible(&p->health_mutex);
	if (rc)
		goto out;

	/* Always fetch up-to-date dimm health data, ignoring cached values */
	rc = __drc_pmem_query_health(p);
	if (rc) {
		mutex_unlock(&p->health_mutex);
		goto out;
	}

	/* update health struct with various flags derived from health bitmap */
	payload->health = (struct nd_papr_pdsm_health) {
		.extension_flags = 0,
		.dimm_unarmed = !!(p->health_bitmap & PAPR_PMEM_UNARMED_MASK),
		.dimm_bad_shutdown = !!(p->health_bitmap & PAPR_PMEM_BAD_SHUTDOWN_MASK),
		.dimm_bad_restore = !!(p->health_bitmap & PAPR_PMEM_BAD_RESTORE_MASK),
		.dimm_scrubbed = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
		.dimm_locked = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
		.dimm_encrypted = !!(p->health_bitmap & PAPR_PMEM_ENCRYPTED),
		.dimm_health = PAPR_PDSM_DIMM_HEALTHY,
	};

	/* Update field dimm_health based on health_bitmap flags */
	if (p->health_bitmap & PAPR_PMEM_HEALTH_FATAL)
		payload->health.dimm_health = PAPR_PDSM_DIMM_FATAL;
	else if (p->health_bitmap & PAPR_PMEM_HEALTH_CRITICAL)
		payload->health.dimm_health = PAPR_PDSM_DIMM_CRITICAL;
	else if (p->health_bitmap & PAPR_PMEM_HEALTH_UNHEALTHY)
		payload->health.dimm_health = PAPR_PDSM_DIMM_UNHEALTHY;

	/* struct populated, hence we can release the mutex now */
	mutex_unlock(&p->health_mutex);

	/* Populate the fuel gauge meter in the payload */
	papr_pdsm_fuel_gauge(p, payload);
	/* Populate the dirty-shutdown-counter field */
	papr_pdsm_dsc(p, payload);

	rc = sizeof(struct nd_papr_pdsm_health);

out:
	return rc;
}

/* Inject a smart error into the health bitmap reported for the dimm */
static int papr_pdsm_smart_inject(struct papr_scm_priv *p,
				  union nd_pdsm_payload *payload)
{
	int rc;
	u32 supported_flags = 0;
	u64 inject_mask = 0, clear_mask = 0;
	u64 mask;

	/* Check for individual smart error flags and update inject/clear masks */
	if (payload->smart_inject.flags & PDSM_SMART_INJECT_HEALTH_FATAL) {
		supported_flags |= PDSM_SMART_INJECT_HEALTH_FATAL;
		if (payload->smart_inject.fatal_enable)
			inject_mask |= PAPR_PMEM_HEALTH_FATAL;
		else
			clear_mask |= PAPR_PMEM_HEALTH_FATAL;
	}

	if (payload->smart_inject.flags & PDSM_SMART_INJECT_BAD_SHUTDOWN) {
		supported_flags |= PDSM_SMART_INJECT_BAD_SHUTDOWN;
		if (payload->smart_inject.unsafe_shutdown_enable)
			inject_mask |= PAPR_PMEM_SHUTDOWN_DIRTY;
		else
			clear_mask |= PAPR_PMEM_SHUTDOWN_DIRTY;
	}

	dev_dbg(&p->pdev->dev, "[Smart-inject] inject_mask=%#llx clear_mask=%#llx\n",
		inject_mask, clear_mask);

	/* Prevent concurrent access to dimm health bitmap related members */
	rc = mutex_lock_interruptible(&p->health_mutex);
	if (rc)
		return rc;

	/* Use inject/clear masks to set health_bitmap_inject_mask */
	mask = READ_ONCE(p->health_bitmap_inject_mask);
	mask = (mask & ~clear_mask) | inject_mask;
	WRITE_ONCE(p->health_bitmap_inject_mask, mask);

	/* Invalidate cached health bitmap */
	p->lasthealth_jiffies = 0;

	mutex_unlock(&p->health_mutex);

	/* Return the supported flags back to userspace */
	payload->smart_inject.flags = supported_flags;

	return sizeof(struct nd_papr_pdsm_health);
}

/*
 * 'struct pdsm_cmd_desc'
 * Identifies supported PDSMs' expected length of in/out payloads
 * and pdsm service function.
 *
 * size_in	: Size of input payload if any in the PDSM request.
 * size_out	: Size of output payload if any in the PDSM request.
 * service	: Service function for the PDSM request. Return semantics:
 *		  rc < 0 : Error servicing PDSM and rc indicates the error.
 *		  rc >= 0: Serviced successfully and 'rc' indicates the number
 *			   of bytes written to the payload.
 */
struct pdsm_cmd_desc {
	u32 size_in;
	u32 size_out;
	int (*service)(struct papr_scm_priv *dimm,
		       union nd_pdsm_payload *payload);
};

/* Holds all supported PDSMs' command descriptors */
static const struct pdsm_cmd_desc __pdsm_cmd_descriptors[] = {
	[PAPR_PDSM_MIN] = {
		.size_in = 0,
		.size_out = 0,
		.service = NULL,
	},
	/* New PDSM command descriptors to be added below */

	[PAPR_PDSM_HEALTH] = {
		.size_in = 0,
		.size_out = sizeof(struct nd_papr_pdsm_health),
		.service = papr_pdsm_health,
	},

	[PAPR_PDSM_SMART_INJECT] = {
		.size_in = sizeof(struct nd_papr_pdsm_smart_inject),
		.size_out = sizeof(struct nd_papr_pdsm_smart_inject),
		.service = papr_pdsm_smart_inject,
	},

	/* Empty */
	[PAPR_PDSM_MAX] = {
		.size_in = 0,
		.size_out = 0,
		.service = NULL,
	},
};

/* Given a valid pdsm cmd return its command descriptor else return NULL */
static inline const struct pdsm_cmd_desc *pdsm_cmd_desc(enum papr_pdsm cmd)
{
	if (cmd >= 0 && cmd < ARRAY_SIZE(__pdsm_cmd_descriptors))
		return &__pdsm_cmd_descriptors[cmd];

	return NULL;
}
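
/*
 * Example lookup: for an ND_CMD_CALL carrying nd_command == PAPR_PDSM_HEALTH,
 * pdsm_cmd_desc() returns the descriptor whose service callback is
 * papr_pdsm_health and whose size_out is sizeof(struct nd_papr_pdsm_health).
 */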

/*
 * For a given pdsm request call an appropriate service function.
 * Returns errors if any while handling the pdsm command package.
 */
static int papr_scm_service_pdsm(struct papr_scm_priv *p,
				 struct nd_cmd_pkg *pkg)
{
	/* Get the PDSM header and PDSM command */
	struct nd_pkg_pdsm *pdsm_pkg = (struct nd_pkg_pdsm *)pkg->nd_payload;
	enum papr_pdsm pdsm = (enum papr_pdsm)pkg->nd_command;
	const struct pdsm_cmd_desc *pdsc;
	int rc;

	/* Fetch corresponding pdsm descriptor for validation and servicing */
	pdsc = pdsm_cmd_desc(pdsm);

	/* Validate pdsm descriptor */
	/* Ensure that reserved fields are 0 */
	if (pdsm_pkg->reserved[0] || pdsm_pkg->reserved[1]) {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid reserved field\n",
			pdsm);
		return -EINVAL;
	}

	/* If pdsm expects some input, then ensure that the size_in matches */
	if (pdsc->size_in &&
	    pkg->nd_size_in != (pdsc->size_in + ND_PDSM_HDR_SIZE)) {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_in=%d\n",
			pdsm, pkg->nd_size_in);
		return -EINVAL;
	}

	/* If pdsm wants to return data, then ensure that size_out matches */
	if (pdsc->size_out &&
	    pkg->nd_size_out != (pdsc->size_out + ND_PDSM_HDR_SIZE)) {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_out=%d\n",
			pdsm, pkg->nd_size_out);
		return -EINVAL;
	}

	/* Service the pdsm */
	if (pdsc->service) {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Servicing...\n", pdsm);

		rc = pdsc->service(p, &pdsm_pkg->payload);

		if (rc < 0) {
			/* error encountered while servicing pdsm */
			pdsm_pkg->cmd_status = rc;
			pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
		} else {
			/* pdsm serviced and 'rc' bytes written to payload */
			pdsm_pkg->cmd_status = 0;
			pkg->nd_fw_size = ND_PDSM_HDR_SIZE + rc;
		}
	} else {
		dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Unsupported PDSM request\n",
			pdsm);
		pdsm_pkg->cmd_status = -ENOENT;
		pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
	}

	return pdsm_pkg->cmd_status;
}
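
/*
 * Layout of a PDSM request envelope, as validated above (sizes as used by
 * this driver):
 *
 *	struct nd_cmd_pkg	| nd_family, nd_command, nd_size_in/out, ...
 *	nd_payload:
 *	  struct nd_pkg_pdsm	| cmd_status and reserved fields
 *				| (ND_PDSM_HDR_SIZE bytes)
 *	  union nd_pdsm_payload	| pdsc->size_in / pdsc->size_out bytes
 */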

static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
			  struct nvdimm *nvdimm, unsigned int cmd, void *buf,
			  unsigned int buf_len, int *cmd_rc)
{
	struct nd_cmd_get_config_size *get_size_hdr;
	struct nd_cmd_pkg *call_pkg = NULL;
	struct papr_scm_priv *p;
	int rc;

	rc = is_cmd_valid(nvdimm, cmd, buf, buf_len);
	if (rc) {
		pr_debug("Invalid cmd=0x%x. Err=%d\n", cmd, rc);
		return rc;
	}

	/* Use a local variable in case cmd_rc pointer is NULL */
	if (!cmd_rc)
		cmd_rc = &rc;

	p = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		get_size_hdr = buf;

		get_size_hdr->status = 0;
		get_size_hdr->max_xfer = 8;
		get_size_hdr->config_size = p->metadata_size;
		*cmd_rc = 0;
		break;

	case ND_CMD_GET_CONFIG_DATA:
		*cmd_rc = papr_scm_meta_get(p, buf);
		break;

	case ND_CMD_SET_CONFIG_DATA:
		*cmd_rc = papr_scm_meta_set(p, buf);
		break;

	case ND_CMD_CALL:
		call_pkg = (struct nd_cmd_pkg *)buf;
		*cmd_rc = papr_scm_service_pdsm(p, call_pkg);
		break;

	default:
		dev_dbg(&p->pdev->dev, "Unknown command = %d\n", cmd);
		*cmd_rc = -EINVAL;
	}

	dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc);

	return 0;
}

static ssize_t health_bitmap_inject_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct nvdimm *dimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(dimm);

	return sprintf(buf, "%#llx\n",
		       READ_ONCE(p->health_bitmap_inject_mask));
}

static DEVICE_ATTR_ADMIN_RO(health_bitmap_inject);
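
/*
 * The current inject mask is readable from sysfs, e.g. (path illustrative;
 * the attribute group hangs off the nmemX dimm device):
 *
 *	# cat /sys/bus/nd/devices/nmem0/papr/health_bitmap_inject
 *	0x0
 */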

static ssize_t perf_stats_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	int index;
	ssize_t rc;
	struct seq_buf s;
	struct papr_scm_perf_stat *stat;
	struct papr_scm_perf_stats *stats;
	struct nvdimm *dimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(dimm);

	if (!p->stat_buffer_len)
		return -ENOENT;

	/* Allocate the buffer for phyp where stats are written */
	stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	/* Ask phyp to return all dimm perf stats */
	rc = drc_pmem_query_stats(p, stats, 0);
	if (rc)
		goto free_stats;
	/*
	 * Go through the returned output buffer and print stats and
	 * values. Since stat_id is essentially a char string of
	 * 8 bytes, simply use the string format specifier to print it.
	 */
	seq_buf_init(&s, buf, PAGE_SIZE);
	for (index = 0, stat = stats->scm_statistic;
	     index < be32_to_cpu(stats->num_statistics);
	     ++index, ++stat) {
		seq_buf_printf(&s, "%.8s = 0x%016llX\n",
			       stat->stat_id,
			       be64_to_cpu(stat->stat_val));
	}

free_stats:
	kfree(stats);
	return rc ? rc : (ssize_t)seq_buf_used(&s);
}
static DEVICE_ATTR_ADMIN_RO(perf_stats);

static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct nvdimm *dimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(dimm);
	struct seq_buf s;
	u64 health;
	int rc;

	rc = drc_pmem_query_health(p);
	if (rc)
		return rc;

	/* Copy health_bitmap locally, check masks & update out buffer */
	health = READ_ONCE(p->health_bitmap);

	seq_buf_init(&s, buf, PAGE_SIZE);
	if (health & PAPR_PMEM_UNARMED_MASK)
		seq_buf_printf(&s, "not_armed ");

	if (health & PAPR_PMEM_BAD_SHUTDOWN_MASK)
		seq_buf_printf(&s, "flush_fail ");

	if (health & PAPR_PMEM_BAD_RESTORE_MASK)
		seq_buf_printf(&s, "restore_fail ");

	if (health & PAPR_PMEM_ENCRYPTED)
		seq_buf_printf(&s, "encrypted ");

	if (health & PAPR_PMEM_SMART_EVENT_MASK)
		seq_buf_printf(&s, "smart_notify ");

	if (health & PAPR_PMEM_SCRUBBED_AND_LOCKED)
		seq_buf_printf(&s, "scrubbed locked ");

	if (seq_buf_used(&s))
		seq_buf_printf(&s, "\n");

	return seq_buf_used(&s);
}
DEVICE_ATTR_RO(flags);
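
/*
 * Example 'flags' output for a dimm whose health bitmap has
 * PAPR_PMEM_SHUTDOWN_DIRTY and PAPR_PMEM_HEALTH_FATAL set (illustrative):
 *
 *	flush_fail smart_notify
 */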

static ssize_t dirty_shutdown_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct nvdimm *dimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(dimm);

	return sysfs_emit(buf, "%llu\n", p->dirty_shutdown_counter);
}
DEVICE_ATTR_RO(dirty_shutdown);

static umode_t papr_nd_attribute_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);

	/* If perf-stats aren't available, hide the perf_stats sysfs attribute */
	if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
		return 0;

	return attr->mode;
}

/* papr_scm specific dimm attributes */
static struct attribute *papr_nd_attributes[] = {
	&dev_attr_flags.attr,
	&dev_attr_perf_stats.attr,
	&dev_attr_dirty_shutdown.attr,
	&dev_attr_health_bitmap_inject.attr,
	NULL,
};

static const struct attribute_group papr_nd_attribute_group = {
	.name = "papr",
	.is_visible = papr_nd_attribute_visible,
	.attrs = papr_nd_attributes,
};

static const struct attribute_group *papr_nd_attr_groups[] = {
	&papr_nd_attribute_group,
	NULL,
};

static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
	struct device *dev = &p->pdev->dev;
	struct nd_mapping_desc mapping;
	struct nd_region_desc ndr_desc;
	unsigned long dimm_flags;
	int target_nid, online_nid;

	p->bus_desc.ndctl = papr_scm_ndctl;
	p->bus_desc.module = THIS_MODULE;
	p->bus_desc.of_node = p->pdev->dev.of_node;
	p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);

	/* Set the dimm command family mask to accept PDSMs */
	set_bit(NVDIMM_FAMILY_PAPR, &p->bus_desc.dimm_family_mask);

	if (!p->bus_desc.provider_name)
		return -ENOMEM;

	p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
	if (!p->bus) {
		dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
		kfree(p->bus_desc.provider_name);
		return -ENXIO;
	}

	dimm_flags = 0;
	set_bit(NDD_LABELING, &dimm_flags);

	/*
	 * Check if the nvdimm is unarmed. No locking needed as we are still
	 * initializing. Ignore error encountered if any.
	 */
	__drc_pmem_query_health(p);

	if (p->health_bitmap & PAPR_PMEM_UNARMED_MASK)
		set_bit(NDD_UNARMED, &dimm_flags);

	p->nvdimm = nvdimm_create(p->bus, p, papr_nd_attr_groups,
				  dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
	if (!p->nvdimm) {
		dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
		goto err;
	}

	if (nvdimm_bus_check_dimm_count(p->bus, 1))
		goto err;

	/* now add the region */

	memset(&mapping, 0, sizeof(mapping));
	mapping.nvdimm = p->nvdimm;
	mapping.start = 0;
	mapping.size = p->blocks * p->block_size; // XXX: potential overflow?

	memset(&ndr_desc, 0, sizeof(ndr_desc));
	target_nid = dev_to_node(&p->pdev->dev);
	online_nid = numa_map_to_online_node(target_nid);
	ndr_desc.numa_node = online_nid;
	ndr_desc.target_node = target_nid;
	ndr_desc.res = &p->res;
	ndr_desc.of_node = p->dn;
	ndr_desc.provider_data = p;
	ndr_desc.mapping = &mapping;
	ndr_desc.num_mappings = 1;
	ndr_desc.nd_set = &p->nd_set;

	if (p->hcall_flush_required) {
		set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
		ndr_desc.flush = papr_scm_pmem_flush;
	}

	if (p->is_volatile)
		p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
	else {
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
		p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
	}
	if (!p->region) {
		dev_err(dev, "Error registering region %pR from %pOF\n",
				ndr_desc.res, p->dn);
		goto err;
	}

	if (target_nid != online_nid)
		dev_info(dev, "Region registered with target node %d and online node %d",
			 target_nid, online_nid);

	mutex_lock(&papr_ndr_lock);
	list_add_tail(&p->region_list, &papr_nd_regions);
	mutex_unlock(&papr_ndr_lock);

	return 0;

err:	nvdimm_bus_unregister(p->bus);
	kfree(p->bus_desc.provider_name);
	return -ENXIO;
}

static void papr_scm_add_badblock(struct nd_region *region,
				  struct nvdimm_bus *bus, u64 phys_addr)
{
	u64 aligned_addr = ALIGN_DOWN(phys_addr, L1_CACHE_BYTES);

	if (nvdimm_bus_add_badrange(bus, aligned_addr, L1_CACHE_BYTES)) {
		pr_err("Bad block registration for 0x%llx failed\n", phys_addr);
		return;
	}

	pr_debug("Add memory range (0x%llx - 0x%llx) as bad range\n",
		 aligned_addr, aligned_addr + L1_CACHE_BYTES);

	nvdimm_region_notify(region, NVDIMM_REVALIDATE_POISON);
}

static int handle_mce_ue(struct notifier_block *nb, unsigned long val,
			 void *data)
{
	struct machine_check_event *evt = data;
	struct papr_scm_priv *p;
	u64 phys_addr;
	bool found = false;

	if (evt->error_type != MCE_ERROR_TYPE_UE)
		return NOTIFY_DONE;

	if (list_empty(&papr_nd_regions))
		return NOTIFY_DONE;

	/*
	 * The physical address obtained here is PAGE_SIZE aligned, so get the
	 * exact address from the effective address.
	 */
	phys_addr = evt->u.ue_error.physical_address +
			(evt->u.ue_error.effective_address & ~PAGE_MASK);

	if (!evt->u.ue_error.physical_address_provided ||
	    !is_zone_device_page(pfn_to_page(phys_addr >> PAGE_SHIFT)))
		return NOTIFY_DONE;

	/* mce notifier is called from a process context, so mutex is safe */
	mutex_lock(&papr_ndr_lock);
	list_for_each_entry(p, &papr_nd_regions, region_list) {
		if (phys_addr >= p->res.start && phys_addr <= p->res.end) {
			found = true;
			break;
		}
	}

	if (found)
		papr_scm_add_badblock(p->region, p->bus, phys_addr);

	mutex_unlock(&papr_ndr_lock);

	return found ? NOTIFY_OK : NOTIFY_DONE;
}

static struct notifier_block mce_ue_nb = {
	.notifier_call = handle_mce_ue
};

static int papr_scm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 drc_index, metadata_size;
	u64 blocks, block_size;
	struct papr_scm_priv *p;
	u8 uuid_raw[UUID_SIZE];
	const char *uuid_str;
	ssize_t stat_size;
	uuid_t uuid;
	int rc;

	/* check we have all the required DT properties */
	if (of_property_read_u32(dn, "ibm,my-drc-index", &drc_index)) {
		dev_err(&pdev->dev, "%pOF: missing drc-index!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
		dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
		dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
		dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
		return -ENODEV;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* Initialize the dimm mutex */
	mutex_init(&p->health_mutex);

	/* optional DT properties */
	of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);

	p->dn = dn;
	p->drc_index = drc_index;
	p->block_size = block_size;
	p->blocks = blocks;
	p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
	p->hcall_flush_required = of_property_read_bool(dn, "ibm,hcall-flush-required");

	if (of_property_read_u64(dn, "ibm,persistence-failed-count",
				 &p->dirty_shutdown_counter))
		p->dirty_shutdown_counter = 0;

	/* We just need to ensure that interleave-set cookies are unique across dimms */
	uuid_parse(uuid_str, &uuid);

	/*
	 * The cookie1 and cookie2 are not really little endian.
	 * We store a raw buffer representation of the
	 * uuid string so that we can compare this with the label
	 * area cookie irrespective of the endian configuration
	 * with which the kernel is built.
	 *
	 * Historically we stored the cookie in the below format.
	 * for a uuid string 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa
	 *	cookie1 was 0xfd423b0b671b5172
	 *	cookie2 was 0xaabce8cae35b1d8d
	 */
	export_uuid(uuid_raw, &uuid);
	p->nd_set.cookie1 = get_unaligned_le64(&uuid_raw[0]);
	p->nd_set.cookie2 = get_unaligned_le64(&uuid_raw[8]);

	/* might be zero */
	p->metadata_size = metadata_size;
	p->pdev = pdev;

	/* request the hypervisor to bind this region to somewhere in memory */
	rc = drc_pmem_bind(p);

	/* If phyp says drc memory is still bound then force unbind and retry */
	if (rc == H_OVERLAP)
		rc = drc_pmem_query_n_bind(p);

	if (rc != H_SUCCESS) {
		dev_err(&p->pdev->dev, "bind err: %d\n", rc);
		rc = -ENXIO;
		goto err;
	}

	/* setup the resource for the newly bound range */
	p->res.start = p->bound_addr;
	p->res.end   = p->bound_addr + p->blocks * p->block_size - 1;
	p->res.name  = pdev->name;
	p->res.flags = IORESOURCE_MEM;

	/* Try retrieving the stat buffer and see if it's supported */
	stat_size = drc_pmem_query_stats(p, NULL, 0);
	if (stat_size > 0) {
		p->stat_buffer_len = stat_size;
		dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
			p->stat_buffer_len);
	}

	rc = papr_scm_nvdimm_init(p);
	if (rc)
		goto err2;

	platform_set_drvdata(pdev, p);
	papr_scm_pmu_register(p);

	return 0;

err2:	drc_pmem_unbind(p);
err:	kfree(p);
	return rc;
}
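
/*
 * Device-tree properties consumed by probe, for reference:
 *
 *	required: ibm,my-drc-index, ibm,block-size, ibm,number-of-blocks,
 *		  ibm,unit-guid
 *	optional: ibm,metadata-size, ibm,cache-flush-required (region is
 *		  volatile when absent), ibm,hcall-flush-required,
 *		  ibm,persistence-failed-count
 */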

static int papr_scm_remove(struct platform_device *pdev)
{
	struct papr_scm_priv *p = platform_get_drvdata(pdev);

	mutex_lock(&papr_ndr_lock);
	list_del(&p->region_list);
	mutex_unlock(&papr_ndr_lock);

	nvdimm_bus_unregister(p->bus);
	drc_pmem_unbind(p);

	if (pdev->archdata.priv)
		unregister_nvdimm_pmu(pdev->archdata.priv);

	pdev->archdata.priv = NULL;
	kfree(p->nvdimm_events_map);
	kfree(p->bus_desc.provider_name);
	kfree(p);

	return 0;
}

static const struct of_device_id papr_scm_match[] = {
	{ .compatible = "ibm,pmemory" },
	{ .compatible = "ibm,pmemory-v2" },
	{ },
};

static struct platform_driver papr_scm_driver = {
	.probe = papr_scm_probe,
	.remove = papr_scm_remove,
	.driver = {
		.name = "papr_scm",
		.of_match_table = papr_scm_match,
	},
};

static int __init papr_scm_init(void)
{
	int ret;

	ret = platform_driver_register(&papr_scm_driver);
	if (!ret)
		mce_register_notifier(&mce_ue_nb);

	return ret;
}
module_init(papr_scm_init);

static void __exit papr_scm_exit(void)
{
	mce_unregister_notifier(&mce_ue_nb);
	platform_driver_unregister(&papr_scm_driver);
}
module_exit(papr_scm_exit);

MODULE_DEVICE_TABLE(of, papr_scm_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");