// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/libnvdimm.h>
#include <asm/unaligned.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/ndctl.h>
#include <linux/async.h>
#include <linux/slab.h>
#include <linux/nd.h>
#include "cxlmem.h"
#include "cxl.h"

/*
 * Ordered workqueue for cxl nvdimm device arrival and departure, used
 * to coordinate bus rescans when a bridge arrives and to trigger
 * remove operations when the bridge is removed.
 */
static struct workqueue_struct *cxl_pmem_wq;

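/*
 * Mailbox commands that are reserved for kernel-only use while a
 * cxl_nvdimm is bound (see cxl_pmem_init()), so userspace cannot issue
 * them behind the driver's back.
 */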
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);

static void clear_exclusive(void *cxlds)
{
        clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
}

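/*
 * devm teardown for a cxl_nvdimm: force-release any pmem regions still
 * pinned to this nvdimm, then delete the nvdimm itself. The bridge
 * lock is dropped around device_release_driver() because region
 * teardown (release_mappings()) takes the same lock.
 */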
static void unregister_nvdimm(void *nvdimm)
{
        struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
        struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge;
        struct cxl_pmem_region *cxlr_pmem;
        unsigned long index;

        device_lock(&cxl_nvb->dev);
        dev_set_drvdata(&cxl_nvd->dev, NULL);
        xa_for_each(&cxl_nvd->pmem_regions, index, cxlr_pmem) {
                get_device(&cxlr_pmem->dev);
                device_unlock(&cxl_nvb->dev);

                device_release_driver(&cxlr_pmem->dev);
                put_device(&cxlr_pmem->dev);

                device_lock(&cxl_nvb->dev);
        }
        device_unlock(&cxl_nvb->dev);

        nvdimm_delete(nvdimm);
        cxl_nvd->bridge = NULL;
}

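/*
 * Bind a cxl_nvdimm to an nvdimm on the bridge's nvdimm_bus,
 * advertising the label (config data) commands and reserving the
 * corresponding mailbox commands for kernel-only use for the duration
 * of the bind.
 */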
static int cxl_nvdimm_probe(struct device *dev)
{
        struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
        struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
        unsigned long flags = 0, cmd_mask = 0;
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct nvdimm *nvdimm;
        int rc;

        cxl_nvb = cxl_find_nvdimm_bridge(dev);
        if (!cxl_nvb)
                return -ENXIO;

        device_lock(&cxl_nvb->dev);
        if (!cxl_nvb->nvdimm_bus) {
                rc = -ENXIO;
                goto out;
        }

        set_exclusive_cxl_commands(cxlds, exclusive_cmds);
        rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
        if (rc)
                goto out;

        set_bit(NDD_LABELING, &flags);
        set_bit(NDD_REGISTER_SYNC, &flags);
        set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
        set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
        set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
        nvdimm = nvdimm_create(cxl_nvb->nvdimm_bus, cxl_nvd, NULL, flags,
                               cmd_mask, 0, NULL);
        if (!nvdimm) {
                rc = -ENOMEM;
                goto out;
        }

        dev_set_drvdata(dev, nvdimm);
        cxl_nvd->bridge = cxl_nvb;
        rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
        device_unlock(&cxl_nvb->dev);
        put_device(&cxl_nvb->dev);

        return rc;
}

static struct cxl_driver cxl_nvdimm_driver = {
        .name = "cxl_nvdimm",
        .probe = cxl_nvdimm_probe,
        .id = CXL_DEVICE_NVDIMM,
};

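/*
 * ND_CMD_GET_CONFIG_SIZE: report the label storage area size, and the
 * largest transfer that fits in a mailbox payload alongside the Set
 * LSA command header.
 */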
static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
                                    struct nd_cmd_get_config_size *cmd,
                                    unsigned int buf_len)
{
        if (sizeof(*cmd) > buf_len)
                return -EINVAL;

        *cmd = (struct nd_cmd_get_config_size) {
                 .config_size = cxlds->lsa_size,
                 .max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa),
        };

        return 0;
}

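/*
 * ND_CMD_GET_CONFIG_DATA: validate that the caller's buffer covers the
 * requested length, then translate the label area read into a Get LSA
 * mailbox command.
 */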
static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
                                    struct nd_cmd_get_config_data_hdr *cmd,
                                    unsigned int buf_len)
{
        struct cxl_mbox_get_lsa get_lsa;
        int rc;

        if (sizeof(*cmd) > buf_len)
                return -EINVAL;
        if (struct_size(cmd, out_buf, cmd->in_length) > buf_len)
                return -EINVAL;

        get_lsa = (struct cxl_mbox_get_lsa) {
                .offset = cpu_to_le32(cmd->in_offset),
                .length = cpu_to_le32(cmd->in_length),
        };

        rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
                               sizeof(get_lsa), cmd->out_buf, cmd->in_length);
        cmd->status = 0;

        return rc;
}

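/*
 * ND_CMD_SET_CONFIG_DATA: stage the caller's label data into a
 * kvzalloc()'d Set LSA mailbox payload; in_length is caller-provided,
 * so the allocation may exceed a page.
 */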
static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
                                    struct nd_cmd_set_config_hdr *cmd,
                                    unsigned int buf_len)
{
        struct cxl_mbox_set_lsa *set_lsa;
        int rc;

        if (sizeof(*cmd) > buf_len)
                return -EINVAL;

        /* 4-byte status follows the input data in the payload */
        if (size_add(struct_size(cmd, in_buf, cmd->in_length), 4) > buf_len)
                return -EINVAL;

        set_lsa =
                kvzalloc(struct_size(set_lsa, data, cmd->in_length), GFP_KERNEL);
        if (!set_lsa)
                return -ENOMEM;

        *set_lsa = (struct cxl_mbox_set_lsa) {
                .offset = cpu_to_le32(cmd->in_offset),
        };
        memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);

        rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa,
                               struct_size(set_lsa, data, cmd->in_length),
                               NULL, 0);

        /*
         * Set "firmware" status (4 packed bytes at the end of the input
         * payload).
         */
        put_unaligned(0, (u32 *) &cmd->in_buf[cmd->in_length]);
        kvfree(set_lsa);

        return rc;
}

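/*
 * Dispatch the ND_CMD_* commands advertised in cmd_mask to their CXL
 * mailbox translations; anything else gets -ENOTTY.
 */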
static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
                               void *buf, unsigned int buf_len)
{
        struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
        unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
        struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
        struct cxl_dev_state *cxlds = cxlmd->cxlds;

        if (!test_bit(cmd, &cmd_mask))
                return -ENOTTY;

        switch (cmd) {
        case ND_CMD_GET_CONFIG_SIZE:
                return cxl_pmem_get_config_size(cxlds, buf, buf_len);
        case ND_CMD_GET_CONFIG_DATA:
                return cxl_pmem_get_config_data(cxlds, buf, buf_len);
        case ND_CMD_SET_CONFIG_DATA:
                return cxl_pmem_set_config_data(cxlds, buf, buf_len);
        default:
                return -ENOTTY;
        }
}

static int cxl_pmem_ctl(struct nvdimm_bus_descriptor *nd_desc,
                        struct nvdimm *nvdimm, unsigned int cmd, void *buf,
                        unsigned int buf_len, int *cmd_rc)
{
        /*
         * No firmware response to translate, let the transport error
         * code take precedence.
         */
        *cmd_rc = 0;

        if (!nvdimm)
                return -ENOTTY;
        return cxl_pmem_nvdimm_ctl(nvdimm, cmd, buf, buf_len);
}

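/*
 * Register the bridge's nvdimm_bus on the first transition to
 * CXL_NVB_ONLINE; idempotent for later transitions. Called under the
 * bridge device lock.
 */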
static bool online_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb)
{
        if (cxl_nvb->nvdimm_bus)
                return true;
        cxl_nvb->nvdimm_bus =
                nvdimm_bus_register(&cxl_nvb->dev, &cxl_nvb->nd_desc);
        return cxl_nvb->nvdimm_bus != NULL;
}

static int cxl_nvdimm_release_driver(struct device *dev, void *cxl_nvb)
{
        struct cxl_nvdimm *cxl_nvd;

        if (!is_cxl_nvdimm(dev))
                return 0;

        cxl_nvd = to_cxl_nvdimm(dev);
        if (cxl_nvd->bridge != cxl_nvb)
                return 0;

        device_release_driver(dev);
        return 0;
}

static int cxl_pmem_region_release_driver(struct device *dev, void *cxl_nvb)
{
        struct cxl_pmem_region *cxlr_pmem;

        if (!is_cxl_pmem_region(dev))
                return 0;

        cxlr_pmem = to_cxl_pmem_region(dev);
        if (cxlr_pmem->bridge != cxl_nvb)
                return 0;

        device_release_driver(dev);
        return 0;
}

static void offline_nvdimm_bus(struct cxl_nvdimm_bridge *cxl_nvb,
                               struct nvdimm_bus *nvdimm_bus)
{
        if (!nvdimm_bus)
                return;

        /*
         * Set the state of cxl_nvdimm devices to unbound / idle before
         * nvdimm_bus_unregister() rips the nvdimm objects out from
         * underneath them.
         */
        bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
                         cxl_pmem_region_release_driver);
        bus_for_each_dev(&cxl_bus_type, NULL, cxl_nvb,
                         cxl_nvdimm_release_driver);
        nvdimm_bus_unregister(nvdimm_bus);
}

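/*
 * Bridge state-machine worker: on CXL_NVB_ONLINE establish the
 * nvdimm_bus and rescan the CXL bus for devices that were waiting on
 * it; on CXL_NVB_OFFLINE / CXL_NVB_DEAD tear the bus down. Drops the
 * device reference taken when the work was queued.
 */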
static void cxl_nvb_update_state(struct work_struct *work)
{
        struct cxl_nvdimm_bridge *cxl_nvb =
                container_of(work, typeof(*cxl_nvb), state_work);
        struct nvdimm_bus *victim_bus = NULL;
        bool release = false, rescan = false;

        device_lock(&cxl_nvb->dev);
        switch (cxl_nvb->state) {
        case CXL_NVB_ONLINE:
                if (!online_nvdimm_bus(cxl_nvb)) {
                        dev_err(&cxl_nvb->dev,
                                "failed to establish nvdimm bus\n");
                        release = true;
                } else
                        rescan = true;
                break;
        case CXL_NVB_OFFLINE:
        case CXL_NVB_DEAD:
                victim_bus = cxl_nvb->nvdimm_bus;
                cxl_nvb->nvdimm_bus = NULL;
                break;
        default:
                break;
        }
        device_unlock(&cxl_nvb->dev);

        if (release)
                device_release_driver(&cxl_nvb->dev);
        if (rescan) {
                int rc = bus_rescan_devices(&cxl_bus_type);

                dev_dbg(&cxl_nvb->dev, "rescan: %d\n", rc);
        }
        offline_nvdimm_bus(cxl_nvb, victim_bus);

        put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb)
{
        /*
         * Take a reference that the workqueue will drop if new work
         * gets queued.
         */
        get_device(&cxl_nvb->dev);
        if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
                put_device(&cxl_nvb->dev);
}

static void cxl_nvdimm_bridge_remove(struct device *dev)
{
        struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

        if (cxl_nvb->state == CXL_NVB_ONLINE)
                cxl_nvb->state = CXL_NVB_OFFLINE;
        cxl_nvdimm_bridge_state_work(cxl_nvb);
}

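/*
 * The first probe of a bridge initializes its nvdimm_bus descriptor
 * and state work; every probe marks the bridge online and kicks the
 * state machine.
 */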
static int cxl_nvdimm_bridge_probe(struct device *dev)
{
        struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

        if (cxl_nvb->state == CXL_NVB_DEAD)
                return -ENXIO;

        if (cxl_nvb->state == CXL_NVB_NEW) {
                cxl_nvb->nd_desc = (struct nvdimm_bus_descriptor) {
                        .provider_name = "CXL",
                        .module = THIS_MODULE,
                        .ndctl = cxl_pmem_ctl,
                };

                INIT_WORK(&cxl_nvb->state_work, cxl_nvb_update_state);
        }

        cxl_nvb->state = CXL_NVB_ONLINE;
        cxl_nvdimm_bridge_state_work(cxl_nvb);

        return 0;
}

static struct cxl_driver cxl_nvdimm_bridge_driver = {
        .name = "cxl_nvdimm_bridge",
        .probe = cxl_nvdimm_bridge_probe,
        .remove = cxl_nvdimm_bridge_remove,
        .id = CXL_DEVICE_NVDIMM_BRIDGE,
};

static int match_cxl_nvdimm(struct device *dev, void *data)
{
        return is_cxl_nvdimm(dev);
}

static void unregister_nvdimm_region(void *nd_region)
{
        nvdimm_region_delete(nd_region);
}

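/*
 * Pin @cxlr_pmem to @cxl_nvd, keyed by the region pointer, so the
 * region object stays alive until cxl_nvdimm_del_region() drops the
 * reference.
 */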
static int cxl_nvdimm_add_region(struct cxl_nvdimm *cxl_nvd,
                                 struct cxl_pmem_region *cxlr_pmem)
{
        int rc;

        rc = xa_insert(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem,
                       cxlr_pmem, GFP_KERNEL);
        if (rc)
                return rc;

        get_device(&cxlr_pmem->dev);
        return 0;
}

static void cxl_nvdimm_del_region(struct cxl_nvdimm *cxl_nvd,
                                  struct cxl_pmem_region *cxlr_pmem)
{
        /*
         * It is possible this is called without a corresponding
         * cxl_nvdimm_add_region() for @cxlr_pmem.
         */
        cxlr_pmem = xa_erase(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem);
        if (cxlr_pmem)
                put_device(&cxlr_pmem->dev);
}

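/*
 * devm teardown for a pmem region: drop the per-nvdimm pins taken by
 * cxl_nvdimm_add_region(), under the bridge lock.
 */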
static void release_mappings(void *data)
{
        int i;
        struct cxl_pmem_region *cxlr_pmem = data;
        struct cxl_nvdimm_bridge *cxl_nvb = cxlr_pmem->bridge;

        device_lock(&cxl_nvb->dev);
        for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
                struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
                struct cxl_nvdimm *cxl_nvd = m->cxl_nvd;

                cxl_nvdimm_del_region(cxl_nvd, cxlr_pmem);
        }
        device_unlock(&cxl_nvb->dev);
}

static void cxlr_pmem_remove_resource(void *res)
{
        remove_resource(res);
}

struct cxl_pmem_region_info {
        u64 offset;
        u64 serial;
};

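/*
 * Translate a cxl_pmem_region into an nd_region: claim the HPA range
 * in iomem_resource, build one nd_mapping_desc per constituent memdev,
 * and derive the interleave-set cookie from each mapping's offset and
 * device serial number.
 */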
static int cxl_pmem_region_probe(struct device *dev)
{
        struct nd_mapping_desc mappings[CXL_DECODER_MAX_INTERLEAVE];
        struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
        struct cxl_region *cxlr = cxlr_pmem->cxlr;
        struct cxl_pmem_region_info *info = NULL;
        struct cxl_nvdimm_bridge *cxl_nvb;
        struct nd_interleave_set *nd_set;
        struct nd_region_desc ndr_desc;
        struct cxl_nvdimm *cxl_nvd;
        struct nvdimm *nvdimm;
        struct resource *res;
        int rc, i = 0;

        cxl_nvb = cxl_find_nvdimm_bridge(&cxlr_pmem->mapping[0].cxlmd->dev);
        if (!cxl_nvb) {
                dev_dbg(dev, "bridge not found\n");
                return -ENXIO;
        }
        cxlr_pmem->bridge = cxl_nvb;

        device_lock(&cxl_nvb->dev);
        if (!cxl_nvb->nvdimm_bus) {
                dev_dbg(dev, "nvdimm bus not found\n");
                rc = -ENXIO;
                goto out_nvb;
        }

        memset(&mappings, 0, sizeof(mappings));
        memset(&ndr_desc, 0, sizeof(ndr_desc));

        res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
        if (!res) {
                rc = -ENOMEM;
                goto out_nvb;
        }

        res->name = "Persistent Memory";
        res->start = cxlr_pmem->hpa_range.start;
        res->end = cxlr_pmem->hpa_range.end;
        res->flags = IORESOURCE_MEM;
        res->desc = IORES_DESC_PERSISTENT_MEMORY;

        rc = insert_resource(&iomem_resource, res);
        if (rc)
                goto out_nvb;

        rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res);
        if (rc)
                goto out_nvb;

        ndr_desc.res = res;
        ndr_desc.provider_data = cxlr_pmem;

        ndr_desc.numa_node = memory_add_physaddr_to_nid(res->start);
        ndr_desc.target_node = phys_to_target_node(res->start);
        if (ndr_desc.target_node == NUMA_NO_NODE) {
                ndr_desc.target_node = ndr_desc.numa_node;
                dev_dbg(&cxlr->dev, "changing target node from %d to %d",
                        NUMA_NO_NODE, ndr_desc.target_node);
        }

        nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
        if (!nd_set) {
                rc = -ENOMEM;
                goto out_nvb;
        }

        ndr_desc.memregion = cxlr->id;
        set_bit(ND_REGION_CXL, &ndr_desc.flags);
        set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);

        info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL);
        if (!info) {
                rc = -ENOMEM;
                goto out_nvb;
        }

        rc = devm_add_action_or_reset(dev, release_mappings, cxlr_pmem);
        if (rc)
                goto out_nvd;

        for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
                struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];
                struct cxl_memdev *cxlmd = m->cxlmd;
                struct cxl_dev_state *cxlds = cxlmd->cxlds;
                struct device *d;

                d = device_find_child(&cxlmd->dev, NULL, match_cxl_nvdimm);
                if (!d) {
                        dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i,
                                dev_name(&cxlmd->dev));
                        rc = -ENODEV;
                        goto out_nvd;
                }

                /* safe to drop ref now with bridge lock held */
                put_device(d);

                cxl_nvd = to_cxl_nvdimm(d);
                nvdimm = dev_get_drvdata(&cxl_nvd->dev);
                if (!nvdimm) {
                        dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i,
                                dev_name(&cxlmd->dev));
                        rc = -ENODEV;
                        goto out_nvd;
                }

                /*
                 * Pin the region per nvdimm device as those may be released
                 * out-of-order with respect to the region, and a single nvdimm
                 * may be associated with multiple regions.
                 */
                rc = cxl_nvdimm_add_region(cxl_nvd, cxlr_pmem);
                if (rc)
                        goto out_nvd;
                m->cxl_nvd = cxl_nvd;
                mappings[i] = (struct nd_mapping_desc) {
                        .nvdimm = nvdimm,
                        .start = m->start,
                        .size = m->size,
                        .position = i,
                };
                info[i].offset = m->start;
                info[i].serial = cxlds->serial;
        }
        ndr_desc.num_mappings = cxlr_pmem->nr_mappings;
        ndr_desc.mapping = mappings;

        /*
         * TODO enable CXL labels which skip the need for 'interleave-set cookie'
         */
        nd_set->cookie1 =
                nd_fletcher64(info, sizeof(*info) * cxlr_pmem->nr_mappings, 0);
        nd_set->cookie2 = nd_set->cookie1;
        ndr_desc.nd_set = nd_set;

        cxlr_pmem->nd_region =
                nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc);
        if (!cxlr_pmem->nd_region) {
                rc = -ENOMEM;
                goto out_nvd;
        }

        rc = devm_add_action_or_reset(dev, unregister_nvdimm_region,
                                      cxlr_pmem->nd_region);
out_nvd:
        kfree(info);
out_nvb:
        device_unlock(&cxl_nvb->dev);
        put_device(&cxl_nvb->dev);

        return rc;
}


static struct cxl_driver cxl_pmem_region_driver = {
        .name = "cxl_pmem_region",
        .probe = cxl_pmem_region_probe,
        .id = CXL_DEVICE_PMEM_REGION,
};

/*
 * Return all bridges to the CXL_NVB_NEW state to invalidate any
 * ->state_work referring to the now destroyed cxl_pmem_wq.
 */
static int cxl_nvdimm_bridge_reset(struct device *dev, void *data)
{
        struct cxl_nvdimm_bridge *cxl_nvb;

        if (!is_cxl_nvdimm_bridge(dev))
                return 0;

        cxl_nvb = to_cxl_nvdimm_bridge(dev);
        device_lock(dev);
        cxl_nvb->state = CXL_NVB_NEW;
        device_unlock(dev);

        return 0;
}

static void destroy_cxl_pmem_wq(void)
{
        destroy_workqueue(cxl_pmem_wq);
        bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_bridge_reset);
}

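/*
 * Reserve the SET_LSA and SET_SHUTDOWN_STATE mailbox commands for
 * kernel-only use, then register the bridge, nvdimm, and pmem region
 * drivers. The ordered workqueue serializes bridge state transitions.
 */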
static __init int cxl_pmem_init(void)
{
        int rc;

        set_bit(CXL_MEM_COMMAND_ID_SET_SHUTDOWN_STATE, exclusive_cmds);
        set_bit(CXL_MEM_COMMAND_ID_SET_LSA, exclusive_cmds);

        cxl_pmem_wq = alloc_ordered_workqueue("cxl_pmem", 0);
        if (!cxl_pmem_wq)
                return -ENXIO;

        rc = cxl_driver_register(&cxl_nvdimm_bridge_driver);
        if (rc)
                goto err_bridge;

        rc = cxl_driver_register(&cxl_nvdimm_driver);
        if (rc)
                goto err_nvdimm;

        rc = cxl_driver_register(&cxl_pmem_region_driver);
        if (rc)
                goto err_region;

        return 0;

err_region:
        cxl_driver_unregister(&cxl_nvdimm_driver);
err_nvdimm:
        cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
        destroy_cxl_pmem_wq();
        return rc;
}

static __exit void cxl_pmem_exit(void)
{
        cxl_driver_unregister(&cxl_pmem_region_driver);
        cxl_driver_unregister(&cxl_nvdimm_driver);
        cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
        destroy_cxl_pmem_wq();
}

MODULE_LICENSE("GPL v2");
module_init(cxl_pmem_init);
module_exit(cxl_pmem_exit);
MODULE_IMPORT_NS(CXL);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM_BRIDGE);
MODULE_ALIAS_CXL(CXL_DEVICE_NVDIMM);
MODULE_ALIAS_CXL(CXL_DEVICE_PMEM_REGION);