1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2020 Intel Corporation. */
3 #include <linux/device.h>
4 #include <linux/slab.h>
13 * The core CXL PMEM infrastructure supports persistent memory
14 * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
15 * 'bridge' device is added at the root of a CXL device topology if
16 * platform firmware advertises at least one persistent memory capable
17 * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
18 * device. Then for each cxl_memdev in the CXL device topology a bridge
19 * device is added to host a LIBNVDIMM dimm object. When these bridges
20 are registered, native LIBNVDIMM uapis are translated to CXL
21 * operations, for example, namespace label access commands.
24 static DEFINE_IDA(cxl_nvdimm_bridge_ida);
26 static void cxl_nvdimm_bridge_release(struct device *dev)
28 struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
30 ida_free(&cxl_nvdimm_bridge_ida, cxl_nvb->id);
34 static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
35 &cxl_base_attribute_group,
39 const struct device_type cxl_nvdimm_bridge_type = {
40 .name = "cxl_nvdimm_bridge",
41 .release = cxl_nvdimm_bridge_release,
42 .groups = cxl_nvdimm_bridge_attribute_groups,
45 struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
47 if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
48 "not a cxl_nvdimm_bridge device\n"))
50 return container_of(dev, struct cxl_nvdimm_bridge, dev);
52 EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL);
54 bool is_cxl_nvdimm_bridge(struct device *dev)
56 return dev->type == &cxl_nvdimm_bridge_type;
58 EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL);
60 static int match_nvdimm_bridge(struct device *dev, void *data)
62 return is_cxl_nvdimm_bridge(dev);
65 struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
67 struct cxl_port *port = find_cxl_root(&cxl_nvd->dev);
73 dev = device_find_child(&port->dev, NULL, match_nvdimm_bridge);
74 put_device(&port->dev);
79 return to_cxl_nvdimm_bridge(dev);
81 EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);
83 static struct lock_class_key cxl_nvdimm_bridge_key;
85 static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port)
87 struct cxl_nvdimm_bridge *cxl_nvb;
91 cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
93 return ERR_PTR(-ENOMEM);
95 rc = ida_alloc(&cxl_nvdimm_bridge_ida, GFP_KERNEL);
101 cxl_nvb->port = port;
102 cxl_nvb->state = CXL_NVB_NEW;
103 device_initialize(dev);
104 lockdep_set_class(&dev->mutex, &cxl_nvdimm_bridge_key);
105 device_set_pm_not_required(dev);
106 dev->parent = &port->dev;
107 dev->bus = &cxl_bus_type;
108 dev->type = &cxl_nvdimm_bridge_type;
117 static void unregister_nvb(void *_cxl_nvb)
119 struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
123 * If the bridge was ever activated then there might be in-flight state
124 * work to flush. Once the state has been changed to 'dead' then no new
125 * work can be queued by user-triggered bind.
127 device_lock(&cxl_nvb->dev);
128 flush = cxl_nvb->state != CXL_NVB_NEW;
129 cxl_nvb->state = CXL_NVB_DEAD;
130 device_unlock(&cxl_nvb->dev);
133 * Even though the device core will trigger device_release_driver()
134 * before the unregister, it does not know about the fact that
135 * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
136 * release not and flush it before tearing down the nvdimm device
139 device_release_driver(&cxl_nvb->dev);
141 flush_work(&cxl_nvb->state_work);
142 device_unregister(&cxl_nvb->dev);
146 * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
147 * @host: platform firmware root device
148 * @port: CXL port at the root of a CXL topology
150 * Return: bridge device that can host cxl_nvdimm objects
152 struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
153 struct cxl_port *port)
155 struct cxl_nvdimm_bridge *cxl_nvb;
159 if (!IS_ENABLED(CONFIG_CXL_PMEM))
160 return ERR_PTR(-ENXIO);
162 cxl_nvb = cxl_nvdimm_bridge_alloc(port);
167 rc = dev_set_name(dev, "nvdimm-bridge%d", cxl_nvb->id);
171 rc = device_add(dev);
175 rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
185 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL);
187 static void cxl_nvdimm_release(struct device *dev)
189 struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
194 static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
195 &cxl_base_attribute_group,
199 const struct device_type cxl_nvdimm_type = {
200 .name = "cxl_nvdimm",
201 .release = cxl_nvdimm_release,
202 .groups = cxl_nvdimm_attribute_groups,
205 bool is_cxl_nvdimm(struct device *dev)
207 return dev->type == &cxl_nvdimm_type;
209 EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL);
211 struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
213 if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
214 "not a cxl_nvdimm device\n"))
216 return container_of(dev, struct cxl_nvdimm, dev);
218 EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
220 static struct lock_class_key cxl_nvdimm_key;
222 static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
224 struct cxl_nvdimm *cxl_nvd;
227 cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
229 return ERR_PTR(-ENOMEM);
232 cxl_nvd->cxlmd = cxlmd;
233 device_initialize(dev);
234 lockdep_set_class(&dev->mutex, &cxl_nvdimm_key);
235 device_set_pm_not_required(dev);
236 dev->parent = &cxlmd->dev;
237 dev->bus = &cxl_bus_type;
238 dev->type = &cxl_nvdimm_type;
243 static void cxl_nvd_unregister(void *dev)
245 device_unregister(dev);
249 * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
250 * @host: same host as @cxlmd
251 * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
253 * Return: 0 on success negative error code on failure.
255 int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
257 struct cxl_nvdimm *cxl_nvd;
261 cxl_nvd = cxl_nvdimm_alloc(cxlmd);
263 return PTR_ERR(cxl_nvd);
266 rc = dev_set_name(dev, "pmem%d", cxlmd->id);
270 rc = device_add(dev);
274 dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
277 return devm_add_action_or_reset(host, cxl_nvd_unregister, dev);
283 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);