// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl pmem
 *
 * The core CXL PMEM infrastructure supports persistent memory
 * provisioning and serves as a bridge to the LIBNVDIMM subsystem. A CXL
 * 'bridge' device is added at the root of a CXL device topology if
 * platform firmware advertises at least one persistent memory capable
 * CXL window. That root-level bridge corresponds to a LIBNVDIMM 'bus'
 * device. Then, for each cxl_memdev in the CXL device topology, a bridge
 * device is added to host a LIBNVDIMM dimm object. When these bridges
 * are registered, native LIBNVDIMM uapis are translated to CXL
 * operations, for example, namespace label access commands.
 */
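
/*
 * A minimal usage sketch of the two entry points below (variable names
 * are hypothetical; the real call sites live elsewhere in the CXL
 * drivers):
 *
 *	struct cxl_nvdimm_bridge *cxl_nvb;
 *
 *	// once, at the pmem-capable root of the topology
 *	cxl_nvb = devm_cxl_add_nvdimm_bridge(host, root_port);
 *	if (IS_ERR(cxl_nvb))
 *		return PTR_ERR(cxl_nvb);
 *
 *	// once per cxl_memdev, to surface a LIBNVDIMM dimm object
 *	rc = devm_cxl_add_nvdimm(host, cxlmd);
 */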

static DEFINE_IDA(cxl_nvdimm_bridge_ida);

static void cxl_nvdimm_bridge_release(struct device *dev)
{
	struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);

	ida_free(&cxl_nvdimm_bridge_ida, cxl_nvb->id);
	kfree(cxl_nvb);
}

static const struct attribute_group *cxl_nvdimm_bridge_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_nvdimm_bridge_type = {
	.name = "cxl_nvdimm_bridge",
	.release = cxl_nvdimm_bridge_release,
	.groups = cxl_nvdimm_bridge_attribute_groups,
};

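/**
 * to_cxl_nvdimm_bridge() - cast a device to its cxl_nvdimm_bridge container
 * @dev: device known (or expected) to be a CXL nvdimm-bridge
 *
 * Return: the containing cxl_nvdimm_bridge, or NULL (with a one-time
 * warning) if @dev is not a bridge device.
 */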
struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_nvdimm_bridge_type,
			  "not a cxl_nvdimm_bridge device\n"))
		return NULL;
	return container_of(dev, struct cxl_nvdimm_bridge, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL);

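/**
 * is_cxl_nvdimm_bridge() - test whether a device is a CXL nvdimm-bridge
 * @dev: device to test
 */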
bool is_cxl_nvdimm_bridge(struct device *dev)
{
	return dev->type == &cxl_nvdimm_bridge_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL);

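/* device_find_child() match callback: accept any nvdimm-bridge child */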
static int match_nvdimm_bridge(struct device *dev, void *data)
{
	return is_cxl_nvdimm_bridge(dev);
}

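/**
 * cxl_find_nvdimm_bridge() - locate the bridge at the root of @cxl_nvd's topology
 * @cxl_nvd: cxl_nvdimm that needs LIBNVDIMM bus services
 *
 * Walks up to the CXL root port and searches its children for the
 * nvdimm-bridge device. On success the caller holds a reference on the
 * bridge's struct device, taken by device_find_child(), and is
 * responsible for dropping it with put_device().
 *
 * Return: the bridge, or NULL if no root port or bridge is found.
 */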
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
{
	struct cxl_port *port = find_cxl_root(&cxl_nvd->dev);
	struct device *dev;

	if (!port)
		return NULL;

	dev = device_find_child(&port->dev, NULL, match_nvdimm_bridge);
	put_device(&port->dev);

	if (!dev)
		return NULL;

	return to_cxl_nvdimm_bridge(dev);
}
EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);

static struct lock_class_key cxl_nvdimm_bridge_key;

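/*
 * Allocate an id and a partially initialized bridge device. The device
 * is not added here; callers complete registration with device_add()
 * and, on failure after this point, release with put_device().
 */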
static struct cxl_nvdimm_bridge *cxl_nvdimm_bridge_alloc(struct cxl_port *port)
{
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	cxl_nvb = kzalloc(sizeof(*cxl_nvb), GFP_KERNEL);
	if (!cxl_nvb)
		return ERR_PTR(-ENOMEM);

	rc = ida_alloc(&cxl_nvdimm_bridge_ida, GFP_KERNEL);
	if (rc < 0)
		goto err;
	cxl_nvb->id = rc;

	dev = &cxl_nvb->dev;
	cxl_nvb->port = port;
	cxl_nvb->state = CXL_NVB_NEW;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_nvdimm_bridge_key);
	device_set_pm_not_required(dev);
	dev->parent = &port->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_nvdimm_bridge_type;

	return cxl_nvb;

err:
	kfree(cxl_nvb);
	return ERR_PTR(rc);
}

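/* devm action: flush in-flight state work and tear down the bridge */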
static void unregister_nvb(void *_cxl_nvb)
{
	struct cxl_nvdimm_bridge *cxl_nvb = _cxl_nvb;
	bool flush;

	/*
	 * If the bridge was ever activated then there might be in-flight state
	 * work to flush. Once the state has been changed to 'dead' then no new
	 * work can be queued by user-triggered bind.
	 */
	device_lock(&cxl_nvb->dev);
	flush = cxl_nvb->state != CXL_NVB_NEW;
	cxl_nvb->state = CXL_NVB_DEAD;
	device_unlock(&cxl_nvb->dev);

	/*
	 * Even though the device core will trigger device_release_driver()
	 * before the unregister, it does not know that
	 * cxl_nvdimm_bridge_driver defers ->remove() work. So, do the driver
	 * release now and flush it before tearing down the nvdimm device
	 * hierarchy.
	 */
	device_release_driver(&cxl_nvb->dev);
	if (flush)
		flush_work(&cxl_nvb->state_work);
	device_unregister(&cxl_nvb->dev);
}

/**
 * devm_cxl_add_nvdimm_bridge() - add the root of a LIBNVDIMM topology
 * @host: platform firmware root device
 * @port: CXL port at the root of a CXL topology
 *
 * Return: bridge device that can host cxl_nvdimm objects
 */
struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
						     struct cxl_port *port)
{
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	if (!IS_ENABLED(CONFIG_CXL_PMEM))
		return ERR_PTR(-ENXIO);

	cxl_nvb = cxl_nvdimm_bridge_alloc(port);
	if (IS_ERR(cxl_nvb))
		return cxl_nvb;

	dev = &cxl_nvb->dev;
	rc = dev_set_name(dev, "nvdimm-bridge%d", cxl_nvb->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(host, unregister_nvb, cxl_nvb);
	if (rc)
		return ERR_PTR(rc);

	return cxl_nvb;

err:
	put_device(dev);
	return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL);

static void cxl_nvdimm_release(struct device *dev)
{
	struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);

	kfree(cxl_nvd);
}

static const struct attribute_group *cxl_nvdimm_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_nvdimm_type = {
	.name = "cxl_nvdimm",
	.release = cxl_nvdimm_release,
	.groups = cxl_nvdimm_attribute_groups,
};

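/**
 * is_cxl_nvdimm() - test whether a device is a CXL nvdimm
 * @dev: device to test
 */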
bool is_cxl_nvdimm(struct device *dev)
{
	return dev->type == &cxl_nvdimm_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL);

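/**
 * to_cxl_nvdimm() - cast a device to its cxl_nvdimm container
 * @dev: device known (or expected) to be a CXL nvdimm
 *
 * Return: the containing cxl_nvdimm, or NULL (with a one-time warning)
 * if @dev is not a cxl_nvdimm device.
 */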
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_nvdimm(dev),
			  "not a cxl_nvdimm device\n"))
		return NULL;
	return container_of(dev, struct cxl_nvdimm, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);

static struct lock_class_key cxl_nvdimm_key;

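/*
 * Allocate a partially initialized cxl_nvdimm device for @cxlmd. As with
 * cxl_nvdimm_bridge_alloc(), the caller finishes registration with
 * device_add() and unwinds failures with put_device().
 */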
static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
{
	struct cxl_nvdimm *cxl_nvd;
	struct device *dev;

	cxl_nvd = kzalloc(sizeof(*cxl_nvd), GFP_KERNEL);
	if (!cxl_nvd)
		return ERR_PTR(-ENOMEM);

	dev = &cxl_nvd->dev;
	cxl_nvd->cxlmd = cxlmd;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_nvdimm_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlmd->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_nvdimm_type;

	return cxl_nvd;
}

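/* devm action: type-erased device_unregister() for the cxl_nvdimm */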
static void cxl_nvd_unregister(void *dev)
{
	device_unregister(dev);
}

/**
 * devm_cxl_add_nvdimm() - add a bridge between a cxl_memdev and an nvdimm
 * @host: same host as @cxlmd
 * @cxlmd: cxl_memdev instance that will perform LIBNVDIMM operations
 *
 * Return: 0 on success, a negative error code on failure.
 */
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd)
{
	struct cxl_nvdimm *cxl_nvd;
	struct device *dev;
	int rc;

	cxl_nvd = cxl_nvdimm_alloc(cxlmd);
	if (IS_ERR(cxl_nvd))
		return PTR_ERR(cxl_nvd);

	dev = &cxl_nvd->dev;
	rc = dev_set_name(dev, "pmem%d", cxlmd->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(host, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	return devm_add_action_or_reset(host, cxl_nvd_unregister, dev);

err:
	put_device(dev);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);