// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */
#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "nvmem.h"
28 struct device_node *np;
29 struct nvmem_device *nvmem;
30 struct list_head node;
/* Protects nvmem device/cell lists and lookups. */
static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

/* Notifier chain for NVMEM_ADD/REMOVE and NVMEM_CELL_ADD/REMOVE events. */
static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
45 static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
46 void *val, size_t bytes)
49 return nvmem->reg_read(nvmem->priv, offset, val, bytes);
54 static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
55 void *val, size_t bytes)
58 return nvmem->reg_write(nvmem->priv, offset, val, bytes);
63 static void nvmem_release(struct device *dev)
65 struct nvmem_device *nvmem = to_nvmem_device(dev);
67 ida_simple_remove(&nvmem_ida, nvmem->id);
71 static const struct device_type nvmem_provider_type = {
72 .release = nvmem_release,
75 static struct bus_type nvmem_bus_type = {
79 static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
86 d = bus_find_device_by_of_node(&nvmem_bus_type, nvmem_np);
91 return to_nvmem_device(d);
94 static struct nvmem_device *nvmem_find(const char *name)
98 d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
103 return to_nvmem_device(d);
106 static void nvmem_cell_drop(struct nvmem_cell *cell)
108 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
109 mutex_lock(&nvmem_mutex);
110 list_del(&cell->node);
111 mutex_unlock(&nvmem_mutex);
112 of_node_put(cell->np);
113 kfree_const(cell->name);
117 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
119 struct nvmem_cell *cell, *p;
121 list_for_each_entry_safe(cell, p, &nvmem->cells, node)
122 nvmem_cell_drop(cell);
125 static void nvmem_cell_add(struct nvmem_cell *cell)
127 mutex_lock(&nvmem_mutex);
128 list_add_tail(&cell->node, &cell->nvmem->cells);
129 mutex_unlock(&nvmem_mutex);
130 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
133 static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
134 const struct nvmem_cell_info *info,
135 struct nvmem_cell *cell)
138 cell->offset = info->offset;
139 cell->bytes = info->bytes;
140 cell->name = info->name;
142 cell->bit_offset = info->bit_offset;
143 cell->nbits = info->nbits;
146 cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
149 if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
151 "cell %s unaligned to nvmem stride %d\n",
152 cell->name ?: "<unknown>", nvmem->stride);
159 static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
160 const struct nvmem_cell_info *info,
161 struct nvmem_cell *cell)
165 err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
169 cell->name = kstrdup_const(info->name, GFP_KERNEL);
177 * nvmem_add_cells() - Add cell information to an nvmem device
179 * @nvmem: nvmem device to add cells to.
180 * @info: nvmem cell info to add to the device
181 * @ncells: number of cells in info
183 * Return: 0 or negative error code on failure.
185 static int nvmem_add_cells(struct nvmem_device *nvmem,
186 const struct nvmem_cell_info *info,
189 struct nvmem_cell **cells;
192 cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
196 for (i = 0; i < ncells; i++) {
197 cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
203 rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
209 nvmem_cell_add(cells[i]);
212 /* remove tmp array */
218 nvmem_cell_drop(cells[i]);
226 * nvmem_register_notifier() - Register a notifier block for nvmem events.
228 * @nb: notifier block to be called on nvmem events.
230 * Return: 0 on success, negative error number on failure.
232 int nvmem_register_notifier(struct notifier_block *nb)
234 return blocking_notifier_chain_register(&nvmem_notifier, nb);
236 EXPORT_SYMBOL_GPL(nvmem_register_notifier);
239 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
241 * @nb: notifier block to be unregistered.
243 * Return: 0 on success, negative error number on failure.
245 int nvmem_unregister_notifier(struct notifier_block *nb)
247 return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
249 EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
251 static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
253 const struct nvmem_cell_info *info;
254 struct nvmem_cell_table *table;
255 struct nvmem_cell *cell;
258 mutex_lock(&nvmem_cell_mutex);
259 list_for_each_entry(table, &nvmem_cell_tables, node) {
260 if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
261 for (i = 0; i < table->ncells; i++) {
262 info = &table->cells[i];
264 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
270 rval = nvmem_cell_info_to_nvmem_cell(nvmem,
278 nvmem_cell_add(cell);
284 mutex_unlock(&nvmem_cell_mutex);
288 static struct nvmem_cell *
289 nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
291 struct nvmem_cell *iter, *cell = NULL;
293 mutex_lock(&nvmem_mutex);
294 list_for_each_entry(iter, &nvmem->cells, node) {
295 if (strcmp(cell_id, iter->name) == 0) {
300 mutex_unlock(&nvmem_mutex);
305 static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
307 struct device_node *parent, *child;
308 struct device *dev = &nvmem->dev;
309 struct nvmem_cell *cell;
313 parent = dev->of_node;
315 for_each_child_of_node(parent, child) {
316 addr = of_get_property(child, "reg", &len);
319 if (len < 2 * sizeof(u32)) {
320 dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
325 cell = kzalloc(sizeof(*cell), GFP_KERNEL);
332 cell->offset = be32_to_cpup(addr++);
333 cell->bytes = be32_to_cpup(addr);
334 cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
336 addr = of_get_property(child, "bits", &len);
337 if (addr && len == (2 * sizeof(u32))) {
338 cell->bit_offset = be32_to_cpup(addr++);
339 cell->nbits = be32_to_cpup(addr);
343 cell->bytes = DIV_ROUND_UP(
344 cell->nbits + cell->bit_offset,
347 if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
348 dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
349 cell->name, nvmem->stride);
350 /* Cells already added will be freed later. */
351 kfree_const(cell->name);
357 cell->np = of_node_get(child);
358 nvmem_cell_add(cell);
365 * nvmem_register() - Register a nvmem device for given nvmem_config.
366 * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
368 * @config: nvmem device configuration with which nvmem device is created.
370 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
374 struct nvmem_device *nvmem_register(const struct nvmem_config *config)
376 struct nvmem_device *nvmem;
380 return ERR_PTR(-EINVAL);
382 nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
384 return ERR_PTR(-ENOMEM);
386 rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
389 return ERR_PTR(rval);
392 kref_init(&nvmem->refcnt);
393 INIT_LIST_HEAD(&nvmem->cells);
396 nvmem->owner = config->owner;
397 if (!nvmem->owner && config->dev->driver)
398 nvmem->owner = config->dev->driver->owner;
399 nvmem->stride = config->stride ?: 1;
400 nvmem->word_size = config->word_size ?: 1;
401 nvmem->size = config->size;
402 nvmem->dev.type = &nvmem_provider_type;
403 nvmem->dev.bus = &nvmem_bus_type;
404 nvmem->dev.parent = config->dev;
405 nvmem->priv = config->priv;
406 nvmem->type = config->type;
407 nvmem->reg_read = config->reg_read;
408 nvmem->reg_write = config->reg_write;
409 if (!config->no_of_node)
410 nvmem->dev.of_node = config->dev->of_node;
412 if (config->id == -1 && config->name) {
413 dev_set_name(&nvmem->dev, "%s", config->name);
415 dev_set_name(&nvmem->dev, "%s%d",
416 config->name ? : "nvmem",
417 config->name ? config->id : nvmem->id);
420 nvmem->read_only = device_property_present(config->dev, "read-only") ||
421 config->read_only || !nvmem->reg_write;
423 nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);
425 device_initialize(&nvmem->dev);
427 dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
429 rval = device_add(&nvmem->dev);
433 if (config->compat) {
434 rval = nvmem_sysfs_setup_compat(nvmem, config);
440 rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
442 goto err_remove_cells;
445 rval = nvmem_add_cells_from_table(nvmem);
447 goto err_remove_cells;
449 rval = nvmem_add_cells_from_of(nvmem);
451 goto err_remove_cells;
453 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
458 nvmem_device_remove_all_cells(nvmem);
460 nvmem_sysfs_remove_compat(nvmem, config);
462 device_del(&nvmem->dev);
464 put_device(&nvmem->dev);
466 return ERR_PTR(rval);
468 EXPORT_SYMBOL_GPL(nvmem_register);
470 static void nvmem_device_release(struct kref *kref)
472 struct nvmem_device *nvmem;
474 nvmem = container_of(kref, struct nvmem_device, refcnt);
476 blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
478 if (nvmem->flags & FLAG_COMPAT)
479 device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
481 nvmem_device_remove_all_cells(nvmem);
482 device_del(&nvmem->dev);
483 put_device(&nvmem->dev);
487 * nvmem_unregister() - Unregister previously registered nvmem device
489 * @nvmem: Pointer to previously registered nvmem device.
491 void nvmem_unregister(struct nvmem_device *nvmem)
493 kref_put(&nvmem->refcnt, nvmem_device_release);
495 EXPORT_SYMBOL_GPL(nvmem_unregister);
/* devres destructor for devm_nvmem_register(). */
static void devm_nvmem_release(struct device *dev, void *res)
{
        nvmem_unregister(*(struct nvmem_device **)res);
}
503 * devm_nvmem_register() - Register a managed nvmem device for given
505 * Also creates an binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
507 * @dev: Device that uses the nvmem device.
508 * @config: nvmem device configuration with which nvmem device is created.
510 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
513 struct nvmem_device *devm_nvmem_register(struct device *dev,
514 const struct nvmem_config *config)
516 struct nvmem_device **ptr, *nvmem;
518 ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
520 return ERR_PTR(-ENOMEM);
522 nvmem = nvmem_register(config);
524 if (!IS_ERR(nvmem)) {
526 devres_add(dev, ptr);
533 EXPORT_SYMBOL_GPL(devm_nvmem_register);
/* devres match callback for devm_nvmem_unregister(). */
static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
        struct nvmem_device **r = res;

        return *r == data;
}
543 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
546 * @dev: Device that uses the nvmem device.
547 * @nvmem: Pointer to previously registered nvmem device.
549 * Return: Will be an negative on error or a zero on success.
551 int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
553 return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
555 EXPORT_SYMBOL(devm_nvmem_unregister);
557 static struct nvmem_device *__nvmem_device_get(struct device_node *np,
558 const char *nvmem_name)
560 struct nvmem_device *nvmem = NULL;
562 mutex_lock(&nvmem_mutex);
563 nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
564 mutex_unlock(&nvmem_mutex);
566 return ERR_PTR(-EPROBE_DEFER);
568 if (!try_module_get(nvmem->owner)) {
570 "could not increase module refcount for cell %s\n",
571 nvmem_dev_name(nvmem));
573 put_device(&nvmem->dev);
574 return ERR_PTR(-EINVAL);
577 kref_get(&nvmem->refcnt);
582 static void __nvmem_device_put(struct nvmem_device *nvmem)
584 put_device(&nvmem->dev);
585 module_put(nvmem->owner);
586 kref_put(&nvmem->refcnt, nvmem_device_release);
589 #if IS_ENABLED(CONFIG_OF)
591 * of_nvmem_device_get() - Get nvmem device from a given id
593 * @np: Device tree node that uses the nvmem device.
594 * @id: nvmem name from nvmem-names property.
596 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
599 struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
602 struct device_node *nvmem_np;
606 index = of_property_match_string(np, "nvmem-names", id);
608 nvmem_np = of_parse_phandle(np, "nvmem", index);
610 return ERR_PTR(-ENOENT);
612 return __nvmem_device_get(nvmem_np, NULL);
614 EXPORT_SYMBOL_GPL(of_nvmem_device_get);
618 * nvmem_device_get() - Get nvmem device from a given id
620 * @dev: Device that uses the nvmem device.
621 * @dev_name: name of the requested nvmem device.
623 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
626 struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
628 if (dev->of_node) { /* try dt first */
629 struct nvmem_device *nvmem;
631 nvmem = of_nvmem_device_get(dev->of_node, dev_name);
633 if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
638 return __nvmem_device_get(NULL, dev_name);
640 EXPORT_SYMBOL_GPL(nvmem_device_get);
/* devres match callback for devm_nvmem_device_put(). */
static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
        struct nvmem_device **nvmem = res;

        if (WARN_ON(!nvmem || !*nvmem))
                return 0;

        return *nvmem == data;
}
/* devres destructor for devm_nvmem_device_get(). */
static void devm_nvmem_device_release(struct device *dev, void *res)
{
        nvmem_device_put(*(struct nvmem_device **)res);
}
658 * devm_nvmem_device_put() - put alredy got nvmem device
660 * @dev: Device that uses the nvmem device.
661 * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(),
662 * that needs to be released.
664 void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
668 ret = devres_release(dev, devm_nvmem_device_release,
669 devm_nvmem_device_match, nvmem);
673 EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
/**
 * nvmem_device_put() - put already got nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
        __nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);
687 * devm_nvmem_device_get() - Get nvmem cell of device form a given id
689 * @dev: Device that requests the nvmem device.
690 * @id: name id for the requested nvmem device.
692 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
693 * on success. The nvmem_cell will be freed by the automatically once the
696 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
698 struct nvmem_device **ptr, *nvmem;
700 ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
702 return ERR_PTR(-ENOMEM);
704 nvmem = nvmem_device_get(dev, id);
705 if (!IS_ERR(nvmem)) {
707 devres_add(dev, ptr);
714 EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
716 static struct nvmem_cell *
717 nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
719 struct nvmem_cell *cell = ERR_PTR(-ENOENT);
720 struct nvmem_cell_lookup *lookup;
721 struct nvmem_device *nvmem;
725 return ERR_PTR(-EINVAL);
727 dev_id = dev_name(dev);
729 mutex_lock(&nvmem_lookup_mutex);
731 list_for_each_entry(lookup, &nvmem_lookup_list, node) {
732 if ((strcmp(lookup->dev_id, dev_id) == 0) &&
733 (strcmp(lookup->con_id, con_id) == 0)) {
734 /* This is the right entry. */
735 nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
737 /* Provider may not be registered yet. */
738 cell = ERR_CAST(nvmem);
742 cell = nvmem_find_cell_by_name(nvmem,
745 __nvmem_device_put(nvmem);
746 cell = ERR_PTR(-ENOENT);
752 mutex_unlock(&nvmem_lookup_mutex);
756 #if IS_ENABLED(CONFIG_OF)
757 static struct nvmem_cell *
758 nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
760 struct nvmem_cell *iter, *cell = NULL;
762 mutex_lock(&nvmem_mutex);
763 list_for_each_entry(iter, &nvmem->cells, node) {
764 if (np == iter->np) {
769 mutex_unlock(&nvmem_mutex);
775 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
777 * @np: Device tree node that uses the nvmem cell.
778 * @id: nvmem cell name from nvmem-cell-names property, or NULL
779 * for the cell at index 0 (the lone cell with no accompanying
780 * nvmem-cell-names property).
782 * Return: Will be an ERR_PTR() on error or a valid pointer
783 * to a struct nvmem_cell. The nvmem_cell will be freed by the
786 struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
788 struct device_node *cell_np, *nvmem_np;
789 struct nvmem_device *nvmem;
790 struct nvmem_cell *cell;
793 /* if cell name exists, find index to the name */
795 index = of_property_match_string(np, "nvmem-cell-names", id);
797 cell_np = of_parse_phandle(np, "nvmem-cells", index);
799 return ERR_PTR(-ENOENT);
801 nvmem_np = of_get_next_parent(cell_np);
803 return ERR_PTR(-EINVAL);
805 nvmem = __nvmem_device_get(nvmem_np, NULL);
806 of_node_put(nvmem_np);
808 return ERR_CAST(nvmem);
810 cell = nvmem_find_cell_by_node(nvmem, cell_np);
812 __nvmem_device_put(nvmem);
813 return ERR_PTR(-ENOENT);
818 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
822 * nvmem_cell_get() - Get nvmem cell of device form a given cell name
824 * @dev: Device that requests the nvmem cell.
825 * @id: nvmem cell name to get (this corresponds with the name from the
826 * nvmem-cell-names property for DT systems and with the con_id from
827 * the lookup entry for non-DT systems).
829 * Return: Will be an ERR_PTR() on error or a valid pointer
830 * to a struct nvmem_cell. The nvmem_cell will be freed by the
833 struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
835 struct nvmem_cell *cell;
837 if (dev->of_node) { /* try dt first */
838 cell = of_nvmem_cell_get(dev->of_node, id);
839 if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
843 /* NULL cell id only allowed for device tree; invalid otherwise */
845 return ERR_PTR(-EINVAL);
847 return nvmem_cell_get_from_lookup(dev, id);
849 EXPORT_SYMBOL_GPL(nvmem_cell_get);
/* devres destructor for devm_nvmem_cell_get(). */
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
        nvmem_cell_put(*(struct nvmem_cell **)res);
}
857 * devm_nvmem_cell_get() - Get nvmem cell of device form a given id
859 * @dev: Device that requests the nvmem cell.
860 * @id: nvmem cell name id to get.
862 * Return: Will be an ERR_PTR() on error or a valid pointer
863 * to a struct nvmem_cell. The nvmem_cell will be freed by the
864 * automatically once the device is freed.
866 struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
868 struct nvmem_cell **ptr, *cell;
870 ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
872 return ERR_PTR(-ENOMEM);
874 cell = nvmem_cell_get(dev, id);
877 devres_add(dev, ptr);
884 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
/* devres match callback for devm_nvmem_cell_put(). */
static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
        struct nvmem_cell **c = res;

        if (WARN_ON(!c || !*c))
                return 0;

        return *c == data;
}
897 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
898 * from devm_nvmem_cell_get.
900 * @dev: Device that requests the nvmem cell.
901 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
903 void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
907 ret = devres_release(dev, devm_nvmem_cell_release,
908 devm_nvmem_cell_match, cell);
912 EXPORT_SYMBOL(devm_nvmem_cell_put);
915 * nvmem_cell_put() - Release previously allocated nvmem cell.
917 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
919 void nvmem_cell_put(struct nvmem_cell *cell)
921 struct nvmem_device *nvmem = cell->nvmem;
923 __nvmem_device_put(nvmem);
925 EXPORT_SYMBOL_GPL(nvmem_cell_put);
927 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
930 int i, extra, bit_offset = cell->bit_offset;
937 /* setup rest of the bytes if any */
938 for (i = 1; i < cell->bytes; i++) {
939 /* Get bits from next byte and shift them towards msb */
940 *p |= *b << (BITS_PER_BYTE - bit_offset);
946 /* point to the msb */
947 p += cell->bytes - 1;
950 /* result fits in less bytes */
951 extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
955 /* clear msb bits if any leftover in the last byte */
956 if (cell->nbits % BITS_PER_BYTE)
957 *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
960 static int __nvmem_cell_read(struct nvmem_device *nvmem,
961 struct nvmem_cell *cell,
962 void *buf, size_t *len)
966 rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
971 /* shift bits in-place */
972 if (cell->bit_offset || cell->nbits)
973 nvmem_shift_read_buffer_in_place(cell, buf);
982 * nvmem_cell_read() - Read a given nvmem cell
984 * @cell: nvmem cell to be read.
985 * @len: pointer to length of cell which will be populated on successful read;
988 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
989 * buffer should be freed by the consumer with a kfree().
991 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
993 struct nvmem_device *nvmem = cell->nvmem;
998 return ERR_PTR(-EINVAL);
1000 buf = kzalloc(cell->bytes, GFP_KERNEL);
1002 return ERR_PTR(-ENOMEM);
1004 rc = __nvmem_cell_read(nvmem, cell, buf, len);
1012 EXPORT_SYMBOL_GPL(nvmem_cell_read);
1014 static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
1017 struct nvmem_device *nvmem = cell->nvmem;
1018 int i, rc, nbits, bit_offset = cell->bit_offset;
1019 u8 v, *p, *buf, *b, pbyte, pbits;
1021 nbits = cell->nbits;
1022 buf = kzalloc(cell->bytes, GFP_KERNEL);
1024 return ERR_PTR(-ENOMEM);
1026 memcpy(buf, _buf, len);
1033 /* setup the first byte with lsb bits from nvmem */
1034 rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
1037 *b++ |= GENMASK(bit_offset - 1, 0) & v;
1039 /* setup rest of the byte if any */
1040 for (i = 1; i < cell->bytes; i++) {
1041 /* Get last byte bits and shift them towards lsb */
1042 pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
1050 /* if it's not end on byte boundary */
1051 if ((nbits + bit_offset) % BITS_PER_BYTE) {
1052 /* setup the last byte with msb bits from nvmem */
1053 rc = nvmem_reg_read(nvmem,
1054 cell->offset + cell->bytes - 1, &v, 1);
1057 *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
1068 * nvmem_cell_write() - Write to a given nvmem cell
1070 * @cell: nvmem cell to be written.
1071 * @buf: Buffer to be written.
1072 * @len: length of buffer to be written to nvmem cell.
1074 * Return: length of bytes written or negative on failure.
1076 int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
1078 struct nvmem_device *nvmem = cell->nvmem;
1081 if (!nvmem || nvmem->read_only ||
1082 (cell->bit_offset == 0 && len != cell->bytes))
1085 if (cell->bit_offset || cell->nbits) {
1086 buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
1088 return PTR_ERR(buf);
1091 rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
1093 /* free the tmp buffer */
1094 if (cell->bit_offset || cell->nbits)
1102 EXPORT_SYMBOL_GPL(nvmem_cell_write);
1105 * nvmem_cell_read_u16() - Read a cell value as an u16
1107 * @dev: Device that requests the nvmem cell.
1108 * @cell_id: Name of nvmem cell to read.
1109 * @val: pointer to output value.
1111 * Return: 0 on success or negative errno.
1113 int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
1115 struct nvmem_cell *cell;
1119 cell = nvmem_cell_get(dev, cell_id);
1121 return PTR_ERR(cell);
1123 buf = nvmem_cell_read(cell, &len);
1125 nvmem_cell_put(cell);
1126 return PTR_ERR(buf);
1128 if (len != sizeof(*val)) {
1130 nvmem_cell_put(cell);
1133 memcpy(val, buf, sizeof(*val));
1135 nvmem_cell_put(cell);
1139 EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
1142 * nvmem_cell_read_u32() - Read a cell value as an u32
1144 * @dev: Device that requests the nvmem cell.
1145 * @cell_id: Name of nvmem cell to read.
1146 * @val: pointer to output value.
1148 * Return: 0 on success or negative errno.
1150 int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
1152 struct nvmem_cell *cell;
1156 cell = nvmem_cell_get(dev, cell_id);
1158 return PTR_ERR(cell);
1160 buf = nvmem_cell_read(cell, &len);
1162 nvmem_cell_put(cell);
1163 return PTR_ERR(buf);
1165 if (len != sizeof(*val)) {
1167 nvmem_cell_put(cell);
1170 memcpy(val, buf, sizeof(*val));
1173 nvmem_cell_put(cell);
1176 EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
1179 * nvmem_device_cell_read() - Read a given nvmem device and cell
1181 * @nvmem: nvmem device to read from.
1182 * @info: nvmem cell info to be read.
1183 * @buf: buffer pointer which will be populated on successful read.
1185 * Return: length of successful bytes read on success and negative
1186 * error code on error.
1188 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
1189 struct nvmem_cell_info *info, void *buf)
1191 struct nvmem_cell cell;
1198 rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
1202 rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
1208 EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
1211 * nvmem_device_cell_write() - Write cell to a given nvmem device
1213 * @nvmem: nvmem device to be written to.
1214 * @info: nvmem cell info to be written.
1215 * @buf: buffer to be written to cell.
1217 * Return: length of bytes written or negative error code on failure.
1219 int nvmem_device_cell_write(struct nvmem_device *nvmem,
1220 struct nvmem_cell_info *info, void *buf)
1222 struct nvmem_cell cell;
1228 rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
1232 return nvmem_cell_write(&cell, buf, cell.bytes);
1234 EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
1237 * nvmem_device_read() - Read from a given nvmem device
1239 * @nvmem: nvmem device to read from.
1240 * @offset: offset in nvmem device.
1241 * @bytes: number of bytes to read.
1242 * @buf: buffer pointer which will be populated on successful read.
1244 * Return: length of successful bytes read on success and negative
1245 * error code on error.
1247 int nvmem_device_read(struct nvmem_device *nvmem,
1248 unsigned int offset,
1249 size_t bytes, void *buf)
1256 rc = nvmem_reg_read(nvmem, offset, buf, bytes);
1263 EXPORT_SYMBOL_GPL(nvmem_device_read);
1266 * nvmem_device_write() - Write cell to a given nvmem device
1268 * @nvmem: nvmem device to be written to.
1269 * @offset: offset in nvmem device.
1270 * @bytes: number of bytes to write.
1271 * @buf: buffer to be written.
1273 * Return: length of bytes written or negative error code on failure.
1275 int nvmem_device_write(struct nvmem_device *nvmem,
1276 unsigned int offset,
1277 size_t bytes, void *buf)
1284 rc = nvmem_reg_write(nvmem, offset, buf, bytes);
1292 EXPORT_SYMBOL_GPL(nvmem_device_write);
1295 * nvmem_add_cell_table() - register a table of cell info entries
1297 * @table: table of cell info entries
1299 void nvmem_add_cell_table(struct nvmem_cell_table *table)
1301 mutex_lock(&nvmem_cell_mutex);
1302 list_add_tail(&table->node, &nvmem_cell_tables);
1303 mutex_unlock(&nvmem_cell_mutex);
1305 EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
1308 * nvmem_del_cell_table() - remove a previously registered cell info table
1310 * @table: table of cell info entries
1312 void nvmem_del_cell_table(struct nvmem_cell_table *table)
1314 mutex_lock(&nvmem_cell_mutex);
1315 list_del(&table->node);
1316 mutex_unlock(&nvmem_cell_mutex);
1318 EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
1321 * nvmem_add_cell_lookups() - register a list of cell lookup entries
1323 * @entries: array of cell lookup entries
1324 * @nentries: number of cell lookup entries in the array
1326 void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1330 mutex_lock(&nvmem_lookup_mutex);
1331 for (i = 0; i < nentries; i++)
1332 list_add_tail(&entries[i].node, &nvmem_lookup_list);
1333 mutex_unlock(&nvmem_lookup_mutex);
1335 EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
1338 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
1341 * @entries: array of cell lookup entries
1342 * @nentries: number of cell lookup entries in the array
1344 void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1348 mutex_lock(&nvmem_lookup_mutex);
1349 for (i = 0; i < nentries; i++)
1350 list_del(&entries[i].node);
1351 mutex_unlock(&nvmem_lookup_mutex);
1353 EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
1356 * nvmem_dev_name() - Get the name of a given nvmem device.
1358 * @nvmem: nvmem device.
1360 * Return: name of the nvmem device.
1362 const char *nvmem_dev_name(struct nvmem_device *nvmem)
1364 return dev_name(&nvmem->dev);
1366 EXPORT_SYMBOL_GPL(nvmem_dev_name);
1368 static int __init nvmem_init(void)
1370 return bus_register(&nvmem_bus_type);
1373 static void __exit nvmem_exit(void)
1375 bus_unregister(&nvmem_bus_type);
1378 subsys_initcall(nvmem_init);
1379 module_exit(nvmem_exit);
1381 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
1382 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
1383 MODULE_DESCRIPTION("nvmem Driver Core");
1384 MODULE_LICENSE("GPL v2");