4 * Copyright (C) 2005 David Brownell
5 * Copyright (C) 2008 Secret Lab Technologies Ltd.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
18 #include <linux/kernel.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/cache.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/dmaengine.h>
24 #include <linux/mutex.h>
25 #include <linux/of_device.h>
26 #include <linux/of_irq.h>
27 #include <linux/clk/clk-conf.h>
28 #include <linux/slab.h>
29 #include <linux/mod_devicetable.h>
30 #include <linux/spi/spi.h>
31 #include <linux/of_gpio.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/pm_domain.h>
34 #include <linux/property.h>
35 #include <linux/export.h>
36 #include <linux/sched/rt.h>
37 #include <uapi/linux/sched/types.h>
38 #include <linux/delay.h>
39 #include <linux/kthread.h>
40 #include <linux/ioport.h>
41 #include <linux/acpi.h>
42 #include <linux/highmem.h>
43 #include <linux/idr.h>
44 #include <linux/platform_data/x86/apple.h>
46 #define CREATE_TRACE_POINTS
47 #include <trace/events/spi.h>
49 static DEFINE_IDR(spi_master_idr);
51 static void spidev_release(struct device *dev)
53 struct spi_device *spi = to_spi_device(dev);
55 /* spi controllers may cleanup for released devices */
56 if (spi->controller->cleanup)
57 spi->controller->cleanup(spi);
59 spi_controller_put(spi->controller);
64 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
66 const struct spi_device *spi = to_spi_device(dev);
69 len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
73 return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
75 static DEVICE_ATTR_RO(modalias);
77 #define SPI_STATISTICS_ATTRS(field, file) \
78 static ssize_t spi_controller_##field##_show(struct device *dev, \
79 struct device_attribute *attr, \
82 struct spi_controller *ctlr = container_of(dev, \
83 struct spi_controller, dev); \
84 return spi_statistics_##field##_show(&ctlr->statistics, buf); \
86 static struct device_attribute dev_attr_spi_controller_##field = { \
87 .attr = { .name = file, .mode = 0444 }, \
88 .show = spi_controller_##field##_show, \
90 static ssize_t spi_device_##field##_show(struct device *dev, \
91 struct device_attribute *attr, \
94 struct spi_device *spi = to_spi_device(dev); \
95 return spi_statistics_##field##_show(&spi->statistics, buf); \
97 static struct device_attribute dev_attr_spi_device_##field = { \
98 .attr = { .name = file, .mode = 0444 }, \
99 .show = spi_device_##field##_show, \
102 #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string) \
103 static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
106 unsigned long flags; \
108 spin_lock_irqsave(&stat->lock, flags); \
109 len = sprintf(buf, format_string, stat->field); \
110 spin_unlock_irqrestore(&stat->lock, flags); \
113 SPI_STATISTICS_ATTRS(name, file)
115 #define SPI_STATISTICS_SHOW(field, format_string) \
116 SPI_STATISTICS_SHOW_NAME(field, __stringify(field), \
117 field, format_string)
119 SPI_STATISTICS_SHOW(messages, "%lu");
120 SPI_STATISTICS_SHOW(transfers, "%lu");
121 SPI_STATISTICS_SHOW(errors, "%lu");
122 SPI_STATISTICS_SHOW(timedout, "%lu");
124 SPI_STATISTICS_SHOW(spi_sync, "%lu");
125 SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
126 SPI_STATISTICS_SHOW(spi_async, "%lu");
128 SPI_STATISTICS_SHOW(bytes, "%llu");
129 SPI_STATISTICS_SHOW(bytes_rx, "%llu");
130 SPI_STATISTICS_SHOW(bytes_tx, "%llu");
132 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number) \
133 SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index, \
134 "transfer_bytes_histo_" number, \
135 transfer_bytes_histo[index], "%lu")
136 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0, "0-1");
137 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1, "2-3");
138 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2, "4-7");
139 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3, "8-15");
140 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4, "16-31");
141 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5, "32-63");
142 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6, "64-127");
143 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7, "128-255");
144 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8, "256-511");
145 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9, "512-1023");
146 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
147 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
148 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
149 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
150 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
151 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
152 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
154 SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
156 static struct attribute *spi_dev_attrs[] = {
157 &dev_attr_modalias.attr,
161 static const struct attribute_group spi_dev_group = {
162 .attrs = spi_dev_attrs,
165 static struct attribute *spi_device_statistics_attrs[] = {
166 &dev_attr_spi_device_messages.attr,
167 &dev_attr_spi_device_transfers.attr,
168 &dev_attr_spi_device_errors.attr,
169 &dev_attr_spi_device_timedout.attr,
170 &dev_attr_spi_device_spi_sync.attr,
171 &dev_attr_spi_device_spi_sync_immediate.attr,
172 &dev_attr_spi_device_spi_async.attr,
173 &dev_attr_spi_device_bytes.attr,
174 &dev_attr_spi_device_bytes_rx.attr,
175 &dev_attr_spi_device_bytes_tx.attr,
176 &dev_attr_spi_device_transfer_bytes_histo0.attr,
177 &dev_attr_spi_device_transfer_bytes_histo1.attr,
178 &dev_attr_spi_device_transfer_bytes_histo2.attr,
179 &dev_attr_spi_device_transfer_bytes_histo3.attr,
180 &dev_attr_spi_device_transfer_bytes_histo4.attr,
181 &dev_attr_spi_device_transfer_bytes_histo5.attr,
182 &dev_attr_spi_device_transfer_bytes_histo6.attr,
183 &dev_attr_spi_device_transfer_bytes_histo7.attr,
184 &dev_attr_spi_device_transfer_bytes_histo8.attr,
185 &dev_attr_spi_device_transfer_bytes_histo9.attr,
186 &dev_attr_spi_device_transfer_bytes_histo10.attr,
187 &dev_attr_spi_device_transfer_bytes_histo11.attr,
188 &dev_attr_spi_device_transfer_bytes_histo12.attr,
189 &dev_attr_spi_device_transfer_bytes_histo13.attr,
190 &dev_attr_spi_device_transfer_bytes_histo14.attr,
191 &dev_attr_spi_device_transfer_bytes_histo15.attr,
192 &dev_attr_spi_device_transfer_bytes_histo16.attr,
193 &dev_attr_spi_device_transfers_split_maxsize.attr,
197 static const struct attribute_group spi_device_statistics_group = {
198 .name = "statistics",
199 .attrs = spi_device_statistics_attrs,
202 static const struct attribute_group *spi_dev_groups[] = {
204 &spi_device_statistics_group,
208 static struct attribute *spi_controller_statistics_attrs[] = {
209 &dev_attr_spi_controller_messages.attr,
210 &dev_attr_spi_controller_transfers.attr,
211 &dev_attr_spi_controller_errors.attr,
212 &dev_attr_spi_controller_timedout.attr,
213 &dev_attr_spi_controller_spi_sync.attr,
214 &dev_attr_spi_controller_spi_sync_immediate.attr,
215 &dev_attr_spi_controller_spi_async.attr,
216 &dev_attr_spi_controller_bytes.attr,
217 &dev_attr_spi_controller_bytes_rx.attr,
218 &dev_attr_spi_controller_bytes_tx.attr,
219 &dev_attr_spi_controller_transfer_bytes_histo0.attr,
220 &dev_attr_spi_controller_transfer_bytes_histo1.attr,
221 &dev_attr_spi_controller_transfer_bytes_histo2.attr,
222 &dev_attr_spi_controller_transfer_bytes_histo3.attr,
223 &dev_attr_spi_controller_transfer_bytes_histo4.attr,
224 &dev_attr_spi_controller_transfer_bytes_histo5.attr,
225 &dev_attr_spi_controller_transfer_bytes_histo6.attr,
226 &dev_attr_spi_controller_transfer_bytes_histo7.attr,
227 &dev_attr_spi_controller_transfer_bytes_histo8.attr,
228 &dev_attr_spi_controller_transfer_bytes_histo9.attr,
229 &dev_attr_spi_controller_transfer_bytes_histo10.attr,
230 &dev_attr_spi_controller_transfer_bytes_histo11.attr,
231 &dev_attr_spi_controller_transfer_bytes_histo12.attr,
232 &dev_attr_spi_controller_transfer_bytes_histo13.attr,
233 &dev_attr_spi_controller_transfer_bytes_histo14.attr,
234 &dev_attr_spi_controller_transfer_bytes_histo15.attr,
235 &dev_attr_spi_controller_transfer_bytes_histo16.attr,
236 &dev_attr_spi_controller_transfers_split_maxsize.attr,
240 static const struct attribute_group spi_controller_statistics_group = {
241 .name = "statistics",
242 .attrs = spi_controller_statistics_attrs,
245 static const struct attribute_group *spi_master_groups[] = {
246 &spi_controller_statistics_group,
250 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
251 struct spi_transfer *xfer,
252 struct spi_controller *ctlr)
255 int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
260 spin_lock_irqsave(&stats->lock, flags);
263 stats->transfer_bytes_histo[l2len]++;
265 stats->bytes += xfer->len;
266 if ((xfer->tx_buf) &&
267 (xfer->tx_buf != ctlr->dummy_tx))
268 stats->bytes_tx += xfer->len;
269 if ((xfer->rx_buf) &&
270 (xfer->rx_buf != ctlr->dummy_rx))
271 stats->bytes_rx += xfer->len;
273 spin_unlock_irqrestore(&stats->lock, flags);
275 EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
277 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
278 * and the sysfs version makes coldplug work too.
281 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
282 const struct spi_device *sdev)
284 while (id->name[0]) {
285 if (!strcmp(sdev->modalias, id->name))
292 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
294 const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
296 return spi_match_id(sdrv->id_table, sdev);
298 EXPORT_SYMBOL_GPL(spi_get_device_id);
300 static int spi_match_device(struct device *dev, struct device_driver *drv)
302 const struct spi_device *spi = to_spi_device(dev);
303 const struct spi_driver *sdrv = to_spi_driver(drv);
305 /* Attempt an OF style match */
306 if (of_driver_match_device(dev, drv))
310 if (acpi_driver_match_device(dev, drv))
314 return !!spi_match_id(sdrv->id_table, spi);
316 return strcmp(spi->modalias, drv->name) == 0;
319 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
321 const struct spi_device *spi = to_spi_device(dev);
324 rc = acpi_device_uevent_modalias(dev, env);
328 return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
331 struct bus_type spi_bus_type = {
333 .dev_groups = spi_dev_groups,
334 .match = spi_match_device,
335 .uevent = spi_uevent,
337 EXPORT_SYMBOL_GPL(spi_bus_type);
340 static int spi_drv_probe(struct device *dev)
342 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
343 struct spi_device *spi = to_spi_device(dev);
346 ret = of_clk_set_defaults(dev->of_node, false);
351 spi->irq = of_irq_get(dev->of_node, 0);
352 if (spi->irq == -EPROBE_DEFER)
353 return -EPROBE_DEFER;
358 ret = dev_pm_domain_attach(dev, true);
359 if (ret != -EPROBE_DEFER) {
360 ret = sdrv->probe(spi);
362 dev_pm_domain_detach(dev, true);
368 static int spi_drv_remove(struct device *dev)
370 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
373 ret = sdrv->remove(to_spi_device(dev));
374 dev_pm_domain_detach(dev, true);
379 static void spi_drv_shutdown(struct device *dev)
381 const struct spi_driver *sdrv = to_spi_driver(dev->driver);
383 sdrv->shutdown(to_spi_device(dev));
387 * __spi_register_driver - register a SPI driver
388 * @owner: owner module of the driver to register
389 * @sdrv: the driver to register
392 * Return: zero on success, else a negative error code.
394 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
396 sdrv->driver.owner = owner;
397 sdrv->driver.bus = &spi_bus_type;
399 sdrv->driver.probe = spi_drv_probe;
401 sdrv->driver.remove = spi_drv_remove;
403 sdrv->driver.shutdown = spi_drv_shutdown;
404 return driver_register(&sdrv->driver);
406 EXPORT_SYMBOL_GPL(__spi_register_driver);
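/*
 * Illustrative sketch, not part of the original file: a minimal protocol
 * driver as it might hook into the registration path above via
 * module_spi_driver(). All "foo" identifiers are hypothetical placeholders.
 */
#if 0
static int foo_probe(struct spi_device *spi)
{
	/* adjust per-device settings before first use */
	spi->bits_per_word = 8;
	spi->max_speed_hz = 1000000;
	return spi_setup(spi);
}

static int foo_remove(struct spi_device *spi)
{
	return 0;
}

static const struct spi_device_id foo_ids[] = {
	{ "foo-chip", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, foo_ids);

static struct spi_driver foo_driver = {
	.driver = {
		.name	= "foo",
	},
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
};
module_spi_driver(foo_driver);
#endif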
408 /*-------------------------------------------------------------------------*/
410 /* SPI devices should normally not be created by SPI device drivers; that
411 * would make them board-specific. Similarly with SPI controller drivers.
 * Device registration normally goes into files like arch/.../mach.../board-YYY.c
 * with other read-only (flashable) information about mainboard devices.
417 struct list_head list;
418 struct spi_board_info board_info;
421 static LIST_HEAD(board_list);
422 static LIST_HEAD(spi_controller_list);
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; also used to protect
 * objects of type struct idr.
429 static DEFINE_MUTEX(board_lock);
432 * Prevents addition of devices with same chip select and
433 * addition of devices below an unregistering controller.
435 static DEFINE_MUTEX(spi_add_lock);
438 * spi_alloc_device - Allocate a new SPI device
439 * @ctlr: Controller to which device is connected
442 * Allows a driver to allocate and initialize a spi_device without
443 * registering it immediately. This allows a driver to directly
444 * fill the spi_device with device parameters before calling
445 * spi_add_device() on it.
447 * Caller is responsible to call spi_add_device() on the returned
448 * spi_device structure to add it to the SPI controller. If the caller
449 * needs to discard the spi_device without adding it, then it should
450 * call spi_dev_put() on it.
452 * Return: a pointer to the new device, or NULL.
454 struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
456 struct spi_device *spi;
458 if (!spi_controller_get(ctlr))
461 spi = kzalloc(sizeof(*spi), GFP_KERNEL);
463 spi_controller_put(ctlr);
467 spi->master = spi->controller = ctlr;
468 spi->dev.parent = &ctlr->dev;
469 spi->dev.bus = &spi_bus_type;
470 spi->dev.release = spidev_release;
471 spi->cs_gpio = -ENOENT;
473 spin_lock_init(&spi->statistics.lock);
475 device_initialize(&spi->dev);
478 EXPORT_SYMBOL_GPL(spi_alloc_device);
480 static void spi_dev_set_name(struct spi_device *spi)
482 struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
485 dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
489 dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
493 static int spi_dev_check(struct device *dev, void *data)
495 struct spi_device *spi = to_spi_device(dev);
496 struct spi_device *new_spi = data;
498 if (spi->controller == new_spi->controller &&
499 spi->chip_select == new_spi->chip_select)
505 * spi_add_device - Add spi_device allocated with spi_alloc_device
506 * @spi: spi_device to register
508 * Companion function to spi_alloc_device. Devices allocated with
509 * spi_alloc_device can be added onto the spi bus with this function.
511 * Return: 0 on success; negative errno on failure
513 int spi_add_device(struct spi_device *spi)
515 struct spi_controller *ctlr = spi->controller;
516 struct device *dev = ctlr->dev.parent;
519 /* Chipselects are numbered 0..max; validate. */
520 if (spi->chip_select >= ctlr->num_chipselect) {
521 dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
522 ctlr->num_chipselect);
526 /* Set the bus ID string */
527 spi_dev_set_name(spi);
529 /* We need to make sure there's no other device with this
530 * chipselect **BEFORE** we call setup(), else we'll trash
531 * its configuration. Lock against concurrent add() calls.
533 mutex_lock(&spi_add_lock);
535 status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
537 dev_err(dev, "chipselect %d already in use\n",
542 /* Controller may unregister concurrently */
543 if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
544 !device_is_registered(&ctlr->dev)) {
550 spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
552 /* Drivers may modify this initial i/o setup, but will
553 * normally rely on the device being setup. Devices
554 * using SPI_CS_HIGH can't coexist well otherwise...
556 status = spi_setup(spi);
558 dev_err(dev, "can't setup %s, status %d\n",
559 dev_name(&spi->dev), status);
563 /* Device may be bound to an active driver when this returns */
564 status = device_add(&spi->dev);
566 dev_err(dev, "can't add %s, status %d\n",
567 dev_name(&spi->dev), status);
569 dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
572 mutex_unlock(&spi_add_lock);
575 EXPORT_SYMBOL_GPL(spi_add_device);
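/*
 * Illustrative sketch, not part of the original file: pairing
 * spi_alloc_device() with spi_add_device(), as e.g. a bridge driver that
 * discovers a chip at runtime might do. Field values and the "foo" name
 * are hypothetical.
 */
#if 0
static struct spi_device *foo_add_chip(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	spi = spi_alloc_device(ctlr);
	if (!spi)
		return NULL;

	spi->chip_select = 0;
	spi->max_speed_hz = 5000000;
	spi->mode = SPI_MODE_3;
	strlcpy(spi->modalias, "foo-chip", sizeof(spi->modalias));

	if (spi_add_device(spi)) {
		/* never registered, so drop the reference ourselves */
		spi_dev_put(spi);
		return NULL;
	}
	return spi;
}
#endif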
578 * spi_new_device - instantiate one new SPI device
579 * @ctlr: Controller to which device is connected
580 * @chip: Describes the SPI device
583 * On typical mainboards, this is purely internal; and it's not needed
584 * after board init creates the hard-wired devices. Some development
585 * platforms may not be able to use spi_register_board_info though, and
586 * this is exported so that for example a USB or parport based adapter
587 * driver could add devices (which it would learn about out-of-band).
589 * Return: the new device, or NULL.
591 struct spi_device *spi_new_device(struct spi_controller *ctlr,
592 struct spi_board_info *chip)
594 struct spi_device *proxy;
597 /* NOTE: caller did any chip->bus_num checks necessary.
599 * Also, unless we change the return value convention to use
600 * error-or-pointer (not NULL-or-pointer), troubleshootability
601 * suggests syslogged diagnostics are best here (ugh).
604 proxy = spi_alloc_device(ctlr);
608 WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
610 proxy->chip_select = chip->chip_select;
611 proxy->max_speed_hz = chip->max_speed_hz;
612 proxy->mode = chip->mode;
613 proxy->irq = chip->irq;
614 strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
615 proxy->dev.platform_data = (void *) chip->platform_data;
616 proxy->controller_data = chip->controller_data;
617 proxy->controller_state = NULL;
619 if (chip->properties) {
620 status = device_add_properties(&proxy->dev, chip->properties);
623 "failed to add properties to '%s': %d\n",
624 chip->modalias, status);
629 status = spi_add_device(proxy);
631 goto err_remove_props;
636 if (chip->properties)
637 device_remove_properties(&proxy->dev);
642 EXPORT_SYMBOL_GPL(spi_new_device);
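/*
 * Illustrative sketch, not part of the original file: an adapter driver
 * using spi_new_device() for a chip it learned about out-of-band. The
 * board_info contents are hypothetical.
 */
#if 0
static struct spi_device *foo_attach_eeprom(struct spi_controller *ctlr)
{
	struct spi_board_info chip = {
		.modalias	= "at25",
		.max_speed_hz	= 1000000,
		.chip_select	= 1,
		.mode		= SPI_MODE_0,
	};

	return spi_new_device(ctlr, &chip);	/* NULL on failure */
}
#endif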
645 * spi_unregister_device - unregister a single SPI device
646 * @spi: spi_device to unregister
648 * Start making the passed SPI device vanish. Normally this would be handled
649 * by spi_unregister_controller().
651 void spi_unregister_device(struct spi_device *spi)
656 if (spi->dev.of_node) {
657 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
658 of_node_put(spi->dev.of_node);
660 if (ACPI_COMPANION(&spi->dev))
661 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
662 device_unregister(&spi->dev);
664 EXPORT_SYMBOL_GPL(spi_unregister_device);
666 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
667 struct spi_board_info *bi)
669 struct spi_device *dev;
671 if (ctlr->bus_num != bi->bus_num)
674 dev = spi_new_device(ctlr, bi);
676 dev_err(ctlr->dev.parent, "can't create new device for %s\n",
681 * spi_register_board_info - register SPI devices for a given board
682 * @info: array of chip descriptors
683 * @n: how many descriptors are provided
686 * Board-specific early init code calls this (probably during arch_initcall)
687 * with segments of the SPI device table. Any device nodes are created later,
688 * after the relevant parent SPI controller (bus_num) is defined. We keep
689 * this table of devices forever, so that reloading a controller driver will
690 * not make Linux forget about these hard-wired devices.
692 * Other code can also call this, e.g. a particular add-on board might provide
693 * SPI devices through its expansion connector, so code initializing that board
694 * would naturally declare its SPI devices.
696 * The board info passed can safely be __initdata ... but be careful of
697 * any embedded pointers (platform_data, etc), they're copied as-is.
698 * Device properties are deep-copied though.
700 * Return: zero on success, else a negative error code.
702 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
704 struct boardinfo *bi;
710 bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
714 for (i = 0; i < n; i++, bi++, info++) {
715 struct spi_controller *ctlr;
717 memcpy(&bi->board_info, info, sizeof(*info));
718 if (info->properties) {
719 bi->board_info.properties =
720 property_entries_dup(info->properties);
721 if (IS_ERR(bi->board_info.properties))
722 return PTR_ERR(bi->board_info.properties);
725 mutex_lock(&board_lock);
726 list_add_tail(&bi->list, &board_list);
727 list_for_each_entry(ctlr, &spi_controller_list, list)
728 spi_match_controller_to_boardinfo(ctlr,
730 mutex_unlock(&board_lock);
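/*
 * Illustrative sketch, not part of the original file: a board file
 * declaring its hard-wired SPI devices during early init, as described in
 * the kerneldoc above. All values are hypothetical.
 */
#if 0
static struct spi_board_info foo_board_spi_devices[] __initdata = {
	{
		.modalias	= "mmc_spi",
		.max_speed_hz	= 25000000,
		.bus_num	= 1,
		.chip_select	= 0,
	},
	{
		.modalias	= "ads7846",
		.max_speed_hz	= 120000,
		.bus_num	= 1,
		.chip_select	= 1,
		.irq		= 42,
	},
};

static int __init foo_board_init(void)
{
	return spi_register_board_info(foo_board_spi_devices,
				       ARRAY_SIZE(foo_board_spi_devices));
}
arch_initcall(foo_board_init);
#endif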
736 /*-------------------------------------------------------------------------*/
738 static void spi_set_cs(struct spi_device *spi, bool enable)
740 if (spi->mode & SPI_CS_HIGH)
743 if (gpio_is_valid(spi->cs_gpio)) {
744 gpio_set_value(spi->cs_gpio, !enable);
745 /* Some SPI masters need both GPIO CS & slave_select */
746 if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
747 spi->controller->set_cs)
748 spi->controller->set_cs(spi, !enable);
749 } else if (spi->controller->set_cs) {
750 spi->controller->set_cs(spi, !enable);
754 #ifdef CONFIG_HAS_DMA
755 static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
756 struct sg_table *sgt, void *buf, size_t len,
757 enum dma_data_direction dir)
759 const bool vmalloced_buf = is_vmalloc_addr(buf);
760 unsigned int max_seg_size = dma_get_max_seg_size(dev);
761 #ifdef CONFIG_HIGHMEM
762 const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
763 (unsigned long)buf < (PKMAP_BASE +
764 (LAST_PKMAP * PAGE_SIZE)));
766 const bool kmap_buf = false;
770 struct page *vm_page;
771 struct scatterlist *sg;
776 if (vmalloced_buf || kmap_buf) {
777 desc_len = min_t(int, max_seg_size, PAGE_SIZE);
778 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
779 } else if (virt_addr_valid(buf)) {
780 desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
781 sgs = DIV_ROUND_UP(len, desc_len);
786 ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
791 for (i = 0; i < sgs; i++) {
793 if (vmalloced_buf || kmap_buf) {
795 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
803 vm_page = vmalloc_to_page(buf);
805 vm_page = kmap_to_page(buf);
810 sg_set_page(sg, vm_page,
811 min, offset_in_page(buf));
813 min = min_t(size_t, len, desc_len);
815 sg_set_buf(sg, sg_buf, min);
823 ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
836 static void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
837 struct sg_table *sgt, enum dma_data_direction dir)
839 if (sgt->orig_nents) {
840 dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
845 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
847 struct device *tx_dev, *rx_dev;
848 struct spi_transfer *xfer;
855 tx_dev = ctlr->dma_tx->device->dev;
857 tx_dev = ctlr->dev.parent;
860 rx_dev = ctlr->dma_rx->device->dev;
862 rx_dev = ctlr->dev.parent;
864 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
865 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
868 if (xfer->tx_buf != NULL) {
869 ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
870 (void *)xfer->tx_buf, xfer->len,
876 if (xfer->rx_buf != NULL) {
877 ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
878 xfer->rx_buf, xfer->len,
881 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
888 ctlr->cur_msg_mapped = true;
893 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
895 struct spi_transfer *xfer;
896 struct device *tx_dev, *rx_dev;
898 if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
902 tx_dev = ctlr->dma_tx->device->dev;
904 tx_dev = ctlr->dev.parent;
907 rx_dev = ctlr->dma_rx->device->dev;
909 rx_dev = ctlr->dev.parent;
911 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
912 if (!ctlr->can_dma(ctlr, msg->spi, xfer))
915 spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
916 spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
921 #else /* !CONFIG_HAS_DMA */
922 static inline int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
923 struct sg_table *sgt, void *buf, size_t len,
924 enum dma_data_direction dir)
929 static inline void spi_unmap_buf(struct spi_controller *ctlr,
930 struct device *dev, struct sg_table *sgt,
931 enum dma_data_direction dir)
935 static inline int __spi_map_msg(struct spi_controller *ctlr,
936 struct spi_message *msg)
941 static inline int __spi_unmap_msg(struct spi_controller *ctlr,
942 struct spi_message *msg)
946 #endif /* !CONFIG_HAS_DMA */
948 static inline int spi_unmap_msg(struct spi_controller *ctlr,
949 struct spi_message *msg)
951 struct spi_transfer *xfer;
953 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* Restore the original value of tx_buf or rx_buf if they are NULL */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
967 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
969 struct spi_transfer *xfer;
971 unsigned int max_tx, max_rx;
973 if (ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX)) {
977 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
978 if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
980 max_tx = max(xfer->len, max_tx);
981 if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
983 max_rx = max(xfer->len, max_rx);
987 tmp = krealloc(ctlr->dummy_tx, max_tx,
988 GFP_KERNEL | GFP_DMA);
991 ctlr->dummy_tx = tmp;
992 memset(tmp, 0, max_tx);
996 tmp = krealloc(ctlr->dummy_rx, max_rx,
997 GFP_KERNEL | GFP_DMA);
1000 ctlr->dummy_rx = tmp;
1003 if (max_tx || max_rx) {
1004 list_for_each_entry(xfer, &msg->transfers,
1009 xfer->tx_buf = ctlr->dummy_tx;
1011 xfer->rx_buf = ctlr->dummy_rx;
1016 return __spi_map_msg(ctlr, msg);
1020 * spi_transfer_one_message - Default implementation of transfer_one_message()
1022 * This is a standard implementation of transfer_one_message() for
1023 * drivers which implement a transfer_one() operation. It provides
1024 * standard handling of delays and chip select management.
1026 static int spi_transfer_one_message(struct spi_controller *ctlr,
1027 struct spi_message *msg)
1029 struct spi_transfer *xfer;
1030 bool keep_cs = false;
1032 unsigned long long ms = 1;
1033 struct spi_statistics *statm = &ctlr->statistics;
1034 struct spi_statistics *stats = &msg->spi->statistics;
1036 spi_set_cs(msg->spi, true);
1038 SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1039 SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1041 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1042 trace_spi_transfer_start(msg, xfer);
1044 spi_statistics_add_transfer_stats(statm, xfer, ctlr);
1045 spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1047 if (xfer->tx_buf || xfer->rx_buf) {
1048 reinit_completion(&ctlr->xfer_completion);
1050 ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1052 SPI_STATISTICS_INCREMENT_FIELD(statm,
1054 SPI_STATISTICS_INCREMENT_FIELD(stats,
1056 dev_err(&msg->spi->dev,
1057 "SPI transfer failed: %d\n", ret);
1063 ms = 8LL * 1000LL * xfer->len;
1064 do_div(ms, xfer->speed_hz);
1065 ms += ms + 200; /* some tolerance */
1070 ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1071 msecs_to_jiffies(ms));
1075 SPI_STATISTICS_INCREMENT_FIELD(statm,
1077 SPI_STATISTICS_INCREMENT_FIELD(stats,
1079 dev_err(&msg->spi->dev,
1080 "SPI transfer timed out\n");
1081 msg->status = -ETIMEDOUT;
1085 dev_err(&msg->spi->dev,
1086 "Bufferless transfer has length %u\n",
1090 trace_spi_transfer_stop(msg, xfer);
1092 if (msg->status != -EINPROGRESS)
1095 if (xfer->delay_usecs) {
1096 u16 us = xfer->delay_usecs;
1101 usleep_range(us, us + DIV_ROUND_UP(us, 10));
1104 if (xfer->cs_change) {
1105 if (list_is_last(&xfer->transfer_list,
1109 spi_set_cs(msg->spi, false);
1111 spi_set_cs(msg->spi, true);
1115 msg->actual_length += xfer->len;
1119 if (ret != 0 || !keep_cs)
1120 spi_set_cs(msg->spi, false);
1122 if (msg->status == -EINPROGRESS)
1125 if (msg->status && ctlr->handle_err)
1126 ctlr->handle_err(ctlr, msg);
1128 spi_finalize_current_message(ctlr);
1134 * spi_finalize_current_transfer - report completion of a transfer
1135 * @ctlr: the controller reporting completion
1137 * Called by SPI drivers using the core transfer_one_message()
1138 * implementation to notify it that the current interrupt driven
1139 * transfer has finished and the next one may be scheduled.
1141 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1143 complete(&ctlr->xfer_completion);
1145 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
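/*
 * Illustrative sketch, not part of the original file: how a controller
 * driver built on transfer_one() typically reports completion from its
 * interrupt handler. foo_start_dma()/foo_ack_irq() are hypothetical
 * hardware helpers.
 */
#if 0
static int foo_transfer_one(struct spi_controller *ctlr,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	foo_start_dma(ctlr, xfer);	/* kick the hardware */
	return 1;			/* >0: transfer still in progress */
}

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct spi_controller *ctlr = dev_id;

	foo_ack_irq(ctlr);
	/* wakes the wait in spi_transfer_one_message() above */
	spi_finalize_current_transfer(ctlr);
	return IRQ_HANDLED;
}
#endif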
1148 * __spi_pump_messages - function which processes spi message queue
1149 * @ctlr: controller to process queue for
1150 * @in_kthread: true if we are in the context of the message pump thread
1152 * This function checks if there is any spi message in the queue that
1153 * needs processing and if so call out to the driver to initialize hardware
1154 * and transfer each message.
1156 * Note that it is called both from the kthread itself and also from
1157 * inside spi_sync(); the queue extraction handling at the top of the
1158 * function should deal with this safely.
1160 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1162 unsigned long flags;
1163 bool was_busy = false;
1167 spin_lock_irqsave(&ctlr->queue_lock, flags);
1169 /* Make sure we are not already running a message */
1170 if (ctlr->cur_msg) {
1171 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1175 /* If another context is idling the device then defer */
1177 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1178 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1182 /* Check if the queue is idle */
1183 if (list_empty(&ctlr->queue) || !ctlr->running) {
1185 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1189 /* Only do teardown in the thread */
1191 kthread_queue_work(&ctlr->kworker,
1192 &ctlr->pump_messages);
1193 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1198 ctlr->idling = true;
1199 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1201 kfree(ctlr->dummy_rx);
1202 ctlr->dummy_rx = NULL;
1203 kfree(ctlr->dummy_tx);
1204 ctlr->dummy_tx = NULL;
1205 if (ctlr->unprepare_transfer_hardware &&
1206 ctlr->unprepare_transfer_hardware(ctlr))
1208 "failed to unprepare transfer hardware\n");
1209 if (ctlr->auto_runtime_pm) {
1210 pm_runtime_mark_last_busy(ctlr->dev.parent);
1211 pm_runtime_put_autosuspend(ctlr->dev.parent);
1213 trace_spi_controller_idle(ctlr);
1215 spin_lock_irqsave(&ctlr->queue_lock, flags);
1216 ctlr->idling = false;
1217 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1221 /* Extract head of queue */
1223 list_first_entry(&ctlr->queue, struct spi_message, queue);
1225 list_del_init(&ctlr->cur_msg->queue);
1230 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1232 mutex_lock(&ctlr->io_mutex);
1234 if (!was_busy && ctlr->auto_runtime_pm) {
1235 ret = pm_runtime_get_sync(ctlr->dev.parent);
1237 pm_runtime_put_noidle(ctlr->dev.parent);
1238 dev_err(&ctlr->dev, "Failed to power device: %d\n",
1240 mutex_unlock(&ctlr->io_mutex);
1246 trace_spi_controller_busy(ctlr);
1248 if (!was_busy && ctlr->prepare_transfer_hardware) {
1249 ret = ctlr->prepare_transfer_hardware(ctlr);
1252 "failed to prepare transfer hardware\n");
1254 if (ctlr->auto_runtime_pm)
1255 pm_runtime_put(ctlr->dev.parent);
1256 mutex_unlock(&ctlr->io_mutex);
1261 trace_spi_message_start(ctlr->cur_msg);
1263 if (ctlr->prepare_message) {
1264 ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
1266 dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1268 ctlr->cur_msg->status = ret;
1269 spi_finalize_current_message(ctlr);
1272 ctlr->cur_msg_prepared = true;
1275 ret = spi_map_msg(ctlr, ctlr->cur_msg);
1277 ctlr->cur_msg->status = ret;
1278 spi_finalize_current_message(ctlr);
1282 ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
1285 "failed to transfer one message from queue\n");
1290 mutex_unlock(&ctlr->io_mutex);
1292 /* Prod the scheduler in case transfer_one() was busy waiting */
1298 * spi_pump_messages - kthread work function which processes spi message queue
1299 * @work: pointer to kthread work struct contained in the controller struct
1301 static void spi_pump_messages(struct kthread_work *work)
1303 struct spi_controller *ctlr =
1304 container_of(work, struct spi_controller, pump_messages);
1306 __spi_pump_messages(ctlr, true);
1309 static int spi_init_queue(struct spi_controller *ctlr)
1311 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1313 ctlr->running = false;
1316 kthread_init_worker(&ctlr->kworker);
1317 ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
1318 "%s", dev_name(&ctlr->dev));
1319 if (IS_ERR(ctlr->kworker_task)) {
1320 dev_err(&ctlr->dev, "failed to create message pump task\n");
1321 return PTR_ERR(ctlr->kworker_task);
1323 kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1326 * Controller config will indicate if this controller should run the
1327 * message pump with high (realtime) priority to reduce the transfer
1328 * latency on the bus by minimising the delay between a transfer
1329 * request and the scheduling of the message pump thread. Without this
1330 * setting the message pump thread will remain at default priority.
1333 dev_info(&ctlr->dev,
1334 "will run message pump with realtime priority\n");
1335 sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, ¶m);
 * spi_get_next_queued_message() - called by driver to check for queued messages
 * @ctlr: the controller to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
1351 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1353 struct spi_message *next;
1354 unsigned long flags;
1356 /* get a pointer to the next message, if any */
1357 spin_lock_irqsave(&ctlr->queue_lock, flags);
1358 next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
1360 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1364 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1367 * spi_finalize_current_message() - the current message is complete
1368 * @ctlr: the controller to return the message to
1370 * Called by the driver to notify the core that the message in the front of the
1371 * queue is complete and can be removed from the queue.
1373 void spi_finalize_current_message(struct spi_controller *ctlr)
1375 struct spi_message *mesg;
1376 unsigned long flags;
1379 spin_lock_irqsave(&ctlr->queue_lock, flags);
1380 mesg = ctlr->cur_msg;
1381 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1383 spi_unmap_msg(ctlr, mesg);
	/* In the prepare_message callback the SPI bus has the opportunity to
	 * split a transfer into smaller chunks.
	 * Release those split transfers here, since spi_map_msg() was done on
	 * the split transfers.
	 */
1390 spi_res_release(ctlr, mesg);
1392 if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
1393 ret = ctlr->unprepare_message(ctlr, mesg);
1395 dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
1400 spin_lock_irqsave(&ctlr->queue_lock, flags);
1401 ctlr->cur_msg = NULL;
1402 ctlr->cur_msg_prepared = false;
1403 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1404 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1406 trace_spi_message_done(mesg);
1410 mesg->complete(mesg->context);
1412 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
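/*
 * Illustrative sketch, not part of the original file: a driver supplying
 * its own transfer_one_message() and handing the finished message back to
 * the core. foo_do_transfer() is a hypothetical hardware helper.
 */
#if 0
static int foo_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list)
		foo_do_transfer(ctlr, msg->spi, xfer);

	msg->status = 0;
	spi_finalize_current_message(ctlr);	/* requeues the pump */
	return 0;
}
#endif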
1414 static int spi_start_queue(struct spi_controller *ctlr)
1416 unsigned long flags;
1418 spin_lock_irqsave(&ctlr->queue_lock, flags);
1420 if (ctlr->running || ctlr->busy) {
1421 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1425 ctlr->running = true;
1426 ctlr->cur_msg = NULL;
1427 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1429 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1434 static int spi_stop_queue(struct spi_controller *ctlr)
1436 unsigned long flags;
1437 unsigned limit = 500;
1440 spin_lock_irqsave(&ctlr->queue_lock, flags);
1443 * This is a bit lame, but is optimized for the common execution path.
1444 * A wait_queue on the ctlr->busy could be used, but then the common
1445 * execution path (pump_messages) would be required to call wake_up or
1446 * friends on every SPI message. Do this instead.
1448 while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
1449 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1450 usleep_range(10000, 11000);
1451 spin_lock_irqsave(&ctlr->queue_lock, flags);
1454 if (!list_empty(&ctlr->queue) || ctlr->busy)
1457 ctlr->running = false;
1459 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1462 dev_warn(&ctlr->dev, "could not stop message queue\n");
1468 static int spi_destroy_queue(struct spi_controller *ctlr)
1472 ret = spi_stop_queue(ctlr);
1475 * kthread_flush_worker will block until all work is done.
1476 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop the thread, so just
	 * return anyway.
	 */
1481 dev_err(&ctlr->dev, "problem destroying queue\n");
1485 kthread_flush_worker(&ctlr->kworker);
1486 kthread_stop(ctlr->kworker_task);
1491 static int __spi_queued_transfer(struct spi_device *spi,
1492 struct spi_message *msg,
1495 struct spi_controller *ctlr = spi->controller;
1496 unsigned long flags;
1498 spin_lock_irqsave(&ctlr->queue_lock, flags);
1500 if (!ctlr->running) {
1501 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1504 msg->actual_length = 0;
1505 msg->status = -EINPROGRESS;
1507 list_add_tail(&msg->queue, &ctlr->queue);
1508 if (!ctlr->busy && need_pump)
1509 kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
1511 spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1516 * spi_queued_transfer - transfer function for queued transfers
1517 * @spi: spi device which is requesting transfer
 * @msg: spi message which is to be handled and queued onto the driver queue
1520 * Return: zero on success, else a negative error code.
1522 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
1524 return __spi_queued_transfer(spi, msg, true);
1527 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1531 ctlr->transfer = spi_queued_transfer;
1532 if (!ctlr->transfer_one_message)
1533 ctlr->transfer_one_message = spi_transfer_one_message;
1535 /* Initialize and start queue */
1536 ret = spi_init_queue(ctlr);
1538 dev_err(&ctlr->dev, "problem initializing queue\n");
1539 goto err_init_queue;
1541 ctlr->queued = true;
1542 ret = spi_start_queue(ctlr);
1544 dev_err(&ctlr->dev, "problem starting queue\n");
1545 goto err_start_queue;
1551 spi_destroy_queue(ctlr);
1556 /*-------------------------------------------------------------------------*/
1558 #if defined(CONFIG_OF)
1559 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1560 struct device_node *nc)
1565 /* Mode (clock phase/polarity/etc.) */
1566 if (of_property_read_bool(nc, "spi-cpha"))
1567 spi->mode |= SPI_CPHA;
1568 if (of_property_read_bool(nc, "spi-cpol"))
1569 spi->mode |= SPI_CPOL;
1570 if (of_property_read_bool(nc, "spi-cs-high"))
1571 spi->mode |= SPI_CS_HIGH;
1572 if (of_property_read_bool(nc, "spi-3wire"))
1573 spi->mode |= SPI_3WIRE;
1574 if (of_property_read_bool(nc, "spi-lsb-first"))
1575 spi->mode |= SPI_LSB_FIRST;
1577 /* Device DUAL/QUAD mode */
1578 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1583 spi->mode |= SPI_TX_DUAL;
1586 spi->mode |= SPI_TX_QUAD;
1589 dev_warn(&ctlr->dev,
1590 "spi-tx-bus-width %d not supported\n",
1596 if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1601 spi->mode |= SPI_RX_DUAL;
1604 spi->mode |= SPI_RX_QUAD;
1607 dev_warn(&ctlr->dev,
1608 "spi-rx-bus-width %d not supported\n",
1614 if (spi_controller_is_slave(ctlr)) {
1615 if (strcmp(nc->name, "slave")) {
1616 dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
1623 /* Device address */
1624 rc = of_property_read_u32(nc, "reg", &value);
1626 dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
1630 spi->chip_select = value;
1633 rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1636 "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
1639 spi->max_speed_hz = value;
1644 static struct spi_device *
1645 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
1647 struct spi_device *spi;
1650 /* Alloc an spi_device */
1651 spi = spi_alloc_device(ctlr);
1653 dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
1658 /* Select device driver */
1659 rc = of_modalias_node(nc, spi->modalias,
1660 sizeof(spi->modalias));
1662 dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
1666 rc = of_spi_parse_dt(ctlr, spi, nc);
1670 /* Store a pointer to the node in the device structure */
1672 spi->dev.of_node = nc;
1673 spi->dev.fwnode = of_fwnode_handle(nc);
1675 /* Register the new device */
1676 rc = spi_add_device(spi);
1678 dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
1679 goto err_of_node_put;
1692 * of_register_spi_devices() - Register child devices onto the SPI bus
1693 * @ctlr: Pointer to spi_controller device
1695 * Registers an spi_device for each child node of controller node which
1696 * represents a valid SPI slave.
1698 static void of_register_spi_devices(struct spi_controller *ctlr)
1700 struct spi_device *spi;
1701 struct device_node *nc;
1703 if (!ctlr->dev.of_node)
1706 for_each_available_child_of_node(ctlr->dev.of_node, nc) {
1707 if (of_node_test_and_set_flag(nc, OF_POPULATED))
1709 spi = of_register_spi_device(ctlr, nc);
1711 dev_warn(&ctlr->dev,
1712 "Failed to create SPI device for %pOF\n", nc);
1713 of_node_clear_flag(nc, OF_POPULATED);
1718 static void of_register_spi_devices(struct spi_controller *ctlr) { }
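/*
 * Illustrative device tree fragment, not part of the original file,
 * showing the per-slave properties parsed by of_spi_parse_dt() above.
 * The node and compatible names are hypothetical examples.
 *
 *	&spi1 {
 *		eeprom@0 {
 *			compatible = "atmel,at25";
 *			reg = <0>;
 *			spi-max-frequency = <1000000>;
 *			spi-cpha;
 *			spi-cpol;
 *		};
 *	};
 */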
1722 static void acpi_spi_parse_apple_properties(struct spi_device *spi)
1724 struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
1725 const union acpi_object *obj;
1727 if (!x86_apple_machine)
1730 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
1731 && obj->buffer.length >= 4)
1732 spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
1734 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
1735 && obj->buffer.length == 8)
1736 spi->bits_per_word = *(u64 *)obj->buffer.pointer;
1738 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
1739 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
1740 spi->mode |= SPI_LSB_FIRST;
1742 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
1743 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
1744 spi->mode |= SPI_CPOL;
1746 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
1747 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
1748 spi->mode |= SPI_CPHA;
1751 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1753 struct spi_device *spi = data;
1754 struct spi_controller *ctlr = spi->controller;
1756 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1757 struct acpi_resource_spi_serialbus *sb;
1759 sb = &ares->data.spi_serial_bus;
1760 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1762 * ACPI DeviceSelection numbering is handled by the
1763 * host controller driver in Windows and can vary
1764 * from driver to driver. In Linux we always expect
1765 * 0 .. max - 1 so we need to ask the driver to
1766 * translate between the two schemes.
1768 if (ctlr->fw_translate_cs) {
1769 int cs = ctlr->fw_translate_cs(ctlr,
1770 sb->device_selection);
1773 spi->chip_select = cs;
1775 spi->chip_select = sb->device_selection;
1778 spi->max_speed_hz = sb->connection_speed;
1780 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1781 spi->mode |= SPI_CPHA;
1782 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1783 spi->mode |= SPI_CPOL;
1784 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1785 spi->mode |= SPI_CS_HIGH;
1787 } else if (spi->irq < 0) {
1790 if (acpi_dev_resource_interrupt(ares, 0, &r))
1794 /* Always tell the ACPI core to skip this resource */
1798 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
1799 struct acpi_device *adev)
1801 struct list_head resource_list;
1802 struct spi_device *spi;
1805 if (acpi_bus_get_status(adev) || !adev->status.present ||
1806 acpi_device_enumerated(adev))
1809 spi = spi_alloc_device(ctlr);
1811 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
1812 dev_name(&adev->dev));
1813 return AE_NO_MEMORY;
1816 ACPI_COMPANION_SET(&spi->dev, adev);
1819 INIT_LIST_HEAD(&resource_list);
1820 ret = acpi_dev_get_resources(adev, &resource_list,
1821 acpi_spi_add_resource, spi);
1822 acpi_dev_free_resource_list(&resource_list);
1824 acpi_spi_parse_apple_properties(spi);
1826 if (ret < 0 || !spi->max_speed_hz) {
1831 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
1832 sizeof(spi->modalias));
1835 spi->irq = acpi_dev_gpio_irq_get(adev, 0);
1837 acpi_device_set_enumerated(adev);
1839 adev->power.flags.ignore_parent = true;
1840 if (spi_add_device(spi)) {
1841 adev->power.flags.ignore_parent = false;
1842 dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
1843 dev_name(&adev->dev));
1850 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1851 void *data, void **return_value)
1853 struct spi_controller *ctlr = data;
1854 struct acpi_device *adev;
1856 if (acpi_bus_get_device(handle, &adev))
1859 return acpi_register_spi_device(ctlr, adev);
1862 static void acpi_register_spi_devices(struct spi_controller *ctlr)
1867 handle = ACPI_HANDLE(ctlr->dev.parent);
1871 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1872 acpi_spi_add_device, NULL, ctlr, NULL);
1873 if (ACPI_FAILURE(status))
1874 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
1877 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
1878 #endif /* CONFIG_ACPI */
1880 static void spi_controller_release(struct device *dev)
1882 struct spi_controller *ctlr;
1884 ctlr = container_of(dev, struct spi_controller, dev);
1888 static struct class spi_master_class = {
1889 .name = "spi_master",
1890 .owner = THIS_MODULE,
1891 .dev_release = spi_controller_release,
1892 .dev_groups = spi_master_groups,
1895 #ifdef CONFIG_SPI_SLAVE
1897 * spi_slave_abort - abort the ongoing transfer request on an SPI slave
1899 * @spi: device used for the current transfer
1901 int spi_slave_abort(struct spi_device *spi)
1903 struct spi_controller *ctlr = spi->controller;
1905 if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
1906 return ctlr->slave_abort(ctlr);
1910 EXPORT_SYMBOL_GPL(spi_slave_abort);
1912 static int match_true(struct device *dev, void *data)
1917 static ssize_t spi_slave_show(struct device *dev,
1918 struct device_attribute *attr, char *buf)
1920 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
1922 struct device *child;
1924 child = device_find_child(&ctlr->dev, NULL, match_true);
1925 return sprintf(buf, "%s\n",
1926 child ? to_spi_device(child)->modalias : NULL);
1929 static ssize_t spi_slave_store(struct device *dev,
1930 struct device_attribute *attr, const char *buf,
1933 struct spi_controller *ctlr = container_of(dev, struct spi_controller,
1935 struct spi_device *spi;
1936 struct device *child;
1940 rc = sscanf(buf, "%31s", name);
1941 if (rc != 1 || !name[0])
1944 child = device_find_child(&ctlr->dev, NULL, match_true);
1946 /* Remove registered slave */
1947 device_unregister(child);
1951 if (strcmp(name, "(null)")) {
1952 /* Register new slave */
1953 spi = spi_alloc_device(ctlr);
1957 strlcpy(spi->modalias, name, sizeof(spi->modalias));
1959 rc = spi_add_device(spi);
1969 static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
1971 static struct attribute *spi_slave_attrs[] = {
1972 &dev_attr_slave.attr,
1976 static const struct attribute_group spi_slave_group = {
1977 .attrs = spi_slave_attrs,
1980 static const struct attribute_group *spi_slave_groups[] = {
1981 &spi_controller_statistics_group,
1986 static struct class spi_slave_class = {
1987 .name = "spi_slave",
1988 .owner = THIS_MODULE,
1989 .dev_release = spi_controller_release,
1990 .dev_groups = spi_slave_groups,
1993 extern struct class spi_slave_class; /* dummy */
1997 * __spi_alloc_controller - allocate an SPI master or slave controller
1998 * @dev: the controller, possibly using the platform_bus
1999 * @size: how much zeroed driver-private data to allocate; the pointer to this
2000 * memory is in the driver_data field of the returned device,
2001 * accessible with spi_controller_get_devdata().
2002 * @slave: flag indicating whether to allocate an SPI master (false) or SPI
2003 * slave (true) controller
2004 * Context: can sleep
2006 * This call is used only by SPI controller drivers, which are the
2007 * only ones directly touching chip registers. It's how they allocate
2008 * an spi_controller structure, prior to calling spi_register_controller().
2010 * This must be called from context that can sleep.
2012 * The caller is responsible for assigning the bus number and initializing the
2013 * controller's methods before calling spi_register_controller(); and (after
 * errors adding the device) calling spi_controller_put() to prevent a memory
 * leak.
2017 * Return: the SPI controller structure on success, else NULL.
2019 struct spi_controller *__spi_alloc_controller(struct device *dev,
2020 unsigned int size, bool slave)
2022 struct spi_controller *ctlr;
2027 ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
2031 device_initialize(&ctlr->dev);
2033 ctlr->num_chipselect = 1;
2034 ctlr->slave = slave;
2035 if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
2036 ctlr->dev.class = &spi_slave_class;
2038 ctlr->dev.class = &spi_master_class;
2039 ctlr->dev.parent = dev;
2040 pm_suspend_ignore_children(&ctlr->dev, true);
2041 spi_controller_set_devdata(ctlr, &ctlr[1]);
2045 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
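/*
 * Illustrative sketch, not part of the original file: a controller driver
 * allocating its spi_controller plus private state in probe(). The foo_*
 * names are hypothetical; registration is shown further below.
 */
#if 0
struct foo_priv {
	void __iomem *regs;
};

static int foo_alloc_ctlr(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct foo_priv *priv;

	ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
	if (!ctlr)
		return -ENOMEM;

	priv = spi_controller_get_devdata(ctlr);
	/* ... ioremap registers into priv->regs, set up clocks, etc. ... */

	return 0;
}
#endif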
2047 static void devm_spi_release_controller(struct device *dev, void *ctlr)
2049 spi_controller_put(*(struct spi_controller **)ctlr);
2053 * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
2054 * @dev: physical device of SPI controller
2055 * @size: how much zeroed driver-private data to allocate
2056 * @slave: whether to allocate an SPI master (false) or SPI slave (true)
2057 * Context: can sleep
2059 * Allocate an SPI controller and automatically release a reference on it
2060 * when @dev is unbound from its driver. Drivers are thus relieved from
2061 * having to call spi_controller_put().
2063 * The arguments to this function are identical to __spi_alloc_controller().
2065 * Return: the SPI controller structure on success, else NULL.
2067 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
2071 struct spi_controller **ptr, *ctlr;
2073 ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
2078 ctlr = __spi_alloc_controller(dev, size, slave);
2080 ctlr->devm_allocated = true;
2082 devres_add(dev, ptr);
2089 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
2092 static int of_spi_register_master(struct spi_controller *ctlr)
2095 struct device_node *np = ctlr->dev.of_node;
2100 nb = of_gpio_named_count(np, "cs-gpios");
2101 ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2103 /* Return error only for an incorrectly formed cs-gpios property */
2104 if (nb == 0 || nb == -ENOENT)
2109 cs = devm_kzalloc(&ctlr->dev, sizeof(int) * ctlr->num_chipselect,
2111 ctlr->cs_gpios = cs;
2113 if (!ctlr->cs_gpios)
2116 for (i = 0; i < ctlr->num_chipselect; i++)
2119 for (i = 0; i < nb; i++)
2120 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
2125 static int of_spi_register_master(struct spi_controller *ctlr)
2132 * spi_register_controller - register SPI master or slave controller
 * @ctlr: initialized master, originally from spi_alloc_master() or
 *	spi_alloc_slave()
2135 * Context: can sleep
2137 * SPI controllers connect to their drivers using some non-SPI bus,
2138 * such as the platform bus. The final stage of probe() in that code
2139 * includes calling spi_register_controller() to hook up to this SPI bus glue.
2141 * SPI controllers use board specific (often SOC specific) bus numbers,
2142 * and board-specific addressing for SPI devices combines those numbers
2143 * with chip select numbers. Since SPI does not directly support dynamic
2144 * device identification, boards need configuration tables telling which
2145 * chip is at which address.
2147 * This must be called from context that can sleep. It returns zero on
2148 * success, else a negative error code (dropping the controller's refcount).
2149 * After a successful return, the caller is responsible for calling
2150 * spi_unregister_controller().
2152 * Return: zero on success, else a negative error code.
2154 int spi_register_controller(struct spi_controller *ctlr)
2156 struct device *dev = ctlr->dev.parent;
2157 struct boardinfo *bi;
2158 int status = -ENODEV;
2159 int id, first_dynamic;
2164 if (!spi_controller_is_slave(ctlr)) {
2165 status = of_spi_register_master(ctlr);
2170 /* even if it's just one always-selected device, there must
2171 * be at least one chipselect
2173 if (ctlr->num_chipselect == 0)
2175 if (ctlr->bus_num >= 0) {
2176 /* devices with a fixed bus num must check-in with the num */
2177 mutex_lock(&board_lock);
2178 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2179 ctlr->bus_num + 1, GFP_KERNEL);
2180 mutex_unlock(&board_lock);
2181 if (WARN(id < 0, "couldn't get idr"))
2182 return id == -ENOSPC ? -EBUSY : id;
2184 } else if (ctlr->dev.of_node) {
2185 /* allocate dynamic bus number using Linux idr */
2186 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2189 mutex_lock(&board_lock);
2190 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2191 ctlr->bus_num + 1, GFP_KERNEL);
2192 mutex_unlock(&board_lock);
2193 if (WARN(id < 0, "couldn't get idr"))
2194 return id == -ENOSPC ? -EBUSY : id;
2197 if (ctlr->bus_num < 0) {
2198 first_dynamic = of_alias_get_highest_id("spi");
2199 if (first_dynamic < 0)
2204 mutex_lock(&board_lock);
2205 id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
2207 mutex_unlock(&board_lock);
2208 if (WARN(id < 0, "couldn't get idr"))
2212 INIT_LIST_HEAD(&ctlr->queue);
2213 spin_lock_init(&ctlr->queue_lock);
2214 spin_lock_init(&ctlr->bus_lock_spinlock);
2215 mutex_init(&ctlr->bus_lock_mutex);
2216 mutex_init(&ctlr->io_mutex);
2217 ctlr->bus_lock_flag = 0;
2218 init_completion(&ctlr->xfer_completion);
2219 if (!ctlr->max_dma_len)
2220 ctlr->max_dma_len = INT_MAX;
2222 /* register the device, then userspace will see it.
2223 * registration fails if the bus ID is in use.
2225 dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
2226 status = device_add(&ctlr->dev);
2229 mutex_lock(&board_lock);
2230 idr_remove(&spi_master_idr, ctlr->bus_num);
2231 mutex_unlock(&board_lock);
2234 dev_dbg(dev, "registered %s %s\n",
2235 spi_controller_is_slave(ctlr) ? "slave" : "master",
2236 dev_name(&ctlr->dev));
2238 /* If we're using a queued driver, start the queue */
2240 dev_info(dev, "controller is unqueued, this is deprecated\n");
2242 status = spi_controller_initialize_queue(ctlr);
2244 device_del(&ctlr->dev);
2246 mutex_lock(&board_lock);
2247 idr_remove(&spi_master_idr, ctlr->bus_num);
2248 mutex_unlock(&board_lock);
2252 /* add statistics */
2253 spin_lock_init(&ctlr->statistics.lock);
2255 mutex_lock(&board_lock);
2256 list_add_tail(&ctlr->list, &spi_controller_list);
2257 list_for_each_entry(bi, &board_list, list)
2258 spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
2259 mutex_unlock(&board_lock);
2261 /* Register devices from the device tree and ACPI */
2262 of_register_spi_devices(ctlr);
2263 acpi_register_spi_devices(ctlr);
2267 EXPORT_SYMBOL_GPL(spi_register_controller);
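/*
 * Illustrative sketch, not part of the original file: finishing controller
 * probe by filling in the methods and calling spi_register_controller().
 * foo_transfer_one() refers to the earlier hypothetical sketch.
 */
#if 0
static int foo_register_ctlr(struct platform_device *pdev,
			     struct spi_controller *ctlr)
{
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->bus_num = -1;			/* request a dynamic bus number */
	ctlr->num_chipselect = 4;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	ctlr->transfer_one = foo_transfer_one;

	return spi_register_controller(ctlr);
}
#endif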
2269 static void devm_spi_unregister(struct device *dev, void *res)
2271 spi_unregister_controller(*(struct spi_controller **)res);
 * devm_spi_register_controller - register managed SPI master or slave
 *	controller
 * @dev: device managing SPI controller
 * @ctlr: initialized controller, originally from spi_alloc_master() or
 *	spi_alloc_slave()
 * Context: can sleep
 *
 * Register an SPI controller as with spi_register_controller(), which will
 * automatically be unregistered when @dev is unbound from its driver.
2285 * Return: zero on success, else a negative error code.
2287 int devm_spi_register_controller(struct device *dev,
2288 struct spi_controller *ctlr)
2290 struct spi_controller **ptr;
2293 ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
2297 ret = spi_register_controller(ctlr);
2300 devres_add(dev, ptr);
2307 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
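/*
 * Illustrative sketch, not part of the original file: the managed
 * registration path, which removes the need for an explicit
 * spi_unregister_controller() in remove(). foo_* names are hypothetical.
 */
#if 0
static int foo_devm_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = spi_alloc_master(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	/* ... hardware setup and ctlr configuration ... */

	return devm_spi_register_controller(&pdev->dev, ctlr);
}
#endif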
2309 static int __unregister(struct device *dev, void *null)
2311 spi_unregister_device(to_spi_device(dev));
2316 * spi_unregister_controller - unregister SPI master or slave controller
2317 * @ctlr: the controller being unregistered
2318 * Context: can sleep
2320 * This call is used only by SPI controller drivers, which are the
2321 * only ones directly touching chip registers.
2323 * This must be called from context that can sleep.
2325 void spi_unregister_controller(struct spi_controller *ctlr)
2327 struct spi_controller *found;
2328 int id = ctlr->bus_num;
2330 /* Prevent addition of new devices, unregister existing ones */
2331 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2332 mutex_lock(&spi_add_lock);
2334 device_for_each_child(&ctlr->dev, NULL, __unregister);
2336 /* First make sure that this controller was ever added */
2337 mutex_lock(&board_lock);
2338 found = idr_find(&spi_master_idr, id);
2339 mutex_unlock(&board_lock);
2341 if (spi_destroy_queue(ctlr))
2342 dev_err(&ctlr->dev, "queue remove failed\n");
2344 mutex_lock(&board_lock);
2345 list_del(&ctlr->list);
2346 mutex_unlock(&board_lock);
2348 device_del(&ctlr->dev);
2350 /* Release the last reference on the controller if its driver
2351 * has not yet been converted to devm_spi_alloc_master/slave().
2353 if (!ctlr->devm_allocated)
2354 put_device(&ctlr->dev);
2357 mutex_lock(&board_lock);
2359 idr_remove(&spi_master_idr, id);
2360 mutex_unlock(&board_lock);
2362 if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2363 mutex_unlock(&spi_add_lock);
2365 EXPORT_SYMBOL_GPL(spi_unregister_controller);
2367 int spi_controller_suspend(struct spi_controller *ctlr)
2371 /* Basically no-ops for non-queued controllers */
2375 ret = spi_stop_queue(ctlr);
2377 dev_err(&ctlr->dev, "queue stop failed\n");
2381 EXPORT_SYMBOL_GPL(spi_controller_suspend);
2383 int spi_controller_resume(struct spi_controller *ctlr)
2390 ret = spi_start_queue(ctlr);
2392 dev_err(&ctlr->dev, "queue restart failed\n");
2396 EXPORT_SYMBOL_GPL(spi_controller_resume);
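/*
 * Example (illustrative sketch, hypothetical names): queued controller
 * drivers normally call these helpers from their dev_pm_ops so the message
 * queue is quiesced before the hardware is suspended and restarted after it
 * resumes.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_suspend(ctlr);
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		return spi_controller_resume(ctlr);
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 */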
2398 static int __spi_controller_match(struct device *dev, const void *data)
2400 struct spi_controller *ctlr;
2401 const u16 *bus_num = data;
2403 ctlr = container_of(dev, struct spi_controller, dev);
2404 return ctlr->bus_num == *bus_num;
2408 * spi_busnum_to_master - look up master associated with bus_num
2409 * @bus_num: the master's bus number
2410 * Context: can sleep
2412 * This call may be used with devices that are registered after
2413 * arch init time. It returns a refcounted pointer to the relevant
2414 * spi_controller (which the caller must release), or NULL if there is
2415 * no such master registered.
2417 * Return: the SPI master structure on success, else NULL.
2419 struct spi_controller *spi_busnum_to_master(u16 bus_num)
2422 struct spi_controller *ctlr = NULL;
2424 dev = class_find_device(&spi_master_class, NULL, &bus_num,
2425 __spi_controller_match);
2427 ctlr = container_of(dev, struct spi_controller, dev);
2428 /* reference taken in class_find_device() */
2431 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
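/*
 * Example (illustrative sketch): legacy board code might look up a
 * controller by bus number and instantiate a device on it; the reference
 * returned by spi_busnum_to_master() must be dropped afterwards.
 * my_board_info is a hypothetical struct spi_board_info.
 *
 *	struct spi_controller *ctlr = spi_busnum_to_master(1);
 *	struct spi_device *spi = NULL;
 *
 *	if (ctlr) {
 *		spi = spi_new_device(ctlr, &my_board_info);
 *		put_device(&ctlr->dev);
 *	}
 */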
2433 /*-------------------------------------------------------------------------*/
2435 /* Core methods for SPI resource management */
2438 * spi_res_alloc - allocate a spi resource that is life-cycle managed
2439 * during the processing of a spi_message while using spi_res_release()
2441 * @spi: the spi device for which we allocate memory
2442 * @release: the release code to execute for this resource
2443 * @size: size to alloc and return
2444 * @gfp: GFP allocation flags
2446 * Return: the pointer to the allocated data
2448 * This may get enhanced in the future to allocate from a memory pool
2449 * of the @spi_device or @spi_controller to avoid repeated allocations.
2451 void *spi_res_alloc(struct spi_device *spi,
2452 spi_res_release_t release,
2453 size_t size, gfp_t gfp)
2455 struct spi_res *sres;
2457 sres = kzalloc(sizeof(*sres) + size, gfp);
2461 INIT_LIST_HEAD(&sres->entry);
2462 sres->release = release;
2466 EXPORT_SYMBOL_GPL(spi_res_alloc);
2469 * spi_res_free - free an spi resource
2470 * @res: pointer to the custom data of a resource
2473 void spi_res_free(void *res)
2475 struct spi_res *sres = container_of(res, struct spi_res, data);
2480 WARN_ON(!list_empty(&sres->entry));
2483 EXPORT_SYMBOL_GPL(spi_res_free);
2486 * spi_res_add - add a spi_res to the spi_message
2487 * @message: the spi message
2488 * @res: the spi_resource
2490 void spi_res_add(struct spi_message *message, void *res)
2492 struct spi_res *sres = container_of(res, struct spi_res, data);
2494 WARN_ON(!list_empty(&sres->entry));
2495 list_add_tail(&sres->entry, &message->resources);
2497 EXPORT_SYMBOL_GPL(spi_res_add);
2500 * spi_res_release - release all spi resources for this message
2501 * @ctlr: the @spi_controller
2502 * @message: the @spi_message
2504 void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2506 struct spi_res *res;
2508 while (!list_empty(&message->resources)) {
2509 res = list_last_entry(&message->resources,
2510 struct spi_res, entry);
2513 res->release(ctlr, message, res->data);
2515 list_del(&res->entry);
2520 EXPORT_SYMBOL_GPL(spi_res_release);
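/*
 * Example (illustrative sketch, hypothetical names and size): a user of the
 * spi_res API allocates a resource, attaches it to a message, and relies on
 * spi_res_release() to invoke the release callback once the message is done.
 * my_res_release() would undo whatever the resource set up; @res points at
 * the data area returned by spi_res_alloc().
 *
 *	static void my_res_release(struct spi_controller *ctlr,
 *				   struct spi_message *msg, void *res)
 *	{
 *	}
 *
 *	static int my_prepare(struct spi_device *spi, struct spi_message *msg)
 *	{
 *		void *data = spi_res_alloc(spi, my_res_release, 64, GFP_KERNEL);
 *
 *		if (!data)
 *			return -ENOMEM;
 *		spi_res_add(msg, data);
 *		return 0;
 *	}
 */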
2522 /*-------------------------------------------------------------------------*/
2524 /* Core methods for spi_message alterations */
2526 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
2527 struct spi_message *msg,
2530 struct spi_replaced_transfers *rxfer = res;
2533 /* call extra callback if requested */
2535 rxfer->release(ctlr, msg, res);
2537 /* insert replaced transfers back into the message */
2538 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2540 /* remove the formerly inserted entries */
2541 for (i = 0; i < rxfer->inserted; i++)
2542 list_del(&rxfer->inserted_transfers[i].transfer_list);
2546 * spi_replace_transfers - replace transfers with several transfers
2547 * and register the change with spi_message.resources
2548 * @msg: the spi_message we work upon
2549 * @xfer_first: the first spi_transfer we want to replace
2550 * @remove: number of transfers to remove
2551 * @insert: the number of transfers we want to insert instead
2552 * @release: extra release code necessary in some circumstances
2553 * @extradatasize: extra data to allocate (with alignment guarantees
2554 * of struct @spi_transfer)
2557 * Return: pointer to @spi_replaced_transfers on success,
2558 * or PTR_ERR(...) in case of errors.
2560 struct spi_replaced_transfers *spi_replace_transfers(
2561 struct spi_message *msg,
2562 struct spi_transfer *xfer_first,
2565 spi_replaced_release_t release,
2566 size_t extradatasize,
2569 struct spi_replaced_transfers *rxfer;
2570 struct spi_transfer *xfer;
2573 /* allocate the structure using spi_res */
2574 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2575 insert * sizeof(struct spi_transfer)
2576 + sizeof(struct spi_replaced_transfers)
2580 return ERR_PTR(-ENOMEM);
2582 /* the release code to invoke before running the generic release */
2583 rxfer->release = release;
2585 /* assign extradata */
2588 &rxfer->inserted_transfers[insert];
2590 /* init the replaced_transfers list */
2591 INIT_LIST_HEAD(&rxfer->replaced_transfers);
2593 /* assign the list_entry after which we should reinsert
2594 * the @replaced_transfers - it may be spi_message.transfers!
2596 rxfer->replaced_after = xfer_first->transfer_list.prev;
2598 /* remove the requested number of transfers */
2599 for (i = 0; i < remove; i++) {
2600 /* if the entry after replaced_after is msg->transfers
2601 * then we have been requested to remove more transfers
2602 * than are in the list
2604 if (rxfer->replaced_after->next == &msg->transfers) {
2605 dev_err(&msg->spi->dev,
2606 "requested to remove more spi_transfers than are available\n");
2607 /* insert replaced transfers back into the message */
2608 list_splice(&rxfer->replaced_transfers,
2609 rxfer->replaced_after);
2611 /* free the spi_replace_transfer structure */
2612 spi_res_free(rxfer);
2614 /* and return with an error */
2615 return ERR_PTR(-EINVAL);
2618 /* remove the entry after replaced_after from the list of
2619 * transfers and add it to the list of replaced_transfers
2621 list_move_tail(rxfer->replaced_after->next,
2622 &rxfer->replaced_transfers);
2625 /* create copies of the given xfer with identical settings,
2626 * based on the first transfer to get removed
2628 for (i = 0; i < insert; i++) {
2629 /* we need to run in reverse order */
2630 xfer = &rxfer->inserted_transfers[insert - 1 - i];
2632 /* copy all spi_transfer data */
2633 memcpy(xfer, xfer_first, sizeof(*xfer));
2636 list_add(&xfer->transfer_list, rxfer->replaced_after);
2638 /* clear cs_change and delay_usecs for all but the last */
2640 xfer->cs_change = false;
2641 xfer->delay_usecs = 0;
2645 /* set up inserted */
2646 rxfer->inserted = insert;
2648 /* and register it with spi_res/spi_message */
2649 spi_res_add(msg, rxfer);
2653 EXPORT_SYMBOL_GPL(spi_replace_transfers);
2655 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
2656 struct spi_message *msg,
2657 struct spi_transfer **xferp,
2661 struct spi_transfer *xfer = *xferp, *xfers;
2662 struct spi_replaced_transfers *srt;
2666 /* warn once that we are splitting a transfer */
2667 dev_warn_once(&msg->spi->dev,
2668 "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
2669 xfer->len, maxsize);
2671 /* calculate how many we have to replace */
2672 count = DIV_ROUND_UP(xfer->len, maxsize);
2674 /* create replacement */
2675 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2677 return PTR_ERR(srt);
2678 xfers = srt->inserted_transfers;
2680 /* now handle each of those newly inserted spi_transfers
2681 * note that the replacement spi_transfers are all preset
2682 * to the same values as *xferp, so tx_buf, rx_buf and len
2683 * are all identical (as well as most others)
2684 * so we just have to fix up len and the pointers.
2686 * this also includes support for the deprecated
2687 * spi_message.is_dma_mapped interface
2690 /* the first transfer just needs the length modified, so we
2691 * run it outside the loop
2693 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2695 /* all the others need rx_buf/tx_buf also set */
2696 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2697 /* update rx_buf, tx_buf and dma */
2698 if (xfers[i].rx_buf)
2699 xfers[i].rx_buf += offset;
2700 if (xfers[i].rx_dma)
2701 xfers[i].rx_dma += offset;
2702 if (xfers[i].tx_buf)
2703 xfers[i].tx_buf += offset;
2704 if (xfers[i].tx_dma)
2705 xfers[i].tx_dma += offset;
2708 xfers[i].len = min(maxsize, xfers[i].len - offset);
2711 /* we set xferp to the last entry we have inserted,
2712 * so that we skip the transfers that have already been split
2714 *xferp = &xfers[count - 1];
2716 /* increment statistics counters */
2717 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
2718 transfers_split_maxsize);
2719 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2720 transfers_split_maxsize);
2726 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2727 * when an individual transfer exceeds a certain size
2729 * @ctlr: the @spi_controller for this transfer
2730 * @msg: the @spi_message to transform
2731 * @maxsize: the maximum length a transfer may have before it gets split
2732 * @gfp: GFP allocation flags
2734 * Return: status of transformation
2736 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
2737 struct spi_message *msg,
2741 struct spi_transfer *xfer;
2744 /* iterate over the transfer_list,
2745 * but note that xfer is advanced to the last transfer inserted
2746 * to avoid checking sizes again unnecessarily (also, xfer may
2747 * potentially belong to a different list by the time the
2748 * replacement has happened)
2750 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2751 if (xfer->len > maxsize) {
2752 ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
2761 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
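/*
 * Example (illustrative sketch): a controller driver whose hardware cannot
 * handle more than 64 KiB per transfer could split oversized transfers from
 * its prepare_message() hook.  foo_prepare_message() and the 64 KiB limit
 * are hypothetical.
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, SZ_64K,
 *						   GFP_KERNEL);
 *	}
 */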
2763 /*-------------------------------------------------------------------------*/
2765 /* Core methods for SPI controller protocol drivers. Some of the
2766 * other core methods are currently defined as inline functions.
2769 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
2772 if (ctlr->bits_per_word_mask) {
2773 /* Only 32 bits fit in the mask */
2774 if (bits_per_word > 32)
2776 if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
2784 * spi_setup - setup SPI mode and clock rate
2785 * @spi: the device whose settings are being modified
2786 * Context: can sleep, and no requests are queued to the device
2788 * SPI protocol drivers may need to update the transfer mode if the
2789 * device doesn't work with its default. They may likewise need
2790 * to update clock rates or word sizes from initial values. This function
2791 * changes those settings, and must be called from a context that can sleep.
2792 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
2793 * effect the next time the device is selected and data is transferred to
2794 * or from it. When this function returns, the spi device is deselected.
2796 * Note that this call will fail if the protocol driver specifies an option
2797 * that the underlying controller or its driver does not support. For
2798 * example, not all hardware supports wire transfers using nine bit words,
2799 * LSB-first wire encoding, or active-high chipselects.
2801 * Return: zero on success, else a negative error code.
2803 int spi_setup(struct spi_device *spi)
2805 unsigned bad_bits, ugly_bits;
2808 /* check mode to prevent DUAL and QUAD from being set at the same time
2810 if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2811 ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2813 "setup: can not select dual and quad at the same time\n");
2816 /* in SPI_3WIRE mode, DUAL and QUAD must not be set
2818 if ((spi->mode & SPI_3WIRE) && (spi->mode &
2819 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2821 /* help drivers fail *cleanly* when they need options
2822 * that aren't supported with their current controller
2824 bad_bits = spi->mode & ~spi->controller->mode_bits;
2825 ugly_bits = bad_bits &
2826 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
2829 "setup: ignoring unsupported mode bits %x\n",
2831 spi->mode &= ~ugly_bits;
2832 bad_bits &= ~ugly_bits;
2835 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2840 if (!spi->bits_per_word)
2841 spi->bits_per_word = 8;
2843 status = __spi_validate_bits_per_word(spi->controller,
2844 spi->bits_per_word);
2848 if (!spi->max_speed_hz)
2849 spi->max_speed_hz = spi->controller->max_speed_hz;
2851 if (spi->controller->setup)
2852 status = spi->controller->setup(spi);
2854 spi_set_cs(spi, false);
2856 dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
2857 (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
2858 (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
2859 (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
2860 (spi->mode & SPI_3WIRE) ? "3wire, " : "",
2861 (spi->mode & SPI_LOOP) ? "loopback, " : "",
2862 spi->bits_per_word, spi->max_speed_hz,
2867 EXPORT_SYMBOL_GPL(spi_setup);
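/*
 * Example (illustrative sketch, hypothetical protocol driver): a driver
 * whose chip needs mode 3, 16-bit words and a 1 MHz ceiling would adjust the
 * spi_device and call spi_setup() from its probe() before queueing messages.
 *
 *	static int foo_spi_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *
 *		return spi_setup(spi);
 *	}
 */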
2869 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2871 struct spi_controller *ctlr = spi->controller;
2872 struct spi_transfer *xfer;
2875 if (list_empty(&message->transfers))
2878 /* Half-duplex links include original MicroWire, and ones with
2879 * only one data pin like SPI_3WIRE (switches direction) or where
2880 * either MOSI or MISO is missing. They can also be caused by
2881 * software limitations.
2883 if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
2884 (spi->mode & SPI_3WIRE)) {
2885 unsigned flags = ctlr->flags;
2887 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2888 if (xfer->rx_buf && xfer->tx_buf)
2890 if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
2892 if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
2898 * Set transfer bits_per_word and max speed as spi device default if
2899 * it is not set for this transfer.
2900 * Set transfer tx_nbits and rx_nbits as single transfer default
2901 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
2903 message->frame_length = 0;
2904 list_for_each_entry(xfer, &message->transfers, transfer_list) {
2905 message->frame_length += xfer->len;
2906 if (!xfer->bits_per_word)
2907 xfer->bits_per_word = spi->bits_per_word;
2909 if (!xfer->speed_hz)
2910 xfer->speed_hz = spi->max_speed_hz;
2911 if (!xfer->speed_hz)
2912 xfer->speed_hz = ctlr->max_speed_hz;
2914 if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
2915 xfer->speed_hz = ctlr->max_speed_hz;
2917 if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
2921 * SPI transfer length must be a multiple of the SPI word size,
2922 * where the word size is rounded up to a power-of-two number of bytes
2924 if (xfer->bits_per_word <= 8)
2926 else if (xfer->bits_per_word <= 16)
2931 /* No partial transfers accepted */
2932 if (xfer->len % w_size)
2935 if (xfer->speed_hz && ctlr->min_speed_hz &&
2936 xfer->speed_hz < ctlr->min_speed_hz)
2939 if (xfer->tx_buf && !xfer->tx_nbits)
2940 xfer->tx_nbits = SPI_NBITS_SINGLE;
2941 if (xfer->rx_buf && !xfer->rx_nbits)
2942 xfer->rx_nbits = SPI_NBITS_SINGLE;
2943 /* check transfer tx/rx_nbits:
2944 * 1. check the value matches one of single, dual and quad
2945 * 2. check tx/rx_nbits match the mode in spi_device
2948 if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
2949 xfer->tx_nbits != SPI_NBITS_DUAL &&
2950 xfer->tx_nbits != SPI_NBITS_QUAD)
2952 if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2953 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2955 if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2956 !(spi->mode & SPI_TX_QUAD))
2959 /* check transfer rx_nbits */
2961 if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2962 xfer->rx_nbits != SPI_NBITS_DUAL &&
2963 xfer->rx_nbits != SPI_NBITS_QUAD)
2965 if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2966 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2968 if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2969 !(spi->mode & SPI_RX_QUAD))
2974 message->status = -EINPROGRESS;
2979 static int __spi_async(struct spi_device *spi, struct spi_message *message)
2981 struct spi_controller *ctlr = spi->controller;
2985 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
2986 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2988 trace_spi_message_submit(message);
2990 return ctlr->transfer(spi, message);
2994 * spi_async - asynchronous SPI transfer
2995 * @spi: device with which data will be exchanged
2996 * @message: describes the data transfers, including completion callback
2997 * Context: any (irqs may be blocked, etc)
2999 * This call may be used from irq context and other contexts which can't sleep,
3000 * as well as from task contexts which can sleep.
3002 * The completion callback is invoked in a context which can't sleep.
3003 * Before that invocation, the value of message->status is undefined.
3004 * When the callback is issued, message->status holds either zero (to
3005 * indicate complete success) or a negative error code. After that
3006 * callback returns, the driver which issued the transfer request may
3007 * deallocate the associated memory; it's no longer in use by any SPI
3008 * core or controller driver code.
3010 * Note that although all messages to a spi_device are handled in
3011 * FIFO order, messages may go to different devices in other orders.
3012 * Some device might be higher priority, or have various "hard" access
3013 * time requirements, for example.
3015 * On detection of any fault during the transfer, processing of
3016 * the entire message is aborted, and the device is deselected.
3017 * Until returning from the associated message completion callback,
3018 * no other spi_message queued to that device will be processed.
3019 * (This rule applies equally to all the synchronous transfer calls,
3020 * which are wrappers around this core asynchronous primitive.)
3022 * Return: zero on success, else a negative error code.
3024 int spi_async(struct spi_device *spi, struct spi_message *message)
3026 struct spi_controller *ctlr = spi->controller;
3028 unsigned long flags;
3030 ret = __spi_validate(spi, message);
3034 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3036 if (ctlr->bus_lock_flag)
3039 ret = __spi_async(spi, message);
3041 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3045 EXPORT_SYMBOL_GPL(spi_async);
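/*
 * Example (illustrative sketch, hypothetical helper): an asynchronous
 * submission supplies a completion callback; the message, transfer and
 * buffers must stay valid until that callback has run.  Here the caller owns
 * the spi_message/spi_transfer storage and is woken through a completion.
 *
 *	static void foo_done(void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	static int foo_read_async(struct spi_device *spi,
 *				  struct spi_message *msg,
 *				  struct spi_transfer *xfer,
 *				  void *rxbuf, unsigned int len,
 *				  struct completion *done)
 *	{
 *		memset(xfer, 0, sizeof(*xfer));
 *		xfer->rx_buf = rxbuf;
 *		xfer->len = len;
 *
 *		spi_message_init(msg);
 *		spi_message_add_tail(xfer, msg);
 *		msg->complete = foo_done;
 *		msg->context = done;
 *
 *		return spi_async(spi, msg);
 *	}
 */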
3048 * spi_async_locked - version of spi_async with exclusive bus usage
3049 * @spi: device with which data will be exchanged
3050 * @message: describes the data transfers, including completion callback
3051 * Context: any (irqs may be blocked, etc)
3053 * This call may be used from irq context and other contexts which can't sleep,
3054 * as well as from task contexts which can sleep.
3056 * The completion callback is invoked in a context which can't sleep.
3057 * Before that invocation, the value of message->status is undefined.
3058 * When the callback is issued, message->status holds either zero (to
3059 * indicate complete success) or a negative error code. After that
3060 * callback returns, the driver which issued the transfer request may
3061 * deallocate the associated memory; it's no longer in use by any SPI
3062 * core or controller driver code.
3064 * Note that although all messages to a spi_device are handled in
3065 * FIFO order, messages may go to different devices in other orders.
3066 * Some device might be higher priority, or have various "hard" access
3067 * time requirements, for example.
3069 * On detection of any fault during the transfer, processing of
3070 * the entire message is aborted, and the device is deselected.
3071 * Until returning from the associated message completion callback,
3072 * no other spi_message queued to that device will be processed.
3073 * (This rule applies equally to all the synchronous transfer calls,
3074 * which are wrappers around this core asynchronous primitive.)
3076 * Return: zero on success, else a negative error code.
3078 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3080 struct spi_controller *ctlr = spi->controller;
3082 unsigned long flags;
3084 ret = __spi_validate(spi, message);
3088 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3090 ret = __spi_async(spi, message);
3092 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3097 EXPORT_SYMBOL_GPL(spi_async_locked);
3100 int spi_flash_read(struct spi_device *spi,
3101 struct spi_flash_read_message *msg)
3104 struct spi_controller *master = spi->controller;
3105 struct device *rx_dev = NULL;
3108 if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
3109 msg->addr_nbits == SPI_NBITS_DUAL) &&
3110 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3112 if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
3113 msg->addr_nbits == SPI_NBITS_QUAD) &&
3114 !(spi->mode & SPI_TX_QUAD))
3116 if (msg->data_nbits == SPI_NBITS_DUAL &&
3117 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3119 if (msg->data_nbits == SPI_NBITS_QUAD &&
3120 !(spi->mode & SPI_RX_QUAD))
3123 if (master->auto_runtime_pm) {
3124 ret = pm_runtime_get_sync(master->dev.parent);
3126 dev_err(&master->dev, "Failed to power device: %d\n",
3132 mutex_lock(&master->bus_lock_mutex);
3133 mutex_lock(&master->io_mutex);
3134 if (master->dma_rx && master->spi_flash_can_dma(spi, msg)) {
3135 rx_dev = master->dma_rx->device->dev;
3136 ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
3140 msg->cur_msg_mapped = true;
3142 ret = master->spi_flash_read(spi, msg);
3143 if (msg->cur_msg_mapped)
3144 spi_unmap_buf(master, rx_dev, &msg->rx_sg,
3146 mutex_unlock(&master->io_mutex);
3147 mutex_unlock(&master->bus_lock_mutex);
3149 if (master->auto_runtime_pm)
3150 pm_runtime_put(master->dev.parent);
3154 EXPORT_SYMBOL_GPL(spi_flash_read);
3156 /*-------------------------------------------------------------------------*/
3158 /* Utility methods for SPI protocol drivers, layered on
3159 * top of the core. Some other utility methods are defined as inline functions.
3163 static void spi_complete(void *arg)
3168 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3170 DECLARE_COMPLETION_ONSTACK(done);
3172 struct spi_controller *ctlr = spi->controller;
3173 unsigned long flags;
3175 status = __spi_validate(spi, message);
3179 message->complete = spi_complete;
3180 message->context = &done;
3183 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3184 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3186 /* If we're not using the legacy transfer method then we will
3187 * try to transfer in the calling context, so special-case this path.
3188 * This code would be less tricky if we could remove the
3189 * support for driver implemented message queues.
3191 if (ctlr->transfer == spi_queued_transfer) {
3192 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3194 trace_spi_message_submit(message);
3196 status = __spi_queued_transfer(spi, message, false);
3198 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3200 status = spi_async_locked(spi, message);
3204 /* Push out the messages in the calling context if we can. */
3207 if (ctlr->transfer == spi_queued_transfer) {
3208 SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3209 spi_sync_immediate);
3210 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3211 spi_sync_immediate);
3212 __spi_pump_messages(ctlr, false);
3215 wait_for_completion(&done);
3216 status = message->status;
3218 message->context = NULL;
3223 * spi_sync - blocking/synchronous SPI data transfers
3224 * @spi: device with which data will be exchanged
3225 * @message: describes the data transfers
3226 * Context: can sleep
3228 * This call may only be used from a context that may sleep. The sleep
3229 * is non-interruptible, and has no timeout. Low-overhead controller
3230 * drivers may DMA directly into and out of the message buffers.
3232 * Note that the SPI device's chip select is active during the message,
3233 * and then is normally disabled between messages. Drivers for some
3234 * frequently-used devices may want to minimize costs of selecting a chip,
3235 * by leaving it selected in anticipation that the next message will go
3236 * to the same chip. (That may increase power usage.)
3238 * Also, the caller is guaranteeing that the memory associated with the
3239 * message will not be freed before this call returns.
3241 * Return: zero on success, else a negative error code.
3243 int spi_sync(struct spi_device *spi, struct spi_message *message)
3247 mutex_lock(&spi->controller->bus_lock_mutex);
3248 ret = __spi_sync(spi, message);
3249 mutex_unlock(&spi->controller->bus_lock_mutex);
3253 EXPORT_SYMBOL_GPL(spi_sync);
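/*
 * Example (illustrative sketch, hypothetical register-read helper): a
 * protocol driver builds a message from a write transfer and a read
 * transfer, then blocks in spi_sync() until it completes.  Real drivers
 * should use DMA-safe (not stack) buffers for the data.
 *
 *	static int foo_read_reg(struct spi_device *spi, u8 *cmd, u8 *val)
 *	{
 *		struct spi_transfer xfers[2] = {
 *			{ .tx_buf = cmd, .len = 1 },
 *			{ .rx_buf = val, .len = 1 },
 *		};
 *		struct spi_message msg;
 *
 *		spi_message_init_with_transfers(&msg, xfers, 2);
 *		return spi_sync(spi, &msg);
 *	}
 */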
3256 * spi_sync_locked - version of spi_sync with exclusive bus usage
3257 * @spi: device with which data will be exchanged
3258 * @message: describes the data transfers
3259 * Context: can sleep
3261 * This call may only be used from a context that may sleep. The sleep
3262 * is non-interruptible, and has no timeout. Low-overhead controller
3263 * drivers may DMA directly into and out of the message buffers.
3265 * This call should be used by drivers that require exclusive access to the
3266 * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
3267 * be released by a spi_bus_unlock call when the exclusive access is over.
3269 * Return: zero on success, else a negative error code.
3271 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
3273 return __spi_sync(spi, message);
3275 EXPORT_SYMBOL_GPL(spi_sync_locked);
3278 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
3279 * @ctlr: SPI bus master that should be locked for exclusive bus access
3280 * Context: can sleep
3282 * This call may only be used from a context that may sleep. The sleep
3283 * is non-interruptible, and has no timeout.
3285 * This call should be used by drivers that require exclusive access to the
3286 * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
3287 * exclusive access is over. Data transfer must be done by spi_sync_locked
3288 * and spi_async_locked calls when the SPI bus lock is held.
3290 * Return: always zero.
3292 int spi_bus_lock(struct spi_controller *ctlr)
3294 unsigned long flags;
3296 mutex_lock(&ctlr->bus_lock_mutex);
3298 spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3299 ctlr->bus_lock_flag = 1;
3300 spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3302 /* mutex remains locked until spi_bus_unlock is called */
3306 EXPORT_SYMBOL_GPL(spi_bus_lock);
3309 * spi_bus_unlock - release the lock for exclusive SPI bus usage
3310 * @ctlr: SPI bus master that was locked for exclusive bus access
3311 * Context: can sleep
3313 * This call may only be used from a context that may sleep. The sleep
3314 * is non-interruptible, and has no timeout.
3316 * This call releases an SPI bus lock previously obtained by an spi_bus_lock call.
3319 * Return: always zero.
3321 int spi_bus_unlock(struct spi_controller *ctlr)
3323 ctlr->bus_lock_flag = 0;
3325 mutex_unlock(&ctlr->bus_lock_mutex);
3329 EXPORT_SYMBOL_GPL(spi_bus_unlock);
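/*
 * Example (illustrative sketch, hypothetical messages): a driver that must
 * issue two messages back to back with no other client's traffic in between
 * (say, an atomic read-modify-write of an attached chip) brackets them with
 * the bus lock API and uses the _locked transfer calls.
 *
 *	static int foo_atomic_update(struct spi_device *spi,
 *				     struct spi_message *rd,
 *				     struct spi_message *wr)
 *	{
 *		struct spi_controller *ctlr = spi->controller;
 *		int ret;
 *
 *		spi_bus_lock(ctlr);
 *		ret = spi_sync_locked(spi, rd);
 *		if (!ret)
 *			ret = spi_sync_locked(spi, wr);
 *		spi_bus_unlock(ctlr);
 *
 *		return ret;
 *	}
 */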
3331 /* portable code must never pass more than 32 bytes */
3332 #define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
3337 * spi_write_then_read - SPI synchronous write followed by read
3338 * @spi: device with which data will be exchanged
3339 * @txbuf: data to be written (need not be dma-safe)
3340 * @n_tx: size of txbuf, in bytes
3341 * @rxbuf: buffer into which data will be read (need not be dma-safe)
3342 * @n_rx: size of rxbuf, in bytes
3343 * Context: can sleep
3345 * This performs a half duplex MicroWire style transaction with the
3346 * device, sending txbuf and then reading rxbuf. The return value
3347 * is zero for success, else a negative errno status code.
3348 * This call may only be used from a context that may sleep.
3350 * Parameters to this routine are always copied using a small buffer;
3351 * portable code should never use this for more than 32 bytes.
3352 * Performance-sensitive or bulk transfer code should instead use
3353 * spi_{async,sync}() calls with dma-safe buffers.
3355 * Return: zero on success, else a negative error code.
3357 int spi_write_then_read(struct spi_device *spi,
3358 const void *txbuf, unsigned n_tx,
3359 void *rxbuf, unsigned n_rx)
3361 static DEFINE_MUTEX(lock);
3364 struct spi_message message;
3365 struct spi_transfer x[2];
3368 /* Use preallocated DMA-safe buffer if we can. We can't avoid
3369 * copying here (this is a pure convenience helper), but we can
3370 * keep heap costs out of the hot path unless someone else is
3371 * using the pre-allocated buffer or the transfer is too large.
3373 if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
3374 local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
3375 GFP_KERNEL | GFP_DMA);
3382 spi_message_init(&message);
3383 memset(x, 0, sizeof(x));
3386 spi_message_add_tail(&x[0], &message);
3390 spi_message_add_tail(&x[1], &message);
3393 memcpy(local_buf, txbuf, n_tx);
3394 x[0].tx_buf = local_buf;
3395 x[1].rx_buf = local_buf + n_tx;
3398 status = spi_sync(spi, &message);
3400 memcpy(rxbuf, x[1].rx_buf, n_rx);
3402 if (x[0].tx_buf == buf)
3403 mutex_unlock(&lock);
3409 EXPORT_SYMBOL_GPL(spi_write_then_read);
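/*
 * Example (illustrative sketch, hypothetical opcode): reading a small ID
 * register with a one-byte command is the typical use; both buffers may live
 * on the stack because the data is bounced through the DMA-safe scratch
 * buffer above.
 *
 *	static int foo_read_id(struct spi_device *spi, u8 *id, unsigned int len)
 *	{
 *		u8 cmd = 0x9f;
 *
 *		return spi_write_then_read(spi, &cmd, 1, id, len);
 *	}
 */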
3411 /*-------------------------------------------------------------------------*/
3413 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
3414 static int __spi_of_device_match(struct device *dev, void *data)
3416 return dev->of_node == data;
3419 /* must call put_device() when done with the returned spi_device */
3420 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3422 struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3423 __spi_of_device_match);
3424 return dev ? to_spi_device(dev) : NULL;
3427 static int __spi_of_controller_match(struct device *dev, const void *data)
3429 return dev->of_node == data;
3432 /* the spi controllers are not using spi_bus, so we have to find them another way */
3433 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
3437 dev = class_find_device(&spi_master_class, NULL, node,
3438 __spi_of_controller_match);
3439 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3440 dev = class_find_device(&spi_slave_class, NULL, node,
3441 __spi_of_controller_match);
3445 /* reference taken in class_find_device() */
3446 return container_of(dev, struct spi_controller, dev);
3449 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3452 struct of_reconfig_data *rd = arg;
3453 struct spi_controller *ctlr;
3454 struct spi_device *spi;
3456 switch (of_reconfig_get_state_change(action, arg)) {
3457 case OF_RECONFIG_CHANGE_ADD:
3458 ctlr = of_find_spi_controller_by_node(rd->dn->parent);
3460 return NOTIFY_OK; /* not for us */
3462 if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3463 put_device(&ctlr->dev);
3467 spi = of_register_spi_device(ctlr, rd->dn);
3468 put_device(&ctlr->dev);
3471 pr_err("%s: failed to create for '%pOF'\n",
3473 of_node_clear_flag(rd->dn, OF_POPULATED);
3474 return notifier_from_errno(PTR_ERR(spi));
3478 case OF_RECONFIG_CHANGE_REMOVE:
3479 /* already depopulated? */
3480 if (!of_node_check_flag(rd->dn, OF_POPULATED))
3483 /* find our device by node */
3484 spi = of_find_spi_device_by_node(rd->dn);
3486 return NOTIFY_OK; /* no? not meant for us */
3488 /* unregister takes one ref away */
3489 spi_unregister_device(spi);
3491 /* and drop the reference taken by the find above */
3492 put_device(&spi->dev);
3499 static struct notifier_block spi_of_notifier = {
3500 .notifier_call = of_spi_notify,
3502 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3503 extern struct notifier_block spi_of_notifier;
3504 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3506 #if IS_ENABLED(CONFIG_ACPI)
3507 static int spi_acpi_controller_match(struct device *dev, const void *data)
3509 return ACPI_COMPANION(dev->parent) == data;
3512 static int spi_acpi_device_match(struct device *dev, void *data)
3514 return ACPI_COMPANION(dev) == data;
3517 static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
3521 dev = class_find_device(&spi_master_class, NULL, adev,
3522 spi_acpi_controller_match);
3523 if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
3524 dev = class_find_device(&spi_slave_class, NULL, adev,
3525 spi_acpi_controller_match);
3529 return container_of(dev, struct spi_controller, dev);
3532 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
3536 dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
3538 return dev ? to_spi_device(dev) : NULL;
3541 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
3544 struct acpi_device *adev = arg;
3545 struct spi_controller *ctlr;
3546 struct spi_device *spi;
3549 case ACPI_RECONFIG_DEVICE_ADD:
3550 ctlr = acpi_spi_find_controller_by_adev(adev->parent);
3554 acpi_register_spi_device(ctlr, adev);
3555 put_device(&ctlr->dev);
3557 case ACPI_RECONFIG_DEVICE_REMOVE:
3558 if (!acpi_device_enumerated(adev))
3561 spi = acpi_spi_find_device_by_adev(adev);
3565 spi_unregister_device(spi);
3566 put_device(&spi->dev);
3573 static struct notifier_block spi_acpi_notifier = {
3574 .notifier_call = acpi_spi_notify,
3577 extern struct notifier_block spi_acpi_notifier;
3580 static int __init spi_init(void)
3584 buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3590 status = bus_register(&spi_bus_type);
3594 status = class_register(&spi_master_class);
3598 if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
3599 status = class_register(&spi_slave_class);
3604 if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3605 WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
3606 if (IS_ENABLED(CONFIG_ACPI))
3607 WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3612 class_unregister(&spi_master_class);
3614 bus_unregister(&spi_bus_type);
3622 /* board_info is normally registered in arch_initcall(),
3623 * but even essential drivers wait till later
3625 * REVISIT only boardinfo really needs static linking. The rest (device and
3626 * driver registration) _could_ be dynamically linked (modular) ... the cost
3627 * would be making the boardinfo data structures much more public.
3629 postcore_initcall(spi_init);