/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);
#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)
#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index], "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};
static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	&dev_attr_spi_master_transfer_bytes_histo0.attr,
	&dev_attr_spi_master_transfer_bytes_histo1.attr,
	&dev_attr_spi_master_transfer_bytes_histo2.attr,
	&dev_attr_spi_master_transfer_bytes_histo3.attr,
	&dev_attr_spi_master_transfer_bytes_histo4.attr,
	&dev_attr_spi_master_transfer_bytes_histo5.attr,
	&dev_attr_spi_master_transfer_bytes_histo6.attr,
	&dev_attr_spi_master_transfer_bytes_histo7.attr,
	&dev_attr_spi_master_transfer_bytes_histo8.attr,
	&dev_attr_spi_master_transfer_bytes_histo9.attr,
	&dev_attr_spi_master_transfer_bytes_histo10.attr,
	&dev_attr_spi_master_transfer_bytes_histo11.attr,
	&dev_attr_spi_master_transfer_bytes_histo12.attr,
	&dev_attr_spi_master_transfer_bytes_histo13.attr,
	&dev_attr_spi_master_transfer_bytes_histo14.attr,
	&dev_attr_spi_master_transfer_bytes_histo15.attr,
	&dev_attr_spi_master_transfer_bytes_histo16.attr,
	&dev_attr_spi_master_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};
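/*
 * Illustrative sketch (not part of the original file): the attribute
 * groups above surface these counters in sysfs.  Assuming a controller
 * registered as "spi0" with a child device at chip select 0, the
 * resulting layout looks like:
 *
 *	/sys/class/spi_master/spi0/statistics/messages
 *	/sys/class/spi_master/spi0/statistics/bytes_tx
 *	/sys/bus/spi/devices/spi0.0/statistics/transfer_bytes_histo_16-31
 */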
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
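/*
 * Worked example of the bucketing above (not in the original): for
 * xfer->len == 20, fls(20) == 5, so l2len == 4 and the transfer is
 * counted in transfer_bytes_histo_16-31.  A zero-length transfer has
 * fls(0) == 0, is clamped to bucket 0, and lands in "0-1".
 */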
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);
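/*
 * Illustrative sketch (hypothetical "example-adc" names, not part of
 * the original file): the id_table walked by spi_match_id() is an
 * array terminated by a zeroed entry, since the loop stops at the
 * first empty name.
 */
static const struct spi_device_id example_adc_ids[] __maybe_unused = {
	{ "example-adc",	0 },
	{ "example-adc-16bit",	1 },
	{ }	/* sentinel */
};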
static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	struct spi_device	*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}
/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
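/*
 * Illustrative sketch (all "example_" names are hypothetical, not part
 * of the original file): client drivers normally reach
 * __spi_register_driver() through the spi_register_driver() macro, or
 * let module_spi_driver() emit the module init/exit boilerplate.
 */
static int __maybe_unused example_adc_probe(struct spi_device *spi)
{
	/* set up the chip here */
	return 0;
}

static int __maybe_unused example_adc_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver example_adc_driver __maybe_unused = {
	.driver = {
		.name	= "example-adc",
	},
	.probe	= example_adc_probe,
	.remove	= example_adc_remove,
};
/* module_spi_driver(example_adc_driver); */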
/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c with other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operation for board_info list and
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);

/*
 * Prevents addition of devices with same chip select and
 * addition of devices below an unregistering controller.
 */
static DEFINE_MUTEX(spi_add_lock);
/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * The caller is responsible for calling spi_add_device() on the
 * returned spi_device structure to add it to the SPI master.  If the
 * caller needs to discard the spi_device without adding it, then it
 * should call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
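/*
 * Illustrative sketch of the allocate/fill/add pattern described above
 * (hypothetical "example" chip parameters, not part of the original
 * file):
 */
static int __maybe_unused example_attach_chip(struct spi_master *master)
{
	struct spi_device *spi;
	int status;

	spi = spi_alloc_device(master);
	if (!spi)
		return -ENOMEM;

	spi->chip_select = 1;
	spi->max_speed_hz = 1000000;
	spi->mode = SPI_MODE_0;
	strlcpy(spi->modalias, "example-adc", sizeof(spi->modalias));

	status = spi_add_device(spi);
	if (status < 0)
		spi_dev_put(spi);	/* discard without adding */

	return status;
}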
static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}
/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&master->dev)) {
		status = -ENODEV;
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being set up.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
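/*
 * Illustrative sketch (hypothetical descriptor contents, not part of
 * the original file): an adapter driver that learns about a chip
 * out-of-band can instantiate it with a single call, as the comment
 * above suggests.
 */
static struct spi_device * __maybe_unused
example_add_oob_chip(struct spi_master *master)
{
	struct spi_board_info chip = {
		.modalias	= "example-adc",
		.max_speed_hz	= 500000,
		.chip_select	= 0,
		.mode		= SPI_MODE_3,
	};

	return spi_new_device(master, &chip);
}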
/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish.  Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
					  struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}
/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master,
						      &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
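/*
 * Illustrative sketch (hypothetical devices, not part of the original
 * file): board init code usually registers a table like this from an
 * arch_initcall, e.g.
 * spi_register_board_info(example_board_spi_devices,
 *			   ARRAY_SIZE(example_board_spi_devices));
 */
static struct spi_board_info example_board_spi_devices[] __initdata = {
	{
		.modalias	= "example-flash",
		.max_speed_hz	= 20000000,
		.bus_num	= 0,
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	},
	{
		.modalias	= "example-adc",
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= SPI_MODE_3,
	},
};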
/*-------------------------------------------------------------------------*/

static void spi_set_cs(struct spi_device *spi, bool enable)
{
	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (gpio_is_valid(spi->cs_gpio))
		gpio_set_value(spi->cs_gpio, !enable);
	else if (spi->master->set_cs)
		spi->master->set_cs(spi, !enable);
}
#ifdef CONFIG_HAS_DMA
static int spi_map_buf(struct spi_master *master, struct device *dev,
		       struct sg_table *sgt, void *buf, size_t len,
		       enum dma_data_direction dir)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(int, max_seg_size, master->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(&sgt->sgl[i], vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(&sgt->sgl[i], sg_buf, min);
		}

		buf += min;
		len -= min;
	}

	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
	if (!ret)
		ret = -ENOMEM;
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	sgt->nents = ret;

	return 0;
}
static void spi_unmap_buf(struct spi_master *master, struct device *dev,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	if (sgt->orig_nents) {
		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
		sg_free_table(sgt);
	}
}

static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = master->dev.parent;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = master->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
					  (void *)xfer->tx_buf, xfer->len,
					  DMA_TO_DEVICE);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
					  xfer->rx_buf, xfer->len,
					  DMA_FROM_DEVICE);
			if (ret != 0) {
				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
					      DMA_TO_DEVICE);
				return ret;
			}
		}
	}

	master->cur_msg_mapped = true;

	return 0;
}

static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	struct device *tx_dev, *rx_dev;

	if (!master->cur_msg_mapped || !master->can_dma)
		return 0;

	if (master->dma_tx)
		tx_dev = master->dma_tx->device->dev;
	else
		tx_dev = master->dev.parent;

	if (master->dma_rx)
		rx_dev = master->dma_rx->device->dev;
	else
		rx_dev = master->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!master->can_dma(master, msg->spi, xfer))
			continue;

		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	}

	return 0;
}
#else /* !CONFIG_HAS_DMA */
static inline int spi_map_buf(struct spi_master *master,
			      struct device *dev, struct sg_table *sgt,
			      void *buf, size_t len,
			      enum dma_data_direction dir)
{
	return -EINVAL;
}

static inline void spi_unmap_buf(struct spi_master *master,
				 struct device *dev, struct sg_table *sgt,
				 enum dma_data_direction dir)
{
}

static inline int __spi_map_msg(struct spi_master *master,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_master *master,
				  struct spi_message *msg)
{
	return 0;
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_master *master,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore the original value of tx_buf or rx_buf if they are
		 * NULL.
		 */
		if (xfer->tx_buf == master->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == master->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(master, msg);
}
static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((master->flags & SPI_MASTER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((master->flags & SPI_MASTER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(master->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_tx = tmp;
			memset(tmp, 0, max_tx);
		}

		if (max_rx) {
			tmp = krealloc(master->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			master->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->tx_buf)
					xfer->tx_buf = master->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = master->dummy_rx;
			}
		}
	}

	return __spi_map_msg(master, msg);
}
/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	unsigned long long ms = 1;
	struct spi_statistics *statm = &master->statistics;
	struct spi_statistics *stats = &msg->spi->statistics;

	spi_set_cs(msg->spi, true);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, master);
		spi_statistics_add_transfer_stats(stats, xfer, master);

		if (xfer->tx_buf || xfer->rx_buf) {
			reinit_completion(&master->xfer_completion);

			ret = master->transfer_one(master, msg->spi, xfer);
			if (ret < 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = 0;
				ms = 8LL * 1000LL * xfer->len;
				do_div(ms, xfer->speed_hz);
				ms += ms + 200; /* some tolerance */
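				/*
				 * Worked example (not in the original): a
				 * 4096-byte transfer at 1 MHz spends
				 * 8 * 1000 * 4096 / 1000000 = 32 ms on the
				 * wire, so the completion below is armed
				 * with 32 + 32 + 200 = 264 ms.
				 */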
				if (ms > UINT_MAX)
					ms = UINT_MAX;

				ms = wait_for_completion_timeout(&master->xfer_completion,
								 msecs_to_jiffies(ms));
			}

			if (ms == 0) {
				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       timedout);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       timedout);
				dev_err(&msg->spi->dev,
					"SPI transfer timed out\n");
				msg->status = -ETIMEDOUT;
			}
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		if (xfer->delay_usecs)
			udelay(xfer->delay_usecs);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				spi_set_cs(msg->spi, false);
				udelay(10);
				spi_set_cs(msg->spi, true);
			}
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && master->handle_err)
		master->handle_err(master, msg);

	spi_res_release(master, msg);

	spi_finalize_current_message(master);

	return ret;
}
/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @master: the master reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_master *master)
{
	complete(&master->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
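/*
 * Illustrative sketch (hypothetical "example_" name, not part of the
 * original file): a controller driver typically calls this from its
 * completion interrupt handler once the hardware reports that the
 * transfer started by ->transfer_one() has finished.
 */
static void __maybe_unused example_transfer_done(struct spi_master *master)
{
	/* ...acknowledge/clear the controller interrupt here... */
	spi_finalize_current_transfer(master);
}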
/**
 * __spi_pump_messages - function which processes spi message queue
 * @master: master to process queue for
 * @in_kthread: true if we are in the context of the message pump thread
 *
 * This function checks if there is any spi message in the queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and transfer each message.
 *
 * Note that it is called both from the kthread itself and also from
 * inside spi_sync(); the queue extraction handling at the top of the
 * function should deal with this safely.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
	unsigned long flags;
	bool was_busy = false;
	int ret;

	/* Lock queue */
	spin_lock_irqsave(&master->queue_lock, flags);

	/* Make sure we are not already running a message */
	if (master->cur_msg) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* If another context is idling the device then defer */
	if (master->idling) {
		kthread_queue_work(&master->kworker, &master->pump_messages);
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Check if the queue is idle */
	if (list_empty(&master->queue) || !master->running) {
		if (!master->busy) {
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(&master->kworker,
					   &master->pump_messages);
			spin_unlock_irqrestore(&master->queue_lock, flags);
			return;
		}

		master->busy = false;
		master->idling = true;
		spin_unlock_irqrestore(&master->queue_lock, flags);

		kfree(master->dummy_rx);
		master->dummy_rx = NULL;
		kfree(master->dummy_tx);
		master->dummy_tx = NULL;
		if (master->unprepare_transfer_hardware &&
		    master->unprepare_transfer_hardware(master))
			dev_err(&master->dev,
				"failed to unprepare transfer hardware\n");
		if (master->auto_runtime_pm) {
			pm_runtime_mark_last_busy(master->dev.parent);
			pm_runtime_put_autosuspend(master->dev.parent);
		}
		trace_spi_master_idle(master);

		spin_lock_irqsave(&master->queue_lock, flags);
		master->idling = false;
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return;
	}

	/* Extract head of queue */
	master->cur_msg =
		list_first_entry(&master->queue, struct spi_message, queue);

	list_del_init(&master->cur_msg->queue);
	if (master->busy)
		was_busy = true;
	else
		master->busy = true;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	mutex_lock(&master->io_mutex);

	if (!was_busy && master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			mutex_unlock(&master->io_mutex);
			return;
		}
	}

	if (!was_busy)
		trace_spi_master_busy(master);

	if (!was_busy && master->prepare_transfer_hardware) {
		ret = master->prepare_transfer_hardware(master);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare transfer hardware\n");

			if (master->auto_runtime_pm)
				pm_runtime_put(master->dev.parent);
			mutex_unlock(&master->io_mutex);
			return;
		}
	}

	trace_spi_message_start(master->cur_msg);

	if (master->prepare_message) {
		ret = master->prepare_message(master, master->cur_msg);
		if (ret) {
			dev_err(&master->dev,
				"failed to prepare message: %d\n", ret);
			master->cur_msg->status = ret;
			spi_finalize_current_message(master);
			goto out;
		}
		master->cur_msg_prepared = true;
	}

	ret = spi_map_msg(master, master->cur_msg);
	if (ret) {
		master->cur_msg->status = ret;
		spi_finalize_current_message(master);
		goto out;
	}

	ret = master->transfer_one_message(master, master->cur_msg);
	if (ret) {
		dev_err(&master->dev,
			"failed to transfer one message from queue\n");
		goto out;
	}

out:
	mutex_unlock(&master->io_mutex);

	/* Prod the scheduler in case transfer_one() was busy waiting */
	if (!ret)
		cond_resched();
}
/**
 * spi_pump_messages - kthread work function which processes spi message queue
 * @work: pointer to kthread work struct contained in the master struct
 */
static void spi_pump_messages(struct kthread_work *work)
{
	struct spi_master *master =
		container_of(work, struct spi_master, pump_messages);

	__spi_pump_messages(master, true);
}

static int spi_init_queue(struct spi_master *master)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	master->running = false;
	master->busy = false;

	kthread_init_worker(&master->kworker);
	master->kworker_task = kthread_run(kthread_worker_fn,
					   &master->kworker, "%s",
					   dev_name(&master->dev));
	if (IS_ERR(master->kworker_task)) {
		dev_err(&master->dev, "failed to create message pump task\n");
		return PTR_ERR(master->kworker_task);
	}
	kthread_init_work(&master->pump_messages, spi_pump_messages);

	/*
	 * Master config will indicate if this controller should run the
	 * message pump with high (realtime) priority to reduce the transfer
	 * latency on the bus by minimising the delay between a transfer
	 * request and the scheduling of the message pump thread.  Without
	 * this setting the message pump thread will remain at default
	 * priority.
	 */
	if (master->rt) {
		dev_info(&master->dev,
			"will run message pump with realtime priority\n");
		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
	}

	return 0;
}
/**
 * spi_get_next_queued_message() - called by driver to check for queued
 * messages
 * @master: the master to check for queued messages
 *
 * If there are more messages in the queue, the next message is returned from
 * this call.
 *
 * Return: the next message in the queue, else NULL if the queue is empty.
 */
struct spi_message *spi_get_next_queued_message(struct spi_master *master)
{
	struct spi_message *next;
	unsigned long flags;

	/* get a pointer to the next message, if any */
	spin_lock_irqsave(&master->queue_lock, flags);
	next = list_first_entry_or_null(&master->queue, struct spi_message,
					queue);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	return next;
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);

/**
 * spi_finalize_current_message() - the current message is complete
 * @master: the master to return the message to
 *
 * Called by the driver to notify the core that the message in the front of the
 * queue is complete and can be removed from the queue.
 */
void spi_finalize_current_message(struct spi_master *master)
{
	struct spi_message *mesg;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&master->queue_lock, flags);
	mesg = master->cur_msg;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	spi_unmap_msg(master, mesg);

	if (master->cur_msg_prepared && master->unprepare_message) {
		ret = master->unprepare_message(master, mesg);
		if (ret) {
			dev_err(&master->dev,
				"failed to unprepare message: %d\n", ret);
		}
	}

	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	master->cur_msg_prepared = false;
	kthread_queue_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);

	trace_spi_message_done(mesg);

	mesg->state = NULL;
	if (mesg->complete)
		mesg->complete(mesg->context);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);
static int spi_start_queue(struct spi_master *master)
{
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (master->running || master->busy) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -EBUSY;
	}

	master->running = true;
	master->cur_msg = NULL;
	spin_unlock_irqrestore(&master->queue_lock, flags);

	kthread_queue_work(&master->kworker, &master->pump_messages);

	return 0;
}

static int spi_stop_queue(struct spi_master *master)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&master->queue_lock, flags);

	/*
	 * This is a bit lame, but is optimized for the common execution path.
	 * A wait_queue on the master->busy could be used, but then the common
	 * execution path (pump_messages) would be required to call wake_up or
	 * friends on every SPI message.  Do this instead.
	 */
	while ((!list_empty(&master->queue) || master->busy) && limit--) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		usleep_range(10000, 11000);
		spin_lock_irqsave(&master->queue_lock, flags);
	}

	if (!list_empty(&master->queue) || master->busy)
		ret = -EBUSY;
	else
		master->running = false;

	spin_unlock_irqrestore(&master->queue_lock, flags);

	if (ret) {
		dev_warn(&master->dev,
			 "could not stop message queue\n");
		return ret;
	}
	return ret;
}

static int spi_destroy_queue(struct spi_master *master)
{
	int ret;

	ret = spi_stop_queue(master);

	/*
	 * kthread_flush_worker will block until all work is done.
	 * If the reason that stop_queue timed out is that the work will never
	 * finish, then it does no good to call flush/stop thread, so
	 * return anyway.
	 */
	if (ret) {
		dev_err(&master->dev, "problem destroying queue\n");
		return ret;
	}

	kthread_flush_worker(&master->kworker);
	kthread_stop(master->kworker_task);

	return 0;
}
static int __spi_queued_transfer(struct spi_device *spi,
				 struct spi_message *msg,
				 bool need_pump)
{
	struct spi_master *master = spi->master;
	unsigned long flags;

	spin_lock_irqsave(&master->queue_lock, flags);

	if (!master->running) {
		spin_unlock_irqrestore(&master->queue_lock, flags);
		return -ESHUTDOWN;
	}
	msg->actual_length = 0;
	msg->status = -EINPROGRESS;

	list_add_tail(&msg->queue, &master->queue);
	if (!master->busy && need_pump)
		kthread_queue_work(&master->kworker, &master->pump_messages);

	spin_unlock_irqrestore(&master->queue_lock, flags);
	return 0;
}

/**
 * spi_queued_transfer - transfer function for queued transfers
 * @spi: spi device which is requesting transfer
 * @msg: spi message to be queued to the driver queue
 *
 * Return: zero on success, else a negative error code.
 */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
	return __spi_queued_transfer(spi, msg, true);
}

static int spi_master_initialize_queue(struct spi_master *master)
{
	int ret;

	master->transfer = spi_queued_transfer;
	if (!master->transfer_one_message)
		master->transfer_one_message = spi_transfer_one_message;

	/* Initialize and start queue */
	ret = spi_init_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_init_queue;
	}
	master->queued = true;
	ret = spi_start_queue(master);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_start_queue;
	}

	return 0;

err_start_queue:
	spi_destroy_queue(master);
err_init_queue:
	return ret;
}
/*-------------------------------------------------------------------------*/

#if defined(CONFIG_OF)
static struct spi_device *
of_register_spi_device(struct spi_master *master, struct device_node *nc)
{
	struct spi_device *spi;
	int rc;
	u32 value;

	/* Alloc an spi_device */
	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "spi_device alloc error for %s\n",
			nc->full_name);
		rc = -ENOMEM;
		goto err_out;
	}

	/* Select device driver */
	rc = of_modalias_node(nc, spi->modalias,
			      sizeof(spi->modalias));
	if (rc < 0) {
		dev_err(&master->dev, "cannot find modalias for %s\n",
			nc->full_name);
		goto err_out;
	}

	/* Device address */
	rc = of_property_read_u32(nc, "reg", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->chip_select = value;

	/* Mode (clock phase/polarity/etc.) */
	if (of_find_property(nc, "spi-cpha", NULL))
		spi->mode |= SPI_CPHA;
	if (of_find_property(nc, "spi-cpol", NULL))
		spi->mode |= SPI_CPOL;
	if (of_find_property(nc, "spi-cs-high", NULL))
		spi->mode |= SPI_CS_HIGH;
	if (of_find_property(nc, "spi-3wire", NULL))
		spi->mode |= SPI_3WIRE;
	if (of_find_property(nc, "spi-lsb-first", NULL))
		spi->mode |= SPI_LSB_FIRST;

	/* Device DUAL/QUAD mode */
	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_TX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_TX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-tx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
		switch (value) {
		case 1:
			break;
		case 2:
			spi->mode |= SPI_RX_DUAL;
			break;
		case 4:
			spi->mode |= SPI_RX_QUAD;
			break;
		default:
			dev_warn(&master->dev,
				 "spi-rx-bus-width %d not supported\n",
				 value);
			break;
		}
	}

	/* Device speed */
	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
	if (rc) {
		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
			nc->full_name, rc);
		goto err_out;
	}
	spi->max_speed_hz = value;

	/* Store a pointer to the node in the device structure */
	of_node_get(nc);
	spi->dev.of_node = nc;

	/* Register the new device */
	rc = spi_add_device(spi);
	if (rc) {
		dev_err(&master->dev, "spi_device register error %s\n",
			nc->full_name);
		goto err_of_node_put;
	}

	return spi;

err_of_node_put:
	of_node_put(nc);
err_out:
	spi_dev_put(spi);
	return ERR_PTR(rc);
}
/**
 * of_register_spi_devices() - Register child devices onto the SPI bus
 * @master:	Pointer to spi_master device
 *
 * Registers an spi_device for each child node of master node which has a 'reg'
 * property.
 */
static void of_register_spi_devices(struct spi_master *master)
{
	struct spi_device *spi;
	struct device_node *nc;

	if (!master->dev.of_node)
		return;

	for_each_available_child_of_node(master->dev.of_node, nc) {
		if (of_node_test_and_set_flag(nc, OF_POPULATED))
			continue;
		spi = of_register_spi_device(master, nc);
		if (IS_ERR(spi)) {
			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
				 nc->full_name);
			of_node_clear_flag(nc, OF_POPULATED);
		}
	}
}
#else
static void of_register_spi_devices(struct spi_master *master) { }
#endif
#ifdef CONFIG_ACPI
static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
{
	struct spi_device *spi = data;
	struct spi_master *master = spi->master;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
		struct acpi_resource_spi_serialbus *sb;

		sb = &ares->data.spi_serial_bus;
		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
			/*
			 * ACPI DeviceSelection numbering is handled by the
			 * host controller driver in Windows and can vary
			 * from driver to driver.  In Linux we always expect
			 * 0 .. max - 1 so we need to ask the driver to
			 * translate between the two schemes.
			 */
			if (master->fw_translate_cs) {
				int cs = master->fw_translate_cs(master,
						sb->device_selection);
				if (cs < 0)
					return cs;
				spi->chip_select = cs;
			} else {
				spi->chip_select = sb->device_selection;
			}

			spi->max_speed_hz = sb->connection_speed;

			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
				spi->mode |= SPI_CPHA;
			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
				spi->mode |= SPI_CPOL;
			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
				spi->mode |= SPI_CS_HIGH;
		}
	} else if (spi->irq < 0) {
		struct resource r;

		if (acpi_dev_resource_interrupt(ares, 0, &r))
			spi->irq = r.start;
	}

	/* Always tell the ACPI core to skip this resource */
	return 1;
}
static acpi_status acpi_register_spi_device(struct spi_master *master,
					    struct acpi_device *adev)
{
	struct list_head resource_list;
	struct spi_device *spi;
	int ret;

	if (acpi_bus_get_status(adev) || !adev->status.present ||
	    acpi_device_enumerated(adev))
		return AE_OK;

	spi = spi_alloc_device(master);
	if (!spi) {
		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
			dev_name(&adev->dev));
		return AE_NO_MEMORY;
	}

	ACPI_COMPANION_SET(&spi->dev, adev);
	spi->irq = -1;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     acpi_spi_add_resource, spi);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !spi->max_speed_hz) {
		spi_dev_put(spi);
		return AE_OK;
	}

	if (spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(adev, 0);

	acpi_device_set_enumerated(adev);

	adev->power.flags.ignore_parent = true;
	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
	if (spi_add_device(spi)) {
		adev->power.flags.ignore_parent = false;
		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
			dev_name(&adev->dev));
		spi_dev_put(spi);
	}

	return AE_OK;
}

static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct spi_master *master = data;
	struct acpi_device *adev;

	if (acpi_bus_get_device(handle, &adev))
		return AE_OK;

	return acpi_register_spi_device(master, adev);
}

static void acpi_register_spi_devices(struct spi_master *master)
{
	acpi_status status;
	acpi_handle handle;

	handle = ACPI_HANDLE(master->dev.parent);
	if (!handle)
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
				     acpi_spi_add_device, NULL,
				     master, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
}
#else
static inline void acpi_register_spi_devices(struct spi_master *master) {}
#endif /* CONFIG_ACPI */
static void spi_master_release(struct device *dev)
{
	struct spi_master *master;

	master = container_of(dev, struct spi_master, dev);
	kfree(master);
}

static struct class spi_master_class = {
	.name		= "spi_master",
	.owner		= THIS_MODULE,
	.dev_release	= spi_master_release,
	.dev_groups	= spi_master_groups,
};
/**
 * spi_alloc_master - allocate SPI master controller
 * @dev: the controller, possibly using the platform_bus
 * @size: how much zeroed driver-private data to allocate; the pointer to this
 *	memory is in the driver_data field of the returned device,
 *	accessible with spi_master_get_devdata().
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.  It's how they allocate
 * an spi_master structure, prior to calling spi_register_master().
 *
 * This must be called from context that can sleep.
 *
 * The caller is responsible for assigning the bus number and initializing
 * the master's methods before calling spi_register_master(); and (after errors
 * adding the device) calling spi_master_put() to prevent a memory leak.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
	struct spi_master	*master;

	if (!dev)
		return NULL;

	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
	if (!master)
		return NULL;

	device_initialize(&master->dev);
	master->bus_num = -1;
	master->num_chipselect = 1;
	master->dev.class = &spi_master_class;
	master->dev.parent = dev;
	pm_suspend_ignore_children(&master->dev, true);
	spi_master_set_devdata(master, &master[1]);

	return master;
}
EXPORT_SYMBOL_GPL(spi_alloc_master);
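/*
 * Illustrative sketch (hypothetical "example_" names, not part of the
 * original file; a real driver would do this from its platform or PCI
 * probe): allocate, configure and register a controller, dropping the
 * reference on failure as the comment above requires.
 */
struct example_priv {
	void __iomem *regs;
};

static int __maybe_unused example_controller_probe(struct device *dev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(dev, sizeof(struct example_priv));
	if (!master)
		return -ENOMEM;

	master->num_chipselect = 4;
	/* set ->transfer_one(), ->set_cs(), bus_num, etc. here */

	ret = spi_register_master(master);
	if (ret)
		spi_master_put(master);	/* required after a failed add */

	return ret;
}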
static void devm_spi_release_master(struct device *dev, void *master)
{
	spi_master_put(*(struct spi_master **)master);
}

/**
 * devm_spi_alloc_master - resource-managed spi_alloc_master()
 * @dev: physical device of SPI master
 * @size: how much zeroed driver-private data to allocate
 * Context: can sleep
 *
 * Allocate an SPI master and automatically release a reference on it
 * when @dev is unbound from its driver.  Drivers are thus relieved from
 * having to call spi_master_put().
 *
 * The arguments to this function are identical to spi_alloc_master().
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *devm_spi_alloc_master(struct device *dev, unsigned int size)
{
	struct spi_master **ptr, *master;

	ptr = devres_alloc(devm_spi_release_master, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return NULL;

	master = spi_alloc_master(dev, size);
	if (master) {
		master->devm_allocated = true;
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return master;
}
EXPORT_SYMBOL_GPL(devm_spi_alloc_master);
#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_master *master)
{
	int nb, i, *cs;
	struct device_node *np = master->dev.of_node;

	if (!np)
		return 0;

	nb = of_gpio_named_count(np, "cs-gpios");
	master->num_chipselect = max_t(int, nb, master->num_chipselect);

	/* Return error only for an incorrectly formed cs-gpios property */
	if (nb == 0 || nb == -ENOENT)
		return 0;
	else if (nb < 0)
		return nb;

	cs = devm_kzalloc(&master->dev,
			  sizeof(int) * master->num_chipselect,
			  GFP_KERNEL);
	master->cs_gpios = cs;

	if (!master->cs_gpios)
		return -ENOMEM;

	for (i = 0; i < master->num_chipselect; i++)
		cs[i] = -ENOENT;

	for (i = 0; i < nb; i++)
		cs[i] = of_get_named_gpio(np, "cs-gpios", i);

	return 0;
}
#else
static int of_spi_register_master(struct spi_master *master)
{
	return 0;
}
#endif
/**
 * spi_register_master - register SPI master controller
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * SPI master controllers connect to their drivers using some non-SPI bus,
 * such as the platform bus.  The final stage of probe() in that code
 * includes calling spi_register_master() to hook up to this SPI bus glue.
 *
 * SPI controllers use board specific (often SOC specific) bus numbers,
 * and board-specific addressing for SPI devices combines those numbers
 * with chip select numbers.  Since SPI does not directly support dynamic
 * device identification, boards need configuration tables telling which
 * chip is at which address.
 *
 * This must be called from context that can sleep.  It returns zero on
 * success, else a negative error code (dropping the master's refcount).
 * After a successful return, the caller is responsible for calling
 * spi_unregister_master().
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_master(struct spi_master *master)
{
	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
	struct device		*dev = master->dev.parent;
	struct boardinfo	*bi;
	int			status = -ENODEV;
	int			dynamic = 0;

	if (!dev)
		return -ENODEV;

	status = of_spi_register_master(master);
	if (status)
		return status;

	/* even if it's just one always-selected device, there must
	 * be at least one chipselect
	 */
	if (master->num_chipselect == 0)
		return -EINVAL;

	if ((master->bus_num < 0) && master->dev.of_node)
		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");

	/* convention: dynamically assigned bus IDs count down from the max */
	if (master->bus_num < 0) {
		/* FIXME switch to an IDR based scheme, something like
		 * I2C now uses, so we can't run out of "dynamic" IDs
		 */
		master->bus_num = atomic_dec_return(&dyn_bus_id);
		dynamic = 1;
	}

	INIT_LIST_HEAD(&master->queue);
	spin_lock_init(&master->queue_lock);
	spin_lock_init(&master->bus_lock_spinlock);
	mutex_init(&master->bus_lock_mutex);
	mutex_init(&master->io_mutex);
	master->bus_lock_flag = 0;
	init_completion(&master->xfer_completion);
	if (!master->max_dma_len)
		master->max_dma_len = INT_MAX;

	/* register the device, then userspace will see it.
	 * registration fails if the bus ID is in use.
	 */
	dev_set_name(&master->dev, "spi%u", master->bus_num);
	status = device_add(&master->dev);
	if (status < 0)
		goto done;
	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
			dynamic ? " (dynamic)" : "");

	/* If we're using a queued driver, start the queue */
	if (master->transfer)
		dev_info(dev, "master is unqueued, this is deprecated\n");
	else {
		status = spi_master_initialize_queue(master);
		if (status) {
			device_del(&master->dev);
			goto done;
		}
	}
	/* add statistics */
	spin_lock_init(&master->statistics.lock);

	mutex_lock(&board_lock);
	list_add_tail(&master->list, &spi_master_list);
	list_for_each_entry(bi, &board_list, list)
		spi_match_master_to_boardinfo(master, &bi->board_info);
	mutex_unlock(&board_lock);

	/* Register devices from the device tree and ACPI */
	of_register_spi_devices(master);
	acpi_register_spi_devices(master);
done:
	return status;
}
EXPORT_SYMBOL_GPL(spi_register_master);
static void devm_spi_unregister(struct device *dev, void *res)
{
	spi_unregister_master(*(struct spi_master **)res);
}

/**
 * devm_spi_register_master - register managed SPI master controller
 * @dev:    device managing SPI master
 * @master: initialized master, originally from spi_alloc_master()
 * Context: can sleep
 *
 * Register a SPI device as with spi_register_master() which will
 * automatically be unregistered when @dev is unbound from its driver.
 *
 * Return: zero on success, else a negative error code.
 */
int devm_spi_register_master(struct device *dev, struct spi_master *master)
{
	struct spi_master **ptr;
	int ret;

	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = spi_register_master(master);
	if (!ret) {
		*ptr = master;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(devm_spi_register_master);

static int __unregister(struct device *dev, void *null)
{
	spi_unregister_device(to_spi_device(dev));
	return 0;
}
/**
 * spi_unregister_master - unregister SPI master controller
 * @master: the master being unregistered
 * Context: can sleep
 *
 * This call is used only by SPI master controller drivers, which are the
 * only ones directly touching chip registers.
 *
 * This must be called from context that can sleep.
 */
void spi_unregister_master(struct spi_master *master)
{
	/* Prevent addition of new devices, unregister existing ones */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_lock(&spi_add_lock);

	device_for_each_child(&master->dev, NULL, __unregister);

	if (master->queued) {
		if (spi_destroy_queue(master))
			dev_err(&master->dev, "queue remove failed\n");
	}

	mutex_lock(&board_lock);
	list_del(&master->list);
	mutex_unlock(&board_lock);

	device_del(&master->dev);

	/* Release the last reference on the master if its driver
	 * has not yet been converted to devm_spi_alloc_master().
	 */
	if (!master->devm_allocated)
		put_device(&master->dev);

	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_unlock(&spi_add_lock);
}
EXPORT_SYMBOL_GPL(spi_unregister_master);
int spi_master_suspend(struct spi_master *master)
{
	int ret;

	/* Basically no-ops for non-queued masters */
	if (!master->queued)
		return 0;

	ret = spi_stop_queue(master);
	if (ret)
		dev_err(&master->dev, "queue stop failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_suspend);

int spi_master_resume(struct spi_master *master)
{
	int ret;

	if (!master->queued)
		return 0;

	ret = spi_start_queue(master);
	if (ret)
		dev_err(&master->dev, "queue restart failed\n");

	return ret;
}
EXPORT_SYMBOL_GPL(spi_master_resume);

static int __spi_master_match(struct device *dev, const void *data)
{
	struct spi_master *m;
	const u16 *bus_num = data;

	m = container_of(dev, struct spi_master, dev);
	return m->bus_num == *bus_num;
}
/**
 * spi_busnum_to_master - look up master associated with bus_num
 * @bus_num: the master's bus number
 * Context: can sleep
 *
 * This call may be used with devices that are registered after
 * arch init time.  It returns a refcounted pointer to the relevant
 * spi_master (which the caller must release), or NULL if there is
 * no such master registered.
 *
 * Return: the SPI master structure on success, else NULL.
 */
struct spi_master *spi_busnum_to_master(u16 bus_num)
{
	struct device		*dev;
	struct spi_master	*master = NULL;

	dev = class_find_device(&spi_master_class, NULL, &bus_num,
				__spi_master_match);
	if (dev)
		master = container_of(dev, struct spi_master, dev);
	/* reference got in class_find_device */
	return master;
}
EXPORT_SYMBOL_GPL(spi_busnum_to_master);
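/*
 * Illustrative sketch (hypothetical "example_" name, not part of the
 * original file): look up bus 2 and drop the reference taken by
 * class_find_device() when done.
 */
static int __maybe_unused example_use_bus2(void)
{
	struct spi_master *master = spi_busnum_to_master(2);

	if (!master)
		return -ENODEV;

	/* ...use the controller... */

	spi_master_put(master);
	return 0;
}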
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the spi device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_master to avoid repeated allocations.
 */
void *spi_res_alloc(struct spi_device *spi,
		    spi_res_release_t release,
		    size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}
EXPORT_SYMBOL_GPL(spi_res_alloc);
/**
 * spi_res_free - free an spi resource
 * @res: pointer to the custom data of a resource
 */
void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}
EXPORT_SYMBOL_GPL(spi_res_free);

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the spi message
 * @res:     the spi_resource
 */
void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}
EXPORT_SYMBOL_GPL(spi_res_add);
/**
 * spi_res_release - release all spi resources for this message
 * @master:  the @spi_master
 * @message: the @spi_message
 */
void spi_res_release(struct spi_master *master,
		     struct spi_message *message)
{
	struct spi_res *res;

	while (!list_empty(&message->resources)) {
		res = list_last_entry(&message->resources,
				      struct spi_res, entry);

		if (res->release)
			res->release(master, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
EXPORT_SYMBOL_GPL(spi_res_release);
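/*
 * Illustrative sketch of the resource life cycle (hypothetical
 * "example_" names, not part of the original file): allocate against a
 * message, register it, and let spi_res_release() invoke the release
 * callback when the message finishes.
 */
static void __maybe_unused example_res_release(struct spi_master *master,
					       struct spi_message *msg,
					       void *res)
{
	/* undo whatever the resource's owner set up */
}

static int __maybe_unused example_attach_res(struct spi_message *msg)
{
	void *data = spi_res_alloc(msg->spi, example_res_release,
				   sizeof(u32), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	spi_res_add(msg, data);	/* freed later by spi_res_release() */
	return 0;
}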
/*-------------------------------------------------------------------------*/

/* Core methods for spi_message alterations */

static void __spi_replace_transfers_release(struct spi_master *master,
					    struct spi_message *msg,
					    void *res)
{
	struct spi_replaced_transfers *rxfer = res;
	size_t i;

	/* call extra callback if requested */
	if (rxfer->release)
		rxfer->release(master, msg, res);

	/* insert replaced transfers back into the message */
	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);

	/* remove the formerly inserted entries */
	for (i = 0; i < rxfer->inserted; i++)
		list_del(&rxfer->inserted_transfers[i].transfer_list);
}
/**
 * spi_replace_transfers - replace transfers with several transfers
 *                         and register change with spi_message.resources
 * @msg:           the spi_message we work upon
 * @xfer_first:    the first spi_transfer we want to replace
 * @remove:        number of transfers to remove
 * @insert:        the number of transfers we want to insert instead
 * @release:       extra release code necessary in some circumstances
 * @extradatasize: extra data to allocate (with alignment guarantees
 *                 of struct @spi_transfer)
 * @gfp:           gfp flags
 *
 * Returns: pointer to @spi_replaced_transfers,
 *          PTR_ERR(...) in case of errors.
 */
struct spi_replaced_transfers *spi_replace_transfers(
	struct spi_message *msg,
	struct spi_transfer *xfer_first,
	size_t remove,
	size_t insert,
	spi_replaced_release_t release,
	size_t extradatasize,
	gfp_t gfp)
{
	struct spi_replaced_transfers *rxfer;
	struct spi_transfer *xfer;
	size_t i;

	/* allocate the structure using spi_res */
	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
			      insert * sizeof(struct spi_transfer)
			      + sizeof(struct spi_replaced_transfers)
			      + extradatasize,
			      gfp);
	if (!rxfer)
		return ERR_PTR(-ENOMEM);

	/* the release code to invoke before running the generic release */
	rxfer->release = release;

	/* assign extradata */
	if (extradatasize)
		rxfer->extradata =
			&rxfer->inserted_transfers[insert];

	/* init the replaced_transfers list */
	INIT_LIST_HEAD(&rxfer->replaced_transfers);

	/* assign the list_entry after which we should reinsert
	 * the @replaced_transfers - it may be spi_message.messages!
	 */
	rxfer->replaced_after = xfer_first->transfer_list.prev;

	/* remove the requested number of transfers */
	for (i = 0; i < remove; i++) {
		/* if the entry after replaced_after is msg->transfers
		 * then we have been requested to remove more transfers
		 * than are in the list
		 */
		if (rxfer->replaced_after->next == &msg->transfers) {
			dev_err(&msg->spi->dev,
				"requested to remove more spi_transfers than are available\n");
			/* insert replaced transfers back into the message */
			list_splice(&rxfer->replaced_transfers,
				    rxfer->replaced_after);

			/* free the spi_replace_transfer structure */
			spi_res_free(rxfer);

			/* and return with an error */
			return ERR_PTR(-EINVAL);
		}

		/* remove the entry after replaced_after from the list of
		 * transfers and add it to the list of replaced_transfers
		 */
		list_move_tail(rxfer->replaced_after->next,
			       &rxfer->replaced_transfers);
	}

	/* create copies of the given xfer with identical settings
	 * based on the first transfer to get removed
	 */
	for (i = 0; i < insert; i++) {
		/* we need to run in reverse order */
		xfer = &rxfer->inserted_transfers[insert - 1 - i];

		/* copy all spi_transfer data */
		memcpy(xfer, xfer_first, sizeof(*xfer));

		/* add to list */
		list_add(&xfer->transfer_list, rxfer->replaced_after);

		/* clear cs_change and delay_usecs for all but the last */
		if (i) {
			xfer->cs_change = false;
			xfer->delay_usecs = 0;
		}
	}

	/* set up inserted */
	rxfer->inserted = insert;

	/* and register it with spi_res/spi_message */
	spi_res_add(msg, rxfer);

	return rxfer;
}
EXPORT_SYMBOL_GPL(spi_replace_transfers);
static int __spi_split_transfer_maxsize(struct spi_master *master,
					struct spi_message *msg,
					struct spi_transfer **xferp,
					size_t maxsize,
					gfp_t gfp)
{
	struct spi_transfer *xfer = *xferp, *xfers;
	struct spi_replaced_transfers *srt;
	size_t offset;
	size_t count, i;

	/* warn once about the fact that we are splitting a transfer */
	dev_warn_once(&msg->spi->dev,
		      "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
		      xfer->len, maxsize);

	/* calculate how many we have to replace */
	count = DIV_ROUND_UP(xfer->len, maxsize);

	/* create replacement */
	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
	if (IS_ERR(srt))
		return PTR_ERR(srt);
	xfers = srt->inserted_transfers;

	/* now handle each of those newly inserted spi_transfers.
	 * note that the replacement spi_transfers are all preset
	 * to the same values as *xferp, so tx_buf, rx_buf and len
	 * are all identical (as well as most others),
	 * so we just have to fix up len and the pointers.
	 *
	 * this also includes support for the deprecated
	 * spi_message.is_dma_mapped interface.
	 */

	/* the first transfer just needs the length modified, so we
	 * run it outside the loop
	 */
	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);

	/* all the others need rx_buf/tx_buf also set */
	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
		/* update rx_buf, tx_buf and dma */
		if (xfers[i].rx_buf)
			xfers[i].rx_buf += offset;
		if (xfers[i].rx_dma)
			xfers[i].rx_dma += offset;
		if (xfers[i].tx_buf)
			xfers[i].tx_buf += offset;
		if (xfers[i].tx_dma)
			xfers[i].tx_dma += offset;

		/* update length */
		xfers[i].len = min(maxsize, xfers[i].len - offset);
	}

	/* we set up xferp to the last entry we have inserted,
	 * so that we skip those already split transfers
	 */
	*xferp = &xfers[count - 1];

	/* increment statistics counters */
	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
				       transfers_split_maxsize);
	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
				       transfers_split_maxsize);

	return 0;
}
/**
 * spi_split_transfers_maxsize - split spi transfers into multiple transfers
 *                               when an individual transfer exceeds a
 *                               certain size
 * @master:  the @spi_master for this transfer
 * @msg:     the @spi_message to transform
 * @maxsize: the maximum length a transfer may have before it is split
 * @gfp:     GFP allocation flags
 *
 * Return: status of transformation
 */
int spi_split_transfers_maxsize(struct spi_master *master,
				struct spi_message *msg,
				size_t maxsize,
				gfp_t gfp)
{
	struct spi_transfer *xfer;
	int ret;

	/* iterate over the transfer_list,
	 * but note that xfer is advanced to the last transfer inserted
	 * to avoid checking sizes again unnecessarily (also xfer may
	 * potentially belong to a different list by the time the
	 * replacement has happened)
	 */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (xfer->len > maxsize) {
			ret = __spi_split_transfer_maxsize(
				master, msg, &xfer, maxsize, gfp);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
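/*
 * Illustrative sketch (not part of the original file): a controller
 * driver whose DMA engine cannot handle arbitrarily long transfers might
 * call spi_split_transfers_maxsize() from its ->prepare_message() hook.
 * The example_* name and the 64 KiB limit are assumptions.
 */
#if 0	/* example only */
static int example_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	/* split any transfer longer than the hypothetical DMA limit */
	return spi_split_transfers_maxsize(master, msg, SZ_64K, GFP_KERNEL);
}
#endif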
/*-------------------------------------------------------------------------*/

/* Core methods for SPI master protocol drivers.  Some of the
 * other core methods are currently defined as inline functions.
 */

static int __spi_validate_bits_per_word(struct spi_master *master,
					u8 bits_per_word)
{
	if (master->bits_per_word_mask) {
		/* Only 32 bits fit in the mask */
		if (bits_per_word > 32)
			return -EINVAL;
		if (!(master->bits_per_word_mask &
				SPI_BPW_MASK(bits_per_word)))
			return -EINVAL;
	}

	return 0;
}
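/*
 * Illustrative sketch (not part of the original file): the mask checked
 * above is declared by controller drivers at registration time. The
 * example_* name and the supported word sizes are assumptions.
 */
#if 0	/* example only */
static void example_declare_word_sizes(struct spi_master *master)
{
	/* hardware that only shifts 8, 16 or 32 bit words */
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
				     SPI_BPW_MASK(32);
}
#endif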
/**
 * spi_setup - setup SPI mode and clock rate
 * @spi: the device whose settings are being modified
 * Context: can sleep, and no requests are queued to the device
 *
 * SPI protocol drivers may need to update the transfer mode if the
 * device doesn't work with its default.  They may likewise need
 * to update clock rates or word sizes from initial values.  This function
 * changes those settings, and must be called from a context that can sleep.
 * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
 * effect the next time the device is selected and data is transferred to
 * or from it.  When this function returns, the spi device is deselected.
 *
 * Note that this call will fail if the protocol driver specifies an option
 * that the underlying controller or its driver does not support.  For
 * example, not all hardware supports wire transfers using nine bit words,
 * LSB-first wire encoding, or active-high chipselects.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_setup(struct spi_device *spi)
{
	unsigned	bad_bits, ugly_bits;
	int		status;

	/* check mode to prevent DUAL and QUAD being set at the same time */
	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
	    ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
		dev_err(&spi->dev,
			"setup: cannot select dual and quad at the same time\n");
		return -EINVAL;
	}
	/* in SPI_3WIRE mode, DUAL and QUAD are forbidden */
	if ((spi->mode & SPI_3WIRE) && (spi->mode &
		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	/* help drivers fail *cleanly* when they need options
	 * that aren't supported with their current master
	 */
	bad_bits = spi->mode & ~spi->master->mode_bits;
	ugly_bits = bad_bits &
		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
	if (ugly_bits) {
		dev_warn(&spi->dev,
			 "setup: ignoring unsupported mode bits %x\n",
			 ugly_bits);
		spi->mode &= ~ugly_bits;
		bad_bits &= ~ugly_bits;
	}
	if (bad_bits) {
		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
			bad_bits);
		return -EINVAL;
	}

	if (!spi->bits_per_word)
		spi->bits_per_word = 8;

	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
	if (status)
		return status;

	if (!spi->max_speed_hz)
		spi->max_speed_hz = spi->master->max_speed_hz;

	if (spi->master->setup)
		status = spi->master->setup(spi);

	spi_set_cs(spi, false);

	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
			(spi->mode & SPI_LOOP) ? "loopback, " : "",
			spi->bits_per_word, spi->max_speed_hz,
			status);

	return status;
}
EXPORT_SYMBOL_GPL(spi_setup);
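/*
 * Illustrative sketch (not part of the original file): a protocol driver
 * typically fills in the spi_device fields it cares about in probe() and
 * then calls spi_setup() to have the core and controller validate them.
 * The example_* name and the particular mode/speed are assumptions.
 */
#if 0	/* example only */
static int example_probe(struct spi_device *spi)
{
	/* request mode 3, 8-bit words, 10 MHz ceiling */
	spi->mode = SPI_MODE_3;
	spi->bits_per_word = 8;
	spi->max_speed_hz = 10000000;

	/* fails if the controller cannot honour the combination */
	return spi_setup(spi);
}
#endif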
static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	struct spi_transfer *xfer;
	int w_size;

	if (list_empty(&message->transfers))
		return -EINVAL;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing.  They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	/* Set transfer bits_per_word and max speed as spi device default if
	 * it is not set for this transfer.
	 * Set transfer tx_nbits and rx_nbits as single transfer default
	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
	 */
	message->frame_length = 0;
	list_for_each_entry(xfer, &message->transfers, transfer_list) {
		message->frame_length += xfer->len;
		if (!xfer->bits_per_word)
			xfer->bits_per_word = spi->bits_per_word;

		if (!xfer->speed_hz)
			xfer->speed_hz = spi->max_speed_hz;
		if (!xfer->speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (master->max_speed_hz &&
		    xfer->speed_hz > master->max_speed_hz)
			xfer->speed_hz = master->max_speed_hz;

		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
			return -EINVAL;

		/* SPI transfer length should be a multiple of the SPI word
		 * size, where the SPI word size is rounded up to a
		 * power-of-two number of bytes
		 */
		if (xfer->bits_per_word <= 8)
			w_size = 1;
		else if (xfer->bits_per_word <= 16)
			w_size = 2;
		else
			w_size = 4;

		/* No partial transfers accepted */
		if (xfer->len % w_size)
			return -EINVAL;

		if (xfer->speed_hz && master->min_speed_hz &&
		    xfer->speed_hz < master->min_speed_hz)
			return -EINVAL;

		if (xfer->tx_buf && !xfer->tx_nbits)
			xfer->tx_nbits = SPI_NBITS_SINGLE;
		if (xfer->rx_buf && !xfer->rx_nbits)
			xfer->rx_nbits = SPI_NBITS_SINGLE;
		/* check transfer tx/rx_nbits:
		 * 1. check the value matches one of single, dual and quad
		 * 2. check tx/rx_nbits match the mode in spi_device
		 */
		if (xfer->tx_buf) {
			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
			    xfer->tx_nbits != SPI_NBITS_DUAL &&
			    xfer->tx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
				return -EINVAL;
			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_TX_QUAD))
				return -EINVAL;
		}
		/* check transfer rx_nbits */
		if (xfer->rx_buf) {
			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
			    xfer->rx_nbits != SPI_NBITS_DUAL &&
			    xfer->rx_nbits != SPI_NBITS_QUAD)
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
			    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
				return -EINVAL;
			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
			    !(spi->mode & SPI_RX_QUAD))
				return -EINVAL;
		}
	}

	message->status = -EINPROGRESS;

	return 0;
}
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);

	trace_spi_message_submit(message);

	return master->transfer(spi, message);
}

/**
 * spi_async - asynchronous SPI transfer
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * This call may be used in_irq and other contexts which can't sleep,
 * as well as from task contexts which can sleep.
 *
 * The completion callback is invoked in a context which can't sleep.
 * Before that invocation, the value of message->status is undefined.
 * When the callback is issued, message->status holds either zero (to
 * indicate complete success) or a negative error code.  After that
 * callback returns, the driver which issued the transfer request may
 * deallocate the associated memory; it's no longer in use by any SPI
 * core or controller driver code.
 *
 * Note that although all messages to a spi_device are handled in
 * FIFO order, messages may go to different devices in other orders.
 * Some device might be higher priority, or have various "hard" access
 * time requirements, for example.
 *
 * On detection of any fault during the transfer, processing of
 * the entire message is aborted, and the device is deselected.
 * Until returning from the associated message completion callback,
 * no other spi_message queued to that device will be processed.
 * (This rule applies equally to all the synchronous transfer calls,
 * which are wrappers around this core asynchronous primitive.)
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	if (master->bus_lock_flag)
		ret = -EBUSY;
	else
		ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async);
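/*
 * Illustrative sketch (not part of the original file): a minimal
 * asynchronous submission. The caller owns @msg, @xfer and @done and must
 * keep them alive until the completion callback has run; the example_*
 * names are assumptions.
 */
#if 0	/* example only */
static void example_complete(void *context)
{
	/* called in a context that cannot sleep; msg->status is now valid */
	complete(context);
}

static int example_submit(struct spi_device *spi, struct spi_message *msg,
			  struct spi_transfer *xfer, struct completion *done)
{
	spi_message_init(msg);
	spi_message_add_tail(xfer, msg);
	msg->complete = example_complete;
	msg->context = done;

	return spi_async(spi, msg);
}
#endif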
/**
 * spi_async_locked - version of spi_async with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers, including completion callback
 * Context: any (irqs may be blocked, etc)
 *
 * Like spi_async(), with the same completion, ordering and fault
 * semantics, but for use once the caller holds the bus lock taken by
 * spi_bus_lock(); it therefore skips the bus_lock_flag check.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;
	int ret;
	unsigned long flags;

	ret = __spi_validate(spi, message);
	if (ret != 0)
		return ret;

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);

	ret = __spi_async(spi, message);

	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_async_locked);
/**
 * spi_flash_read - read data from SPI flash using the dedicated hardware
 *                  read path, if the controller provides one
 * @spi: device from which data will be read
 * @msg: spi flash read message
 *
 * Return: zero on success, else a negative error code.
 */
int spi_flash_read(struct spi_device *spi,
		   struct spi_flash_read_message *msg)
{
	struct spi_master *master = spi->master;
	struct device *rx_dev = NULL;
	int ret;

	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
	     msg->addr_nbits == SPI_NBITS_DUAL) &&
	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
		return -EINVAL;
	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
	     msg->addr_nbits == SPI_NBITS_QUAD) &&
	    !(spi->mode & SPI_TX_QUAD))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_DUAL &&
	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
		return -EINVAL;
	if (msg->data_nbits == SPI_NBITS_QUAD &&
	    !(spi->mode & SPI_RX_QUAD))
		return -EINVAL;

	if (master->auto_runtime_pm) {
		ret = pm_runtime_get_sync(master->dev.parent);
		if (ret < 0) {
			dev_err(&master->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&master->bus_lock_mutex);
	mutex_lock(&master->io_mutex);
	if (master->dma_rx) {
		rx_dev = master->dma_rx->device->dev;
		ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
				  msg->buf, msg->len,
				  DMA_FROM_DEVICE);
		if (!ret)
			msg->cur_msg_mapped = true;
	}
	ret = master->spi_flash_read(spi, msg);
	if (msg->cur_msg_mapped)
		spi_unmap_buf(master, rx_dev, &msg->rx_sg,
			      DMA_FROM_DEVICE);
	mutex_unlock(&master->io_mutex);
	mutex_unlock(&master->bus_lock_mutex);

	if (master->auto_runtime_pm)
		pm_runtime_put(master->dev.parent);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_flash_read);
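/*
 * Illustrative sketch (not part of the original file): how an m25p80-style
 * caller might fill a spi_flash_read_message for a plain single-wire 03h
 * read. The opcode, the three-byte address width and the example_* name
 * are assumptions; real users also gate this on spi_flash_read_supported().
 */
#if 0	/* example only */
static int example_flash_read(struct spi_device *spi, loff_t from,
			      size_t len, void *to)
{
	struct spi_flash_read_message msg = {
		.buf		= to,
		.from		= from,
		.len		= len,
		.read_opcode	= 0x03,		/* classic READ */
		.addr_width	= 3,
		.dummy_bytes	= 0,
		.opcode_nbits	= SPI_NBITS_SINGLE,
		.addr_nbits	= SPI_NBITS_SINGLE,
		.data_nbits	= SPI_NBITS_SINGLE,
	};

	return spi_flash_read(spi, &msg);
}
#endif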
/*-------------------------------------------------------------------------*/

/* Utility methods for SPI master protocol drivers, layered on
 * top of the core.  Some other utility methods are defined as
 * inline functions.
 */

static void spi_complete(void *arg)
{
	complete(arg);
}

static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int status;
	struct spi_master *master = spi->master;
	unsigned long flags;

	status = __spi_validate(spi, message);
	if (status != 0)
		return status;

	message->complete = spi_complete;
	message->context = &done;
	message->spi = spi;

	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);

	/* If we're not using the legacy transfer method then we will
	 * try to transfer in the calling context so special case.
	 * This code would be less tricky if we could remove the
	 * support for driver implemented message queues.
	 */
	if (master->transfer == spi_queued_transfer) {
		spin_lock_irqsave(&master->bus_lock_spinlock, flags);

		trace_spi_message_submit(message);

		status = __spi_queued_transfer(spi, message, false);

		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
	} else {
		status = spi_async_locked(spi, message);
	}

	if (status == 0) {
		/* Push out the messages in the calling context if we can */
		if (master->transfer == spi_queued_transfer) {
			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
						       spi_sync_immediate);
			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
						       spi_sync_immediate);
			__spi_pump_messages(master, false);
		}

		wait_for_completion(&done);
		status = message->status;
	}
	message->context = NULL;
	return status;
}
/**
 * spi_sync - blocking/synchronous SPI data transfers
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * Note that the SPI device's chip select is active during the message,
 * and then is normally disabled between messages.  Drivers for some
 * frequently-used devices may want to minimize costs of selecting a chip,
 * by leaving it selected in anticipation that the next message will go
 * to the same chip.  (That may increase power usage.)
 *
 * Also, the caller is guaranteeing that the memory associated with the
 * message will not be freed before this call returns.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync(struct spi_device *spi, struct spi_message *message)
{
	int ret;

	mutex_lock(&spi->master->bus_lock_mutex);
	ret = __spi_sync(spi, message);
	mutex_unlock(&spi->master->bus_lock_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(spi_sync);
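/*
 * Illustrative sketch (not part of the original file): the common pattern
 * for a one-off full-duplex exchange built on spi_sync(). The example_*
 * name is an assumption; spi.h also offers spi_sync_transfer() for this.
 */
#if 0	/* example only */
static int example_xfer(struct spi_device *spi, const void *tx, void *rx,
			size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= len,
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	/* blocks, uninterruptibly, until the controller is done */
	return spi_sync(spi, &msg);
}
#endif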
/**
 * spi_sync_locked - version of spi_sync with exclusive bus usage
 * @spi: device with which data will be exchanged
 * @message: describes the data transfers
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.  Low-overhead controller
 * drivers may DMA directly into and out of the message buffers.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  It has to be preceded by a spi_bus_lock call.  The SPI bus must
 * be released by a spi_bus_unlock call when the exclusive access is over.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
{
	return __spi_sync(spi, message);
}
EXPORT_SYMBOL_GPL(spi_sync_locked);
/**
 * spi_bus_lock - obtain a lock for exclusive SPI bus usage
 * @master: SPI bus master that should be locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call should be used by drivers that require exclusive access to the
 * SPI bus.  The SPI bus must be released by a spi_bus_unlock call when the
 * exclusive access is over.  Data transfer must be done by spi_sync_locked
 * and spi_async_locked calls when the SPI bus lock is held.
 *
 * Return: always zero.
 */
int spi_bus_lock(struct spi_master *master)
{
	unsigned long flags;

	mutex_lock(&master->bus_lock_mutex);

	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	master->bus_lock_flag = 1;
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	/* mutex remains locked until spi_bus_unlock is called */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_lock);
/**
 * spi_bus_unlock - release the lock for exclusive SPI bus usage
 * @master: SPI bus master that was locked for exclusive bus access
 * Context: can sleep
 *
 * This call may only be used from a context that may sleep.  The sleep
 * is non-interruptible, and has no timeout.
 *
 * This call releases an SPI bus lock previously obtained by an spi_bus_lock
 * call.
 *
 * Return: always zero.
 */
int spi_bus_unlock(struct spi_master *master)
{
	master->bus_lock_flag = 0;

	mutex_unlock(&master->bus_lock_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_bus_unlock);
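/*
 * Illustrative sketch (not part of the original file): issuing two
 * messages back to back with no other client allowed on the bus in
 * between, using the lock API above. The example_* name is an assumption.
 */
#if 0	/* example only */
static int example_atomic_pair(struct spi_device *spi,
			       struct spi_message *first,
			       struct spi_message *second)
{
	struct spi_master *master = spi->master;
	int ret;

	spi_bus_lock(master);
	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);
	spi_bus_unlock(master);

	return ret;
}
#endif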
/* portable code must never pass more than 32 bytes */
#define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)

static u8	*buf;

/**
 * spi_write_then_read - SPI synchronous write followed by read
 * @spi: device with which data will be exchanged
 * @txbuf: data to be written (need not be dma-safe)
 * @n_tx: size of txbuf, in bytes
 * @rxbuf: buffer into which data will be read (need not be dma-safe)
 * @n_rx: size of rxbuf, in bytes
 * Context: can sleep
 *
 * This performs a half duplex MicroWire style transaction with the
 * device, sending txbuf and then reading rxbuf.  The return value
 * is zero for success, else a negative errno status code.
 * This call may only be used from a context that may sleep.
 *
 * Parameters to this routine are always copied using a small buffer;
 * portable code should never use this for more than 32 bytes.
 * Performance-sensitive or bulk transfer code should instead use
 * spi_{async,sync}() calls with dma-safe buffers.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_write_then_read(struct spi_device *spi,
		const void *txbuf, unsigned n_tx,
		void *rxbuf, unsigned n_rx)
{
	static DEFINE_MUTEX(lock);

	int			status;
	struct spi_message	message;
	struct spi_transfer	x[2];
	u8			*local_buf;

	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
	 * copying here (as a pure convenience thing), but we can
	 * keep heap costs out of the hot path unless someone else is
	 * using the pre-allocated buffer or the transfer is too large.
	 */
	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
				    GFP_KERNEL | GFP_DMA);
		if (!local_buf)
			return -ENOMEM;
	} else {
		local_buf = buf;
	}

	spi_message_init(&message);
	memset(x, 0, sizeof(x));
	if (n_tx) {
		x[0].len = n_tx;
		spi_message_add_tail(&x[0], &message);
	}
	if (n_rx) {
		x[1].len = n_rx;
		spi_message_add_tail(&x[1], &message);
	}

	memcpy(local_buf, txbuf, n_tx);
	x[0].tx_buf = local_buf;
	x[1].rx_buf = local_buf + n_tx;

	/* do the i/o */
	status = spi_sync(spi, &message);
	if (status == 0)
		memcpy(rxbuf, x[1].rx_buf, n_rx);

	if (x[0].tx_buf == buf)
		mutex_unlock(&lock);
	else
		kfree(local_buf);

	return status;
}
EXPORT_SYMBOL_GPL(spi_write_then_read);
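/*
 * Illustrative sketch (not part of the original file): the typical
 * consumer of spi_write_then_read() is a small register read. The
 * example_* name and the 0x80 "read" flag convention are assumptions
 * about a hypothetical device.
 */
#if 0	/* example only */
static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	u8 cmd = reg | 0x80;	/* hypothetical read-flag convention */

	/* neither &cmd nor val needs to be DMA-safe; both are copied */
	return spi_write_then_read(spi, &cmd, 1, val, 1);
}
#endif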
/*-------------------------------------------------------------------------*/

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
static int __spi_of_device_match(struct device *dev, void *data)
{
	return dev->of_node == data;
}

/* must call put_device() when done with returned spi_device device */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
						__spi_of_device_match);
	return dev ? to_spi_device(dev) : NULL;
}

static int __spi_of_master_match(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

/* SPI masters are not on the spi_bus, so we have to find them another way */
static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, node,
				__spi_of_master_match);
	if (!dev)
		return NULL;

	/* reference got in class_find_device */
	return container_of(dev, struct spi_master, dev);
}

static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		master = of_find_spi_master_by_node(rd->dn->parent);
		if (master == NULL)
			return NOTIFY_OK;	/* not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&master->dev);
			return NOTIFY_OK;
		}

		spi = of_register_spi_device(master, rd->dn);
		put_device(&master->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%s'\n",
					__func__, rd->dn->full_name);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* no? not meant for us */

		/* unregister takes one ref away */
		spi_unregister_device(spi);

		/* and put the reference of the find */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_master_match(struct device *dev, const void *data)
{
	return ACPI_COMPANION(dev->parent) == data;
}

static int spi_acpi_device_match(struct device *dev, void *data)
{
	return ACPI_COMPANION(dev) == data;
}

static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_master_class, NULL, adev,
				spi_acpi_master_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_master, dev);
}

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);

	return dev ? to_spi_device(dev) : NULL;
}

static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_master *master;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		master = acpi_spi_find_master_by_adev(adev->parent);
		if (!master)
			break;

		acpi_register_spi_device(master, adev);
		put_device(&master->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif
static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_master_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/* board_info is normally registered in arch_initcall(),
 * but even essential drivers wait until later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... costs
 * include needing to have boardinfo data structures be much more public.
 */
postcore_initcall(spi_init);