/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs, protected by a
 * mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details.
 */
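
/*
 * Example (a minimal, hypothetical client sketch; error handling elided):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan) {
 *		... issue transfers on the channel ...
 *		dma_release_channel(chan);
 *	}
 */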
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}
static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);
static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);
static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);
static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);
static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		ida_free(&dma_ida, chan_dev->dev_id);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}
static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};
/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static bool
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->owner;
}
/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}
/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		chan->client_count++;
		return 0;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	chan->client_count++;

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

	return 0;

err_out:
	module_put(owner);
	return ret;
}
/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;
	module_put(dma_chan_to_owner(chan));

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
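
/*
 * Example (sketch): a client that has submitted a descriptor can poll it to
 * completion; "desc" here is a hypothetical previously-prepared descriptor:
 *
 *	dma_cookie_t cookie = dmaengine_submit(desc);
 *
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		return -EIO;
 */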
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);
/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
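
/*
 * Example (sketch): the returned channel is only valid between a
 * dmaengine_get()/dmaengine_put() pair and must not be cached across it:
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		... issue memcpy operations on chan ...
 *	dmaengine_put();
 */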
/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}
/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}
/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities, if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->max_burst = device->max_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
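
/*
 * Example (sketch): check that a channel can actually be paused before
 * relying on dmaengine_pause():
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) && caps.cmd_pause)
 *		dmaengine_pause(chan);
 */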
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}
static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
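
/*
 * Example (sketch; my_filter and struct my_match are hypothetical): callers
 * normally use the dma_request_channel() wrapper, passing a filter function
 * that inspects each free channel the core offers:
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		struct my_match *m = param;
 *
 *		return chan->chan_id == m->chan_id;
 *	}
 *	...
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, &match);
 */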
static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}
/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (chan) {
		/* Valid channel found or requester needs to be deferred */
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
	}

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(dma_request_chan);
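
/*
 * Example (probe-time sketch): request the channel named "rx" in the
 * device's DT/ACPI description or filter map, propagating -EPROBE_DEFER
 * when the provider has not registered yet:
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */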
/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);
	if (IS_ERR(ch))
		return NULL;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
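
/*
 * Example (sketch): grab any channel that supports both slave and cyclic
 * transfers:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	dma_cap_set(DMA_CYCLIC, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 */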
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);
/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}
static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}
/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan* chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMA device must have dev\n");
		return -EIO;
	}

	device->owner = device->dev->driver->owner;

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}
	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}
	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}
	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}
	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}
	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}
	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}
	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}

	if (!chancnt) {
		dev_err(device->dev, "%s: device has no channels!\n", __func__);
		rc = -ENODEV;
		goto err_out;
	}

	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		ida_free(&dma_ida, device->dev_id);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
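
/*
 * Example (provider sketch; the foo_* and fd names are hypothetical): a
 * driver declares its capabilities and callbacks, adds its channels to
 * device->channels, and then registers:
 *
 *	dma_cap_set(DMA_MEMCPY, fd->ddev.cap_mask);
 *	fd->ddev.device_prep_dma_memcpy = foo_prep_memcpy;
 *	fd->ddev.device_tx_status = foo_tx_status;
 *	fd->ddev.device_issue_pending = foo_issue_pending;
 *	INIT_LIST_HEAD(&fd->ddev.channels);
 *	list_add_tail(&fd->chan.device_node, &fd->ddev.channels);
 *	rc = dma_async_device_register(&fd->ddev);
 */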
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);
static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}
/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	void *p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
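
/*
 * Example (probe sketch): with the managed variant the driver needs no
 * remove() hook to undo the registration:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *		return dmaenginem_async_device_register(&fd->ddev);
 *	}
 */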
struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
#endif
};
static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
#endif
	default:
		BUG();
		return NULL;
	}
}
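
/*
 * Worked example: for nr = 100, get_count_order(100) = 7, so the request is
 * served from unmap_pool[2] ("dmaengine-unmap-128"); nr = 2 gives order 1
 * and uses unmap_pool[0] ("dmaengine-unmap-2").
 */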
static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}
static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}
struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
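
/*
 * Example (sketch, error handling elided): map one source and one
 * destination page for a memcpy-style operation, then drop the reference
 * once the descriptor has completed:
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	unmap->len = len;
 *	unmap->to_cnt = 1;
 *	unmap->addr[0] = dma_map_page(dev, src_page, 0, len, DMA_TO_DEVICE);
 *	unmap->from_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, 0, len, DMA_FROM_DEVICE);
 *	...
 *	dmaengine_unmap_put(unmap);
 */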
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);