/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Due to the fact we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * the mutex dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered, it's just setup by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
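/*
 * Illustrative sketch (not part of this file): an opportunistic client
 * brackets its fast-path lookups with dmaengine_get()/dmaengine_put().
 * The chan/dst/src/len names below are hypothetical.
 *
 *	struct dma_chan *chan;
 *	dma_cookie_t cookie;
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *		if (cookie >= 0)
 *			dma_sync_wait(chan, cookie);
 *	}
 *	dmaengine_put();
 */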
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}
static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int err;
	int i;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int err;
	int i;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};
static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}
static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};
/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static bool
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}
/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}
/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}
/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
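/*
 * Illustrative sketch (not part of this file): busy-waiting on a cookie
 * obtained from a previously submitted descriptor.  "chan" and "cookie"
 * are hypothetical here.
 *
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		pr_err("DMA transaction failed or timed out\n");
 */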
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);
/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
/**
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);
/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied. Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;
			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;
	if (ret)
		ret->table_count++;

	return ret;
}
/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}
/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it. We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
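/*
 * Illustrative sketch (not part of this file): requesting an exclusive
 * memcpy-capable channel through the dma_request_channel() wrapper.  The
 * filter callback my_filter() and its parameter are hypothetical.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_filter_param);
 *	if (chan) {
 *		... use the channel exclusively, then ...
 *		dma_release_channel(chan);
 *	}
 */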
/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 */
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return NULL;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
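/*
 * Illustrative sketch (not part of this file): a slave client requesting
 * its channel by name and submitting one transfer.  The "rx" name, buffer,
 * length, and config values are hypothetical.
 *
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *desc;
 *
 *	chan = dma_request_slave_channel(dev, "rx");
 *	if (!chan)
 *		return -ENODEV;
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
 *					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */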
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);
/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}
static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}
/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan* chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
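/*
 * Illustrative sketch (not part of this file): the tail of a hypothetical
 * provider's probe routine, showing the minimum a driver fills in before
 * registering.  "mydev", "mychan", and the mydev_* callbacks are made up.
 *
 *	dma_cap_set(DMA_MEMCPY, mydev->ddev.cap_mask);
 *	mydev->ddev.device_alloc_chan_resources = mydev_alloc_chan_resources;
 *	mydev->ddev.device_free_chan_resources = mydev_free_chan_resources;
 *	mydev->ddev.device_prep_dma_memcpy = mydev_prep_dma_memcpy;
 *	mydev->ddev.device_tx_status = mydev_tx_status;
 *	mydev->ddev.device_issue_pending = mydev_issue_pending;
 *	mydev->ddev.dev = &pdev->dev;
 *	INIT_LIST_HEAD(&mydev->ddev.channels);
 *	list_add_tail(&mychan->chan.device_node, &mydev->ddev.channels);
 *	err = dma_async_device_register(&mydev->ddev);
 */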
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);
/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
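/*
 * Illustrative sketch (not part of this file): polling the returned cookie
 * for completion instead of calling dma_sync_wait().  All names here are
 * hypothetical.
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (cookie >= 0) {
 *		dma_async_issue_pending(chan);
 *		while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *		       DMA_IN_PROGRESS)
 *			cpu_relax();
 *	}
 */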
/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
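/*
 * Illustrative sketch (not part of this file): a driver's descriptor
 * cleanup path would typically run dependencies after marking the
 * transaction complete.  dma_cookie_complete() is the helper from the
 * private drivers/dma/dmaengine.h header; "txd" is hypothetical.
 *
 *	dma_cookie_complete(txd);
 *	if (txd->callback)
 *		txd->callback(txd->callback_param);
 *	dma_run_dependencies(txd);
 */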
static int __init dma_bus_init(void)
{
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);