/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *	Dan Williams <dan.j.williams@intel.com>
 *
 *	with architecture considerations by:
 *	Neil Brown <neilb@suse.de>
 *	Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>
#ifdef CONFIG_DMA_ENGINE
static enum dma_state_client
dma_channel_add_remove(struct dma_client *client,
	struct dma_chan *chan, enum dma_state state);

static struct dma_client async_tx_dma = {
	.event_callback = dma_channel_add_remove,
	/* .cap_mask == 0 defaults to all channels */
};

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * chan_ref_percpu - tracks channel allocations per core/operation
 */
struct chan_ref_percpu {
	struct dma_chan_ref *ref;
};

static int channel_table_initialized;
static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];

/**
 * async_tx_lock - protect modification of async_tx_master_list and serialize
 *	rebalance operations
 */
static spinlock_t async_tx_lock;

static LIST_HEAD(async_tx_master_list);
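/*
 * Added commentary (not from the original file): the structures above work
 * together as follows.  async_tx_master_list holds one dma_chan_ref per
 * channel reported by the dmaengine core, while channel_table provides a
 * per-cpu, per-operation-type shortcut to the channel that
 * async_tx_rebalance() selected for that cpu.
 */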
/* async_tx_issue_pending_all - start all transactions on all channels */
void async_tx_issue_pending_all(void)
{
	struct dma_chan_ref *ref;

	rcu_read_lock();
	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
		ref->chan->device->device_issue_pending(ref->chan);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
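/*
 * Typical usage (a sketch, not taken verbatim from any in-tree caller): a
 * client such as a raid driver batches several async operations and then
 * kicks the hardware once for all of them:
 *
 *	tx = async_xor(dest, srcs, 0, count, len, ASYNC_TX_ACK, tx, NULL, NULL);
 *	...
 *	async_tx_issue_pending_all();
 */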
static void free_dma_chan_ref(struct rcu_head *rcu)
{
	struct dma_chan_ref *ref;
	ref = container_of(rcu, struct dma_chan_ref, rcu);
	kfree(ref);
}

static void init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
{
	INIT_LIST_HEAD(&ref->node);
	INIT_RCU_HEAD(&ref->rcu);
	ref->chan = chan;
	atomic_set(&ref->count, 0);
}
/**
 * get_chan_ref_by_cap - returns the nth channel of the given capability
 *	defaults to returning the channel with the desired capability and the
 *	lowest reference count if the index can not be satisfied
 * @cap: capability to match
 * @index: nth channel desired, passing -1 has the effect of forcing the
 *	default return value
 */
static struct dma_chan_ref *
get_chan_ref_by_cap(enum dma_transaction_type cap, int index)
{
	struct dma_chan_ref *ret_ref = NULL, *min_ref = NULL, *ref;

	rcu_read_lock();
	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
		if (dma_has_cap(cap, ref->chan->device->cap_mask)) {
			if (!min_ref)
				min_ref = ref;
			else if (atomic_read(&ref->count) <
				 atomic_read(&min_ref->count))
				min_ref = ref;

			if (index-- == 0) {
				ret_ref = ref;
				break;
			}
		}
	rcu_read_unlock();

	if (!ret_ref)
		ret_ref = min_ref;
	if (ret_ref)
		atomic_inc(&ret_ref->count);
	return ret_ref;
}
/**
 * async_tx_rebalance - redistribute the available channels, optimize
 * for cpu isolation in the SMP case, and operation isolation in the
 * uniprocessor case
 */
static void async_tx_rebalance(void)
{
	int cpu, cap, cpu_idx = 0;
	unsigned long flags;

	if (!channel_table_initialized)
		return;

	spin_lock_irqsave(&async_tx_lock, flags);

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu) {
			struct dma_chan_ref *ref =
				per_cpu_ptr(channel_table[cap], cpu)->ref;
			if (ref) {
				atomic_set(&ref->count, 0);
				per_cpu_ptr(channel_table[cap], cpu)->ref =
									NULL;
			}
		}

	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			struct dma_chan_ref *new;
			if (NR_CPUS > 1)
				new = get_chan_ref_by_cap(cap, cpu_idx++);
			else
				new = get_chan_ref_by_cap(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->ref = new;
		}

	spin_unlock_irqrestore(&async_tx_lock, flags);
}
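/*
 * Illustration (added commentary, not from the original source): for a single
 * capability with two matching channels and four online cpus, the loop above
 * hands out the channels round-robin by index and, once the index runs past
 * the number of matching channels, get_chan_ref_by_cap() falls back to the
 * least-referenced channel, so the cpus end up spread evenly across the
 * available engines.
 */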
static enum dma_state_client
dma_channel_add_remove(struct dma_client *client,
	struct dma_chan *chan, enum dma_state state)
{
	unsigned long found, flags;
	struct dma_chan_ref *master_ref, *ref;
	enum dma_state_client ack = DMA_DUP; /* default: take no action */

	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		found = 0;
		rcu_read_lock();
		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
			if (ref->chan == chan) {
				/* we already know about this channel, just
				 * take a reference
				 */
				atomic_inc(&ref->count);
				ack = DMA_ACK;
				found = 1;
				break;
			}
		rcu_read_unlock();

		pr_debug("async_tx: dma resource available [%s]\n",
			 found ? "old" : "new");
		if (!found)
			ack = DMA_ACK;
		else
			break;

		/* add the channel to the generic management list */
		master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
		if (master_ref) {
			/* keep a reference until async_tx is unloaded */
			dma_chan_get(chan);
			init_dma_chan_ref(master_ref, chan);
			spin_lock_irqsave(&async_tx_lock, flags);
			list_add_tail_rcu(&master_ref->node,
					  &async_tx_master_list);
			spin_unlock_irqrestore(&async_tx_lock,
					       flags);
		} else {
			printk(KERN_WARNING "async_tx: unable to create"
				" new master entry in response to"
				" a DMA_RESOURCE_ADDED event"
				" (-ENOMEM)\n");
			return 0;
		}

		async_tx_rebalance();
		break;
	case DMA_RESOURCE_REMOVED:
		found = 0;
		spin_lock_irqsave(&async_tx_lock, flags);
		list_for_each_entry(ref, &async_tx_master_list, node)
			if (ref->chan == chan) {
				/* permit backing devices to go away */
				dma_chan_put(ref->chan);
				list_del_rcu(&ref->node);
				call_rcu(&ref->rcu, free_dma_chan_ref);
				found = 1;
				break;
			}
		spin_unlock_irqrestore(&async_tx_lock, flags);

		pr_debug("async_tx: dma resource removed [%s]\n",
			 found ? "ours" : "not ours");
		if (found)
			ack = DMA_ACK;
		else
			break;

		async_tx_rebalance();
		break;
	case DMA_RESOURCE_SUSPEND:
	case DMA_RESOURCE_RESUME:
		printk(KERN_WARNING "async_tx: does not support dma channel"
			" suspend/resume\n");
		break;
	default:
		BUG();
	}

	return ack;
}
static int __init async_tx_init(void)
{
	enum dma_transaction_type cap;

	spin_lock_init(&async_tx_lock);
	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* an interrupt will never be an explicit operation type.
	 * clearing this bit prevents allocation to a slot in 'channel_table'
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct chan_ref_percpu);
		if (!channel_table[cap])
			goto err;
	}

	channel_table_initialized = 1;
	dma_async_client_register(&async_tx_dma);
	dma_async_client_chan_request(&async_tx_dma);

	printk(KERN_INFO "async_tx: api initialized (async)\n");

	return 0;
err:
	printk(KERN_ERR "async_tx: initialization failure\n");

	while (--cap >= 0)
		free_percpu(channel_table[cap]);

	return 1;
}
static void __exit async_tx_exit(void)
{
	enum dma_transaction_type cap;

	channel_table_initialized = 0;

	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		if (channel_table[cap])
			free_percpu(channel_table[cap]);

	dma_async_client_unregister(&async_tx_dma);
}
/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *	the transaction execute synchronously
 * @depend_tx: transaction dependency
 * @tx_type: transaction type
 */
struct dma_chan *
__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
	enum dma_transaction_type tx_type)
{
	/* see if we can keep the chain on one channel */
	if (depend_tx &&
	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
		return depend_tx->chan;
	else if (likely(channel_table_initialized)) {
		struct dma_chan_ref *ref;
		int cpu = get_cpu();
		ref = per_cpu_ptr(channel_table[tx_type], cpu)->ref;
		put_cpu();
		return ref ? ref->chan : NULL;
	} else
		return NULL;
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
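/*
 * Example (a sketch of how the async_* wrappers in this directory use the
 * lookup; not copied from them verbatim): a memcpy wrapper asks for a
 * DMA_MEMCPY-capable channel, preferring the channel of its dependency so
 * the chain stays on one engine, and falls back to a cpu copy otherwise:
 *
 *	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY);
 *
 *	if (chan)
 *		... prepare and submit a hardware descriptor ...
 *	else
 *		... memcpy() on the cpu and run the callback ...
 */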
#else
static int __init async_tx_init(void)
{
	printk(KERN_INFO "async_tx: api initialized (sync-only)\n");
	return 0;
}

static void __exit async_tx_exit(void)
{
	do { } while (0);
}
#endif
/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	spin_lock_bh(&depend_tx->lock);
	if (depend_tx->parent && depend_tx->chan == tx->chan) {
		tx->parent = depend_tx;
		depend_tx->next = tx;
		intr_tx = NULL;
	}
	spin_unlock_bh(&depend_tx->lock);

	if (!intr_tx)
		return;

	chan = depend_tx->chan;
	device = chan->device;

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		tx->parent = intr_tx;
		/* safe to set ->next outside the lock since we know we are
		 * not submitted yet
		 */
		intr_tx->next = tx;

		/* check if we need to append */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			intr_tx->parent = depend_tx;
			depend_tx->next = intr_tx;
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		spin_unlock_bh(&depend_tx->lock);

		if (intr_tx) {
			intr_tx->parent = NULL;
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
	} else {
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}
/**
 * submit_disposition - while holding depend_tx->lock we must avoid submitting
 *	new operations to prevent a circular locking dependency with
 *	drivers that already hold a channel lock when calling
 *	async_tx_run_dependencies.
 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
 */
enum submit_disposition {
	ASYNC_TX_SUBMITTED,
	ASYNC_TX_CHANNEL_SWITCH,
	ASYNC_TX_DIRECT_SUBMIT,
};
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	tx->callback = cb_fn;
	tx->callback_param = cb_param;

	if (depend_tx) {
		enum submit_disposition s;

		/* sanity check the dependency chain:
		 * 1/ if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 * 2/ dependencies are 1:1 i.e. two transactions can
		 * not depend on the same parent
		 */
		BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
		       tx->parent);

		/* the lock prevents async_tx_run_dependencies from missing
		 * the setting of ->next when ->parent != NULL
		 */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			/* we have a parent so we can not submit directly
			 * if we are staying on the same channel: append
			 * else: channel switch
			 */
			if (depend_tx->chan == chan) {
				tx->parent = depend_tx;
				depend_tx->next = tx;
				s = ASYNC_TX_SUBMITTED;
			} else
				s = ASYNC_TX_CHANNEL_SWITCH;
		} else {
			/* we do not have a parent so we may be able to submit
			 * directly if we are staying on the same channel
			 */
			if (depend_tx->chan == chan)
				s = ASYNC_TX_DIRECT_SUBMIT;
			else
				s = ASYNC_TX_CHANNEL_SWITCH;
		}
		spin_unlock_bh(&depend_tx->lock);

		switch (s) {
		case ASYNC_TX_SUBMITTED:
			break;
		case ASYNC_TX_CHANNEL_SWITCH:
			async_tx_channel_switch(depend_tx, tx);
			break;
		case ASYNC_TX_DIRECT_SUBMIT:
			tx->parent = NULL;
			tx->tx_submit(tx);
			break;
		}
	} else {
		tx->parent = NULL;
		tx->tx_submit(tx);
	}

	if (flags & ASYNC_TX_ACK)
		async_tx_ack(tx);

	if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
		async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
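/*
 * Sketch of a caller (illustrative only; the real wrappers live in
 * async_memcpy.c and friends): an async_* operation prepares a descriptor on
 * the channel returned by async_tx_find_channel() and hands dependency
 * tracking and callback wiring to async_tx_submit():
 *
 *	tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
 *	if (tx)
 *		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
 */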
/**
 * async_trigger_callback - schedules the callback function to be run after
 * any dependent operations have been completed.
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: 'callback' requires the completion of this transaction
 * @cb_fn: function to call after depend_tx completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_trigger_callback(enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *tx;

	if (depend_tx) {
		chan = depend_tx->chan;
		device = chan->device;

		/* see if we can schedule an interrupt
		 * otherwise poll for completion
		 */
		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
			device = NULL;

		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
	} else
		tx = NULL;

	if (tx) {
		pr_debug("%s: (async)\n", __func__);

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		pr_debug("%s: (sync)\n", __func__);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&depend_tx);

		async_tx_sync_epilog(cb_fn, cb_param);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
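/*
 * Typical usage (a sketch along the lines of the async_tx API documentation,
 * not an exact quote): run a completion callback once a chain of offloaded
 * operations has finished:
 *
 *	tx = async_memcpy(dest, src, 0, 0, len, 0, NULL, NULL, NULL);
 *	async_trigger_callback(ASYNC_TX_ACK | ASYNC_TX_DEP_ACK, tx,
 *			       complete_fn, arg);
 */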
/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx - transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
	if (*tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(*tx));
		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
			panic("DMA_ERROR waiting for transaction\n");
		async_tx_ack(*tx);
		*tx = NULL;
	}
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
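/*
 * Note (added commentary, not from the original file): the async_* wrappers
 * call async_tx_quiesce(&depend_tx) on their synchronous fallback paths so
 * that a cpu-based copy or xor never runs ahead of an offloaded dependency.
 */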
module_init(async_tx_init);
module_exit(async_tx_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");