}
}
+ reinit_completion(&ctlr->cur_msg_completion);
ret = ctlr->transfer_one_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev,
"failed to transfer one message from queue\n");
return ret;
+ } else {
+ wait_for_completion(&ctlr->cur_msg_completion);
}
return 0;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+
+ if (!ret)
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+ ctlr->cur_msg = NULL;
+ ctlr->fallback = false;
+
mutex_unlock(&ctlr->io_mutex);
/* Prod the scheduler in case transfer_one() was busy waiting */
{
struct spi_transfer *xfer;
struct spi_message *mesg;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&ctlr->queue_lock, flags);
mesg = ctlr->cur_msg;
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
mesg->prepared = false;
- if (!mesg->sync) {
- /*
- * This message was sent via the async message queue. Handle
- * the queue and kick the worker thread to do the
- * idling/shutdown or send the next message if needed.
- */
- spin_lock_irqsave(&ctlr->queue_lock, flags);
- WARN(ctlr->cur_msg != mesg,
- "Finalizing queued message that is not the current head of queue!");
- ctlr->cur_msg = NULL;
- ctlr->fallback = false;
- kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
- spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- }
+ complete(&ctlr->cur_msg_completion);
trace_spi_message_done(mesg);
}
ctlr->bus_lock_flag = 0;
init_completion(&ctlr->xfer_completion);
+ init_completion(&ctlr->cur_msg_completion);
if (!ctlr->max_dma_len)
ctlr->max_dma_len = INT_MAX;
if (ret)
goto out;
+ ctlr->cur_msg = NULL;
+ ctlr->fallback = false;
+
if (!was_busy) {
kfree(ctlr->dummy_rx);
ctlr->dummy_rx = NULL;
* will catch those cases.
*/
if (READ_ONCE(ctlr->queue_empty)) {
- message->sync = true;
message->actual_length = 0;
message->status = -EINPROGRESS;
 * @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
* @cur_msg: the currently in-flight message
+ * @cur_msg_completion: a completion for the current in-flight message
* @cur_msg_mapped: message has been mapped for DMA
* @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip
* selected
spinlock_t queue_lock;
struct list_head queue;
struct spi_message *cur_msg;
+ struct completion cur_msg_completion;
bool busy;
bool running;
bool rt;
* @state: for use by whichever driver currently owns the message
* @resources: for resource management when the spi message is processed
 * @prepared: spi_prepare_message was called for this message
- * @sync: this message took the direct sync path skipping the async queue
*
* A @spi_message is used to execute an atomic sequence of data transfers,
* each represented by a struct spi_transfer. The sequence is "atomic"
/* spi_prepare_message was called for this message */
bool prepared;
-
- /* this message is skipping the async queue */
- bool sync;
};
static inline void spi_message_init_no_memset(struct spi_message *m)