// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset, u32 *out)
u32 tmp = readl(base + offset);
/* If the value is invalid, the link is down */
if (PCI_INVALID_READ(tmp))
int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset,
u32 mask, u32 shift, u32 *out)
ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
*out = (tmp & mask) >> shift;
void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
writel(val, base + offset);
void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
u32 offset, u32 mask, u32 shift, u32 val)
ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
tmp |= (val << shift);
mhi_write_reg(mhi_cntrl, base, offset, tmp);
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
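/*
 * The 64-bit doorbell value is split into two 32-bit MMIO writes: the
 * upper half first (offset 4) and the lower half last (offset 0),
 * presumably so the device sees a complete value by the time the
 * low-word write it reacts to has landed.
 */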
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
struct db_cfg *db_cfg,
void __iomem *db_addr,
if (db_cfg->db_mode) {
db_cfg->db_val = db_val;
mhi_write_db(mhi_cntrl, db_addr, db_val);
void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
struct db_cfg *db_cfg,
void __iomem *db_addr,
db_cfg->db_val = db_val;
mhi_write_db(mhi_cntrl, db_addr, db_val);
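/*
 * With burst mode enabled, the doorbell above is rung only while
 * db_cfg->db_mode is set; the device re-arms it by sending an
 * MHI_EV_CC_DB_MODE completion (see parse_xfer_event() below). With
 * burst mode disabled, every update rings the doorbell unconditionally.
 */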
void mhi_ring_er_db(struct mhi_event *mhi_event)
struct mhi_ring *ring = &mhi_event->ring;
mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
ring->db_addr, *ring->ctxt_wp);
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
struct mhi_ring *ring = &mhi_cmd->ring;
db = ring->iommu_base + (ring->wp - ring->base);
mhi_write_db(mhi_cntrl, ring->db_addr, db);
void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
struct mhi_ring *ring = &mhi_chan->tre_ring;
db = ring->iommu_base + (ring->wp - ring->base);
mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
return (ret) ? MHI_EE_MAX : exec;
enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
MHISTATUS_MHISTATE_MASK,
MHISTATUS_MHISTATE_SHIFT, &state);
return ret ? MHI_STATE_MAX : state;
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info)
buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
buf_info->v_addr, buf_info->len,
if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info)
void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
&buf_info->p_addr, GFP_ATOMIC);
if (buf_info->dir == DMA_TO_DEVICE)
memcpy(buf, buf_info->v_addr, buf_info->len);
buf_info->bb_addr = buf;
void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info)
dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info)
if (buf_info->dir == DMA_FROM_DEVICE)
memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
if (ring->wp < ring->rp) {
nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
nr_el = (ring->rp - ring->base) / ring->el_size;
nr_el += ((ring->base + ring->len - ring->wp) /
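/*
 * Illustrative example (numbers invented for this note): for a ring with
 * el_size = 16 and len = 64 (four elements) at base = 0x1000, rp = 0x1010
 * and wp = 0x1030 give (rp - base) / el_size = 1 free element before the
 * read pointer plus (base + len - wp) / el_size = 1 free element after
 * the write pointer. As in the wrapped branch above, one element is held
 * in reserve so that a completely full ring can be told apart from an
 * empty one.
 */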
static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
return (addr - ring->iommu_base) + ring->base;
static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
ring->wp += ring->el_size;
if (ring->wp >= (ring->base + ring->len))
ring->wp = ring->base;
static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
ring->rp += ring->el_size;
if (ring->rp >= (ring->base + ring->len))
ring->rp = ring->base;
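/*
 * mhi_add_ring_element() advances the host write pointer and is used where
 * the host produces elements (transfer and command rings), while
 * mhi_del_ring_element() advances the host read pointer and is used where
 * the host consumes completed elements. Both wrap back to the ring base.
 */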
int mhi_destroy_device(struct device *dev, void *data)
struct mhi_device *mhi_dev;
struct mhi_controller *mhi_cntrl;
if (dev->bus != &mhi_bus_type)
mhi_dev = to_mhi_device(dev);
mhi_cntrl = mhi_dev->mhi_cntrl;
/* Only destroy virtual devices that are attached to the bus */
if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
* For the suspend and resume case, this function will get called
* without mhi_unregister_controller(). Hence, we need to drop the
* references to mhi_dev created for ul and dl channels. We can
* be sure that there will be no instances of mhi_dev left after this.
if (mhi_dev->ul_chan)
put_device(&mhi_dev->ul_chan->mhi_dev->dev);
if (mhi_dev->dl_chan)
put_device(&mhi_dev->dl_chan->mhi_dev->dev);
dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
/* Notify the client and remove the device from the MHI bus */
static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
struct mhi_driver *mhi_drv;
if (!mhi_dev->dev.driver)
mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
if (mhi_drv->status_cb)
mhi_drv->status_cb(mhi_dev, cb_reason);
/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
struct mhi_chan *mhi_chan;
struct mhi_device *mhi_dev;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
mhi_chan = mhi_cntrl->mhi_chan;
for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
if (!mhi_chan->configured || mhi_chan->mhi_dev ||
!(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
mhi_dev = mhi_alloc_device(mhi_cntrl);
mhi_dev->dev_type = MHI_DEVICE_XFER;
switch (mhi_chan->dir) {
mhi_dev->ul_chan = mhi_chan;
mhi_dev->ul_chan_id = mhi_chan->chan;
case DMA_FROM_DEVICE:
/* We use dl_chan for offload channels */
mhi_dev->dl_chan = mhi_chan;
mhi_dev->dl_chan_id = mhi_chan->chan;
dev_err(dev, "Direction not supported\n");
put_device(&mhi_dev->dev);
get_device(&mhi_dev->dev);
mhi_chan->mhi_dev = mhi_dev;
/* Check whether the next channel matches */
if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
if (mhi_chan->dir == DMA_TO_DEVICE) {
mhi_dev->ul_chan = mhi_chan;
mhi_dev->ul_chan_id = mhi_chan->chan;
mhi_dev->dl_chan = mhi_chan;
mhi_dev->dl_chan_id = mhi_chan->chan;
get_device(&mhi_dev->dev);
mhi_chan->mhi_dev = mhi_dev;
/* Channel name is the same for both UL and DL */
mhi_dev->chan_name = mhi_chan->name;
dev_set_name(&mhi_dev->dev, "%04x_%s", mhi_chan->chan,
/* Init wakeup source if available */
if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
device_init_wakeup(&mhi_dev->dev, true);
ret = device_add(&mhi_dev->dev);
put_device(&mhi_dev->dev);
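/*
 * Note on the pairing above: two consecutive configured channels that share
 * the same name are treated as the UL and DL halves of one logical device,
 * so both end up pointing at a single shared mhi_device (and the device
 * reference is taken once per channel).
 */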
irqreturn_t mhi_irq_handler(int irq_number, void *dev)
struct mhi_event *mhi_event = dev;
struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
struct mhi_event_ctxt *er_ctxt =
&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
struct mhi_ring *ev_ring = &mhi_event->ring;
void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
/* Only proceed if the event ring has pending events */
if (ev_ring->rp == dev_rp)
/* For a client-managed event ring, notify the client of pending data */
if (mhi_event->cl_manage) {
struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
tasklet_schedule(&mhi_event->task);
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
struct mhi_controller *mhi_cntrl = dev;
enum mhi_state state = MHI_STATE_MAX;
enum mhi_pm_state pm_state = 0;
enum mhi_ee_type ee = 0;
write_lock_irq(&mhi_cntrl->pm_lock);
if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
state = mhi_get_mhi_state(mhi_cntrl);
mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
if (state == MHI_STATE_SYS_ERR) {
dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
pm_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
write_unlock_irq(&mhi_cntrl->pm_lock);
/* If the device is in RDDM, don't bother processing the SYS error */
if (mhi_cntrl->ee == MHI_EE_RDDM) {
if (mhi_cntrl->ee != ee) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
wake_up_all(&mhi_cntrl->state_event);
if (pm_state == MHI_PM_SYS_ERR_DETECT) {
wake_up_all(&mhi_cntrl->state_event);
/* For fatal errors, we let the controller decide the next step */
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
schedule_work(&mhi_cntrl->syserr_worker);
irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
struct mhi_controller *mhi_cntrl = dev;
/* Wake up any threads waiting for a state change */
wake_up_all(&mhi_cntrl->state_event);
return IRQ_WAKE_THREAD;
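/*
 * The hard IRQ handler above only wakes up waiters on state_event and
 * returns IRQ_WAKE_THREAD; the heavier state and execution environment
 * inspection runs in mhi_intvec_threaded_handler(). Per event ring
 * interrupts go through mhi_irq_handler(), which defers the actual ring
 * processing to the tasklets (mhi_ev_task()/mhi_ctrl_ev_task()) below.
 */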
static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
ring->wp += ring->el_size;
ctxt_wp = *ring->ctxt_wp + ring->el_size;
if (ring->wp >= (ring->base + ring->len)) {
ring->wp = ring->base;
ctxt_wp = ring->iommu_base;
*ring->ctxt_wp = ctxt_wp;
ring->rp += ring->el_size;
if (ring->rp >= (ring->base + ring->len))
ring->rp = ring->base;
/* Make the ring pointer updates visible to all cores */
static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
struct mhi_tre *event,
struct mhi_chan *mhi_chan)
struct mhi_ring *buf_ring, *tre_ring;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_result result;
unsigned long flags = 0;
ev_code = MHI_TRE_GET_EV_CODE(event);
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
* If it's a DB Event, we need to grab the lock as a writer, with
* preemption disabled, because we have to update the DB register
* and another thread could be doing the same.
if (ev_code >= MHI_EV_CC_OOB)
write_lock_irqsave(&mhi_chan->lock, flags);
read_lock_bh(&mhi_chan->lock);
if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
goto end_process_tx_event;
case MHI_EV_CC_OVERFLOW:
dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
struct mhi_tre *local_rp, *ev_tre;
struct mhi_buf_info *buf_info;
/* Get the TRB this event points to */
ev_tre = mhi_to_virtual(tre_ring, ptr);
if (dev_rp >= (tre_ring->base + tre_ring->len))
dev_rp = tre_ring->base;
result.dir = mhi_chan->dir;
local_rp = tre_ring->rp;
while (local_rp != dev_rp) {
buf_info = buf_ring->rp;
/* If it's the last TRE, get the length from the event */
if (local_rp == ev_tre)
xfer_len = MHI_TRE_GET_EV_LEN(event);
xfer_len = buf_info->len;
/* Unmap if it's not pre-mapped by the client */
if (likely(!buf_info->pre_mapped))
mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
result.buf_addr = buf_info->cb_buf;
result.bytes_xferd = xfer_len;
mhi_del_ring_element(mhi_cntrl, buf_ring);
mhi_del_ring_element(mhi_cntrl, tre_ring);
local_rp = tre_ring->rp;
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_dec(&mhi_cntrl->pending_pkts);
* Recycle the buffer if it is pre-allocated; if there is an
* error, there is not much we can do apart from dropping the
* packet.
if (mhi_chan->pre_alloc) {
if (mhi_queue_buf(mhi_chan->mhi_dev,
buf_info->len, MHI_EOT)) {
"Error recycling buffer for chan:%d\n",
kfree(buf_info->cb_buf);
case MHI_EV_CC_DB_MODE:
mhi_chan->db_cfg.db_mode = 1;
read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
if (tre_ring->wp != tre_ring->rp &&
MHI_DB_ACCESS_VALID(mhi_cntrl)) {
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
case MHI_EV_CC_BAD_TRE:
dev_err(dev, "Unknown event 0x%x\n", ev_code);
} /* switch(MHI_TRE_GET_EV_CODE(event)) */
end_process_tx_event:
if (ev_code >= MHI_EV_CC_OOB)
write_unlock_irqrestore(&mhi_chan->lock, flags);
read_unlock_bh(&mhi_chan->lock);
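/*
 * Unlike parse_xfer_event(), completions on RSC channels identify the
 * buffer by a cookie (a byte offset into the buffer ring) carried in the
 * event itself rather than by walking the transfer ring, so completions
 * may be serviced out of order; see the note inside the function below.
 */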
static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
struct mhi_tre *event,
struct mhi_chan *mhi_chan)
struct mhi_ring *buf_ring, *tre_ring;
struct mhi_buf_info *buf_info;
struct mhi_result result;
u32 cookie; /* offset to local descriptor */
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
ev_code = MHI_TRE_GET_EV_CODE(event);
cookie = MHI_TRE_GET_EV_COOKIE(event);
xfer_len = MHI_TRE_GET_EV_LEN(event);
/* Received an out-of-bounds cookie */
WARN_ON(cookie >= buf_ring->len);
buf_info = buf_ring->base + cookie;
result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
result.bytes_xferd = xfer_len;
result.buf_addr = buf_info->cb_buf;
result.dir = mhi_chan->dir;
read_lock_bh(&mhi_chan->lock);
if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
goto end_process_rsc_event;
WARN_ON(!buf_info->used);
/* Notify the client */
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
* Note: We're arbitrarily incrementing RP even though the completion
* event we just processed might not correspond to it. We can do this
* because the device is guaranteed to cache descriptors in the order
* it receives them, so even though the completion event is for a
* different descriptor, we can re-use all descriptors in between.
* For example:
* The transfer ring has descriptors: A, B, C, D
* The last descriptor the host queued is D (WP) and the first one is A (RP).
* The completion event we just serviced is for descriptor C.
* Then we can safely queue descriptors to replace A, B, and C, even
* though the host has not received completions for them.
mhi_del_ring_element(mhi_cntrl, tre_ring);
buf_info->used = false;
end_process_rsc_event:
read_unlock_bh(&mhi_chan->lock);
static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
struct mhi_ring *mhi_ring = &cmd_ring->ring;
struct mhi_tre *cmd_pkt;
struct mhi_chan *mhi_chan;
cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
mhi_chan = &mhi_cntrl->mhi_chan[chan];
write_lock_bh(&mhi_chan->lock);
mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
complete(&mhi_chan->completion);
write_unlock_bh(&mhi_chan->lock);
mhi_del_ring_element(mhi_cntrl, mhi_ring);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event,
struct mhi_tre *dev_rp, *local_rp;
struct mhi_ring *ev_ring = &mhi_event->ring;
struct mhi_event_ctxt *er_ctxt =
&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
struct mhi_chan *mhi_chan;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
* This is a quick check to avoid unnecessary event processing
* in case MHI is already in an error state; it's still possible
* to transition to an error state while processing events.
if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
local_rp = ev_ring->rp;
while (dev_rp != local_rp) {
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
case MHI_PKT_TYPE_BW_REQ_EVENT:
struct mhi_link_info *link_info;
link_info = &mhi_cntrl->mhi_link_info;
write_lock_irq(&mhi_cntrl->pm_lock);
link_info->target_link_speed =
MHI_TRE_GET_EV_LINKSPEED(local_rp);
link_info->target_link_width =
MHI_TRE_GET_EV_LINKWIDTH(local_rp);
write_unlock_irq(&mhi_cntrl->pm_lock);
dev_dbg(dev, "Received BW_REQ event\n");
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
enum mhi_state new_state;
new_state = MHI_TRE_GET_EV_STATE(local_rp);
dev_dbg(dev, "State change event to state: %s\n",
TO_MHI_STATE_STR(new_state));
mhi_pm_m0_transition(mhi_cntrl);
mhi_pm_m1_transition(mhi_cntrl);
mhi_pm_m3_transition(mhi_cntrl);
case MHI_STATE_SYS_ERR:
enum mhi_pm_state new_state;
dev_dbg(dev, "System error detected\n");
write_lock_irq(&mhi_cntrl->pm_lock);
new_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (new_state == MHI_PM_SYS_ERR_DETECT)
schedule_work(&mhi_cntrl->syserr_worker);
dev_err(dev, "Invalid state: %s\n",
TO_MHI_STATE_STR(new_state));
case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
mhi_process_cmd_completion(mhi_cntrl, local_rp);
case MHI_PKT_TYPE_EE_EVENT:
enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
dev_dbg(dev, "Received EE event: %s\n",
TO_MHI_EXEC_STR(event));
st = DEV_ST_TRANSITION_SBL;
st = DEV_ST_TRANSITION_MISSION_MODE;
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->ee = event;
write_unlock_irq(&mhi_cntrl->pm_lock);
wake_up_all(&mhi_cntrl->state_event);
"Unhandled EE event: 0x%x\n", type);
if (st != DEV_ST_TRANSITION_MAX)
mhi_queue_state_transition(mhi_cntrl, st);
case MHI_PKT_TYPE_TX_EVENT:
chan = MHI_TRE_GET_EV_CHID(local_rp);
mhi_chan = &mhi_cntrl->mhi_chan[chan];
parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
dev_err(dev, "Unhandled event type: %d\n", type);
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
local_rp = ev_ring->rp;
dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
read_lock_bh(&mhi_cntrl->pm_lock);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
mhi_ring_er_db(mhi_event);
read_unlock_bh(&mhi_cntrl->pm_lock);
int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event,
struct mhi_tre *dev_rp, *local_rp;
struct mhi_ring *ev_ring = &mhi_event->ring;
struct mhi_event_ctxt *er_ctxt =
&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
struct mhi_chan *mhi_chan;
if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
local_rp = ev_ring->rp;
while (dev_rp != local_rp && event_quota > 0) {
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
chan = MHI_TRE_GET_EV_CHID(local_rp);
mhi_chan = &mhi_cntrl->mhi_chan[chan];
if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
local_rp = ev_ring->rp;
dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
read_lock_bh(&mhi_cntrl->pm_lock);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
mhi_ring_er_db(mhi_event);
read_unlock_bh(&mhi_cntrl->pm_lock);
void mhi_ev_task(unsigned long data)
struct mhi_event *mhi_event = (struct mhi_event *)data;
struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
/* Process all pending events */
spin_lock_bh(&mhi_event->lock);
mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
spin_unlock_bh(&mhi_event->lock);
void mhi_ctrl_ev_task(unsigned long data)
struct mhi_event *mhi_event = (struct mhi_event *)data;
struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state state;
enum mhi_pm_state pm_state = 0;
* We can check PM state w/o a lock here because there is no way
* PM state can change from reg access valid to no access while this
* thread is executing.
if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
* We may have a pending event, but we are not allowed to
* process it since we are probably in a suspended state,
* so trigger a resume.
mhi_cntrl->runtime_get(mhi_cntrl);
mhi_cntrl->runtime_put(mhi_cntrl);
/* Process ctrl events */
ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
* We received an IRQ but there are no events to process; maybe the
* device went to SYS_ERR state? Check the state to confirm.
write_lock_irq(&mhi_cntrl->pm_lock);
state = mhi_get_mhi_state(mhi_cntrl);
if (state == MHI_STATE_SYS_ERR) {
dev_dbg(dev, "System error detected\n");
pm_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
write_unlock_irq(&mhi_cntrl->pm_lock);
if (pm_state == MHI_PM_SYS_ERR_DETECT)
schedule_work(&mhi_cntrl->syserr_worker);
static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
struct mhi_ring *ring)
void *tmp = ring->wp + ring->el_size;
if (tmp >= (ring->base + ring->len))
return (tmp == ring->rp);
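/*
 * A ring is treated as full when advancing wp by one element (with
 * wrap-around) would make it equal to rp; one slot is always left unused
 * so that a full ring can be told apart from an empty one.
 */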
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct sk_buff *skb, size_t len, enum mhi_flags mflags)
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
struct mhi_buf_info *buf_info;
struct mhi_tre *mhi_tre;
/* If MHI host pre-allocates buffers then client drivers cannot queue */
if (mhi_chan->pre_alloc)
if (mhi_is_ring_full(mhi_cntrl, tre_ring))
read_lock_bh(&mhi_cntrl->pm_lock);
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
read_unlock_bh(&mhi_cntrl->pm_lock);
/* we're in M3 or transitioning to M3 */
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
mhi_cntrl->runtime_get(mhi_cntrl);
mhi_cntrl->runtime_put(mhi_cntrl);
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
/* Generate the TRE */
buf_info = buf_ring->wp;
buf_info->v_addr = skb->data;
buf_info->cb_buf = skb;
buf_info->wp = tre_ring->wp;
buf_info->dir = mhi_chan->dir;
ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
mhi_tre = tre_ring->wp;
mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
read_lock_bh(&mhi_chan->lock);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_bh(&mhi_chan->lock);
read_unlock_bh(&mhi_cntrl->pm_lock);
read_unlock_bh(&mhi_cntrl->pm_lock);
EXPORT_SYMBOL_GPL(mhi_queue_skb);
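/*
 * Illustrative usage sketch (hypothetical client code, not part of this
 * file): a client driver transmitting an skb on its UL channel might do
 * something like the following, where "mdev" is its struct mhi_device:
 *
 *	ret = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
 *	if (ret)
 *		return ret;	// ring full or device not ready
 *
 * The skb is handed back to the client through its xfer_cb() once the
 * corresponding completion event is processed above.
 */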
int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
struct mhi_buf_info *buf_info;
struct mhi_tre *mhi_tre;
/* If MHI host pre-allocates buffers then client drivers cannot queue */
if (mhi_chan->pre_alloc)
if (mhi_is_ring_full(mhi_cntrl, tre_ring))
read_lock_bh(&mhi_cntrl->pm_lock);
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
dev_err(dev, "MHI is not in an active state, PM state: %s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state));
read_unlock_bh(&mhi_cntrl->pm_lock);
/* we're in M3 or transitioning to M3 */
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
mhi_cntrl->runtime_get(mhi_cntrl);
mhi_cntrl->runtime_put(mhi_cntrl);
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
/* Generate the TRE */
buf_info = buf_ring->wp;
WARN_ON(buf_info->used);
buf_info->p_addr = mhi_buf->dma_addr;
buf_info->pre_mapped = true;
buf_info->cb_buf = mhi_buf;
buf_info->wp = tre_ring->wp;
buf_info->dir = mhi_chan->dir;
buf_info->len = len;
mhi_tre = tre_ring->wp;
mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
read_lock_bh(&mhi_chan->lock);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_bh(&mhi_chan->lock);
read_unlock_bh(&mhi_cntrl->pm_lock);
EXPORT_SYMBOL_GPL(mhi_queue_dma);
int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
void *buf, void *cb, size_t buf_len, enum mhi_flags flags)
struct mhi_ring *buf_ring, *tre_ring;
struct mhi_tre *mhi_tre;
struct mhi_buf_info *buf_info;
int eot, eob, chain, bei;
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
buf_info = buf_ring->wp;
buf_info->v_addr = buf;
buf_info->cb_buf = cb;
buf_info->wp = tre_ring->wp;
buf_info->dir = mhi_chan->dir;
buf_info->len = buf_len;
ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
eob = !!(flags & MHI_EOB);
eot = !!(flags & MHI_EOT);
chain = !!(flags & MHI_CHAIN);
bei = !!(mhi_chan->intmod);
mhi_tre = tre_ring->wp;
mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
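/*
 * In the TRE built above, eot/eob/chain mirror the MHI_EOT/MHI_EOB/
 * MHI_CHAIN flags passed by the caller, and bei ("block event interrupt",
 * as understood here) is set whenever interrupt moderation (intmod) is
 * configured for the channel so the device can coalesce completion
 * interrupts.
 */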
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
void *buf, size_t len, enum mhi_flags mflags)
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
struct mhi_ring *tre_ring;
unsigned long flags;
* This check is here only as a guard; it's always possible for MHI
* to enter an error state while executing the rest of the function,
* which is not fatal, so we do not need to hold pm_lock.
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
tre_ring = &mhi_chan->tre_ring;
if (mhi_is_ring_full(mhi_cntrl, tre_ring))
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
/* we're in M3 or transitioning to M3 */
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
mhi_cntrl->runtime_get(mhi_cntrl);
mhi_cntrl->runtime_put(mhi_cntrl);
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
unsigned long flags;
read_lock_irqsave(&mhi_chan->lock, flags);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_irqrestore(&mhi_chan->lock, flags);
read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
EXPORT_SYMBOL_GPL(mhi_queue_buf);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan,
enum mhi_cmd_type cmd)
struct mhi_tre *cmd_tre = NULL;
struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
struct mhi_ring *ring = &mhi_cmd->ring;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
chan = mhi_chan->chan;
spin_lock_bh(&mhi_cmd->lock);
if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
spin_unlock_bh(&mhi_cmd->lock);
/* Prepare the cmd TRE */
case MHI_CMD_RESET_CHAN:
cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
case MHI_CMD_START_CHAN:
cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
dev_err(dev, "Command not supported\n");
/* Queue to hardware */
mhi_add_ring_element(mhi_cntrl, ring);
read_lock_bh(&mhi_cntrl->pm_lock);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
read_unlock_bh(&mhi_cntrl->pm_lock);
spin_unlock_bh(&mhi_cmd->lock);
static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
struct device *dev = &mhi_cntrl->mhi_dev->dev;
dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
/* No more event processing for this channel */
mutex_lock(&mhi_chan->mutex);
write_lock_irq(&mhi_chan->lock);
if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
write_unlock_irq(&mhi_chan->lock);
mutex_unlock(&mhi_chan->mutex);
mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
write_unlock_irq(&mhi_chan->lock);
reinit_completion(&mhi_chan->completion);
read_lock_bh(&mhi_cntrl->pm_lock);
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
read_unlock_bh(&mhi_cntrl->pm_lock);
goto error_invalid_state;
mhi_cntrl->wake_toggle(mhi_cntrl);
read_unlock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->runtime_get(mhi_cntrl);
mhi_cntrl->runtime_put(mhi_cntrl);
ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
goto error_invalid_state;
/* Even if it fails, we will still reset */
ret = wait_for_completion_timeout(&mhi_chan->completion,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
"Failed to receive cmd completion, still resetting\n");
error_invalid_state:
if (!mhi_chan->offload_ch) {
mhi_reset_chan(mhi_cntrl, mhi_chan);
mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
mutex_unlock(&mhi_chan->mutex);
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
struct device *dev = &mhi_cntrl->mhi_dev->dev;
dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
"Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
mutex_lock(&mhi_chan->mutex);
/* If the channel is not in the disabled state, do not allow it to start */
if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
dev_dbg(dev, "channel: %d is not in disabled state\n",
goto error_init_chan;
/* Check if the client manages the channel context for offload channels */
if (!mhi_chan->offload_ch) {
ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
goto error_init_chan;
reinit_completion(&mhi_chan->completion);
read_lock_bh(&mhi_cntrl->pm_lock);
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
read_unlock_bh(&mhi_cntrl->pm_lock);
goto error_pm_state;
mhi_cntrl->wake_toggle(mhi_cntrl);
read_unlock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->runtime_get(mhi_cntrl);
mhi_cntrl->runtime_put(mhi_cntrl);
ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
goto error_pm_state;
ret = wait_for_completion_timeout(&mhi_chan->completion,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
goto error_pm_state;
write_lock_irq(&mhi_chan->lock);
mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
write_unlock_irq(&mhi_chan->lock);
/* Pre-allocate buffers for the xfer ring */
if (mhi_chan->pre_alloc) {
int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
&mhi_chan->tre_ring);
size_t len = mhi_cntrl->buffer_len;
buf = kmalloc(len, GFP_KERNEL);
goto error_pre_alloc;
/* Prepare transfer descriptors */
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf,
goto error_pre_alloc;
read_lock_bh(&mhi_cntrl->pm_lock);
if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
read_lock_irq(&mhi_chan->lock);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_irq(&mhi_chan->lock);
read_unlock_bh(&mhi_cntrl->pm_lock);
mutex_unlock(&mhi_chan->mutex);
dev_dbg(dev, "Chan: %d successfully moved to start state\n",
if (!mhi_chan->offload_ch)
mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
mutex_unlock(&mhi_chan->mutex);
mutex_unlock(&mhi_chan->mutex);
__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event,
struct mhi_event_ctxt *er_ctxt,
struct mhi_tre *dev_rp, *local_rp;
struct mhi_ring *ev_ring;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
unsigned long flags;
dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
ev_ring = &mhi_event->ring;
/* Mark all pending events related to the channel as STALE events */
spin_lock_irqsave(&mhi_event->lock, flags);
dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
local_rp = ev_ring->rp;
while (dev_rp != local_rp) {
if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
chan == MHI_TRE_GET_EV_CHID(local_rp))
local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
MHI_PKT_TYPE_STALE_EVENT);
if (local_rp == (ev_ring->base + ev_ring->len))
local_rp = ev_ring->base;
dev_dbg(dev, "Finished marking events as stale events\n");
spin_unlock_irqrestore(&mhi_event->lock, flags);
static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan)
struct mhi_ring *buf_ring, *tre_ring;
struct mhi_result result;
/* Reset any pending buffers */
buf_ring = &mhi_chan->buf_ring;
tre_ring = &mhi_chan->tre_ring;
result.transaction_status = -ENOTCONN;
result.bytes_xferd = 0;
while (tre_ring->rp != tre_ring->wp) {
struct mhi_buf_info *buf_info = buf_ring->rp;
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_dec(&mhi_cntrl->pending_pkts);
if (!buf_info->pre_mapped)
mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
mhi_del_ring_element(mhi_cntrl, buf_ring);
mhi_del_ring_element(mhi_cntrl, tre_ring);
if (mhi_chan->pre_alloc) {
kfree(buf_info->cb_buf);
result.buf_addr = buf_info->cb_buf;
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
struct mhi_event *mhi_event;
struct mhi_event_ctxt *er_ctxt;
int chan = mhi_chan->chan;
/* Nothing to reset, the client doesn't queue buffers */
if (mhi_chan->offload_ch)
read_lock_bh(&mhi_cntrl->pm_lock);
mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
mhi_reset_data_chan(mhi_cntrl, mhi_chan);
read_unlock_bh(&mhi_cntrl->pm_lock);
/* Move the channel to the start state */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan;
for (dir = 0; dir < 2; dir++) {
mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
goto error_open_chan;
for (--dir; dir >= 0; dir--) {
mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
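/*
 * Illustrative client flow (a sketch with hypothetical names, not part of
 * this file): a client driver typically starts its channels from probe(),
 * queues receive buffers, and tears down on remove():
 *
 *	ret = mhi_prepare_for_transfer(mhi_dev);
 *	if (ret)
 *		return ret;
 *	ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, rx_buf, rx_len,
 *			    MHI_EOT);
 *	...
 *	mhi_unprepare_from_transfer(mhi_dev);
 *
 * Completed buffers are delivered back through the channel's xfer_cb().
 */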
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan;
for (dir = 0; dir < 2; dir++) {
mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
spin_lock_bh(&mhi_event->lock);
ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
spin_unlock_bh(&mhi_event->lock);
EXPORT_SYMBOL_GPL(mhi_poll);