// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
			      void __iomem *base, u32 offset, u32 *out)
{
	return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
}

int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
				    void __iomem *base, u32 offset,
				    u32 mask, u32 shift, u32 *out)
{
	u32 tmp;
	int ret;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return ret;

	*out = (tmp & mask) >> shift;

	return 0;
}

void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
		   u32 offset, u32 val)
{
	mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
}

void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
			 u32 offset, u32 mask, u32 shift, u32 val)
{
	int ret;
	u32 tmp;

	ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
	if (ret)
		return;

	tmp &= ~mask;
	tmp |= (val << shift);
	mhi_write_reg(mhi_cntrl, base, offset, tmp);
}

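/*
 * The 64-bit doorbell value below is written as two 32-bit halves, upper
 * word first. Presumably the device acts on the low-word write, so this
 * ordering lets it sample a consistent 64-bit value.
 */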
void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
		  dma_addr_t db_val)
{
	mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
	mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
}

void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
		     struct db_cfg *db_cfg,
		     void __iomem *db_addr,
		     dma_addr_t db_val)
{
	if (db_cfg->db_mode) {
		db_cfg->db_val = db_val;
		mhi_write_db(mhi_cntrl, db_addr, db_val);
		db_cfg->db_mode = 0;
	}
}

void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
			     struct db_cfg *db_cfg,
			     void __iomem *db_addr,
			     dma_addr_t db_val)
{
	db_cfg->db_val = db_val;
	mhi_write_db(mhi_cntrl, db_addr, db_val);
}

void mhi_ring_er_db(struct mhi_event *mhi_event)
{
	struct mhi_ring *ring = &mhi_event->ring;

	mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
				     ring->db_addr, *ring->ctxt_wp);
}

void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
{
	dma_addr_t db;
	struct mhi_ring *ring = &mhi_cmd->ring;

	db = ring->iommu_base + (ring->wp - ring->base);
	*ring->ctxt_wp = db;
	mhi_write_db(mhi_cntrl, ring->db_addr, db);
}

void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
		      struct mhi_chan *mhi_chan)
{
	struct mhi_ring *ring = &mhi_chan->tre_ring;
	dma_addr_t db;

	db = ring->iommu_base + (ring->wp - ring->base);

	/*
	 * Writes to the new ring element must be visible to the hardware
	 * before letting h/w know there is a new element to fetch.
	 */
	dma_wmb();
	*ring->ctxt_wp = db;

	mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
				    ring->db_addr, db);
}

enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
{
	u32 exec;
	int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);

	return (ret) ? MHI_EE_MAX : exec;
}
EXPORT_SYMBOL_GPL(mhi_get_exec_env);

enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
{
	u32 state;
	int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				     MHISTATUS_MHISTATE_MASK,
				     MHISTATUS_MHISTATE_SHIFT, &state);

	return ret ? MHI_STATE_MAX : state;
}
EXPORT_SYMBOL_GPL(mhi_get_mhi_state);

void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->reset) {
		mhi_cntrl->reset(mhi_cntrl);
		return;
	}

	/* Generic MHI SoC reset */
	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
		      MHI_SOC_RESET_REQ);
}
EXPORT_SYMBOL_GPL(mhi_soc_reset);

int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
			 struct mhi_buf_info *buf_info)
{
	buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
					  buf_info->v_addr, buf_info->len,
					  buf_info->dir);
	if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
		return -ENOMEM;

	return 0;
}

int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf_info)
{
	void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
				       &buf_info->p_addr, GFP_ATOMIC);

	if (!buf)
		return -ENOMEM;

	if (buf_info->dir == DMA_TO_DEVICE)
		memcpy(buf, buf_info->v_addr, buf_info->len);

	buf_info->bb_addr = buf;

	return 0;
}

void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
			    struct mhi_buf_info *buf_info)
{
	dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
			 buf_info->dir);
}

void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf_info)
{
	if (buf_info->dir == DMA_FROM_DEVICE)
		memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);

	mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
			  buf_info->p_addr);
}

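/*
 * Ring occupancy is tracked with a read pointer (rp) and a write pointer
 * (wp); one element is always left unused so a full ring can be told
 * apart from an empty one. Worked example (hypothetical numbers): with
 * len = 4 * el_size, rp == base and wp == base + 2 * el_size, the
 * wp >= rp branch below yields 0 + (4 - 2) - 1 = 1 free element.
 */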
static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
				      struct mhi_ring *ring)
{
	int nr_el;

	if (ring->wp < ring->rp) {
		nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
	} else {
		nr_el = (ring->rp - ring->base) / ring->el_size;
		nr_el += ((ring->base + ring->len - ring->wp) /
			  ring->el_size) - 1;
	}

	return nr_el;
}

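/*
 * Translate a ring address as seen by the device (an offset from
 * iommu_base) back to the corresponding host virtual address inside the
 * ring buffer.
 */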
static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
	return (addr - ring->iommu_base) + ring->base;
}

static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->wp += ring->el_size;
	if (ring->wp >= (ring->base + ring->len))
		ring->wp = ring->base;
	/* smp update */
	smp_wmb();
}

static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
				 struct mhi_ring *ring)
{
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;
	/* smp update */
	smp_wmb();
}

int mhi_destroy_device(struct device *dev, void *data)
{
	struct mhi_chan *ul_chan, *dl_chan;
	struct mhi_device *mhi_dev;
	struct mhi_controller *mhi_cntrl;
	enum mhi_ee_type ee = MHI_EE_MAX;

	if (dev->bus != &mhi_bus_type)
		return 0;

	mhi_dev = to_mhi_device(dev);
	mhi_cntrl = mhi_dev->mhi_cntrl;

	/* Only destroy virtual devices that are attached to the bus */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	ul_chan = mhi_dev->ul_chan;
	dl_chan = mhi_dev->dl_chan;

	/*
	 * If an execution environment is specified, remove only those devices
	 * that were started in it, based on the ee_mask of the channels, as
	 * we move on to a different execution environment.
	 */
	if (data)
		ee = *(enum mhi_ee_type *)data;

	/*
	 * For the suspend and resume case, this function will get called
	 * without mhi_unregister_controller(). Hence, we need to drop the
	 * references to mhi_dev created for ul and dl channels. We can
	 * be sure that there will be no instances of mhi_dev left after
	 * this.
	 */
	if (ul_chan) {
		if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
			return 0;

		put_device(&ul_chan->mhi_dev->dev);
	}

	if (dl_chan) {
		if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
			return 0;

		put_device(&dl_chan->mhi_dev->dev);
	}

	dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
		mhi_dev->name);

	/* Notify the client and remove the device from MHI bus */
	device_del(dev);
	put_device(dev);

	return 0;
}

int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
			    enum dma_data_direction dir)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
		mhi_dev->ul_chan : mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

	return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);

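/*
 * A minimal client-side sketch (hypothetical driver code, not from this
 * file): back-pressure an upload path by checking the free TRE count
 * before queueing:
 *
 *	if (mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE) > 0)
 *		ret = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb,
 *				    skb->len, MHI_EOT);
 */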
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
	struct mhi_driver *mhi_drv;

	if (!mhi_dev->dev.driver)
		return;

	mhi_drv = to_mhi_driver(mhi_dev->dev.driver);

	if (mhi_drv->status_cb)
		mhi_drv->status_cb(mhi_dev, cb_reason);
}
EXPORT_SYMBOL_GPL(mhi_notify);

/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *mhi_chan;
	struct mhi_device *mhi_dev;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->configured || mhi_chan->mhi_dev ||
		    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
			continue;
		mhi_dev = mhi_alloc_device(mhi_cntrl);
		if (IS_ERR(mhi_dev))
			return;

		mhi_dev->dev_type = MHI_DEVICE_XFER;
		switch (mhi_chan->dir) {
		case DMA_TO_DEVICE:
			mhi_dev->ul_chan = mhi_chan;
			mhi_dev->ul_chan_id = mhi_chan->chan;
			break;
		case DMA_FROM_DEVICE:
			/* We use dl_chan for offload channels */
			mhi_dev->dl_chan = mhi_chan;
			mhi_dev->dl_chan_id = mhi_chan->chan;
			break;
		default:
			dev_err(dev, "Direction not supported\n");
			put_device(&mhi_dev->dev);
			return;
		}

		get_device(&mhi_dev->dev);
		mhi_chan->mhi_dev = mhi_dev;

		/* Check next channel if it matches */
		if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
			if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
				i++;
				mhi_chan++;
				if (mhi_chan->dir == DMA_TO_DEVICE) {
					mhi_dev->ul_chan = mhi_chan;
					mhi_dev->ul_chan_id = mhi_chan->chan;
				} else {
					mhi_dev->dl_chan = mhi_chan;
					mhi_dev->dl_chan_id = mhi_chan->chan;
				}
				get_device(&mhi_dev->dev);
				mhi_chan->mhi_dev = mhi_dev;
			}
		}

		/* Channel name is same for both UL and DL */
		mhi_dev->name = mhi_chan->name;
		dev_set_name(&mhi_dev->dev, "%s_%s",
			     dev_name(&mhi_cntrl->mhi_dev->dev),
			     mhi_dev->name);

		/* Init wakeup source if available */
		if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
			device_init_wakeup(&mhi_dev->dev, true);

		ret = device_add(&mhi_dev->dev);
		if (ret)
			put_device(&mhi_dev->dev);
	}
}

irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
	struct mhi_event *mhi_event = dev;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_ring *ev_ring = &mhi_event->ring;
	void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);

	/* Only proceed if event ring has pending events */
	if (ev_ring->rp == dev_rp)
		return IRQ_HANDLED;

	/* For client managed event ring, notify pending data */
	if (mhi_event->cl_manage) {
		struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
		struct mhi_device *mhi_dev = mhi_chan->mhi_dev;

		if (mhi_dev)
			mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
	} else {
		tasklet_schedule(&mhi_event->task);
	}

	return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
{
	struct mhi_controller *mhi_cntrl = priv;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	enum mhi_pm_state pm_state = 0;
	enum mhi_ee_type ee;

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto exit_intvec;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	ee = mhi_get_exec_env(mhi_cntrl);
	dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
		TO_MHI_EXEC_STR(mhi_cntrl->ee),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state),
		TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state));

	if (state == MHI_STATE_SYS_ERR) {
		dev_dbg(dev, "System error detected\n");
		pm_state = mhi_tryset_pm_state(mhi_cntrl,
					       MHI_PM_SYS_ERR_DETECT);
	}
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
		goto exit_intvec;

	switch (ee) {
	case MHI_EE_RDDM:
		/* proceed if power down is not already in progress */
		if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
			mhi_cntrl->ee = ee;
			wake_up_all(&mhi_cntrl->state_event);
		}
		break;
	case MHI_EE_PBL:
	case MHI_EE_EDL:
	case MHI_EE_PTHRU:
		mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
		mhi_cntrl->ee = ee;
		wake_up_all(&mhi_cntrl->state_event);
		mhi_pm_sys_err_handler(mhi_cntrl);
		break;
	default:
		wake_up_all(&mhi_cntrl->state_event);
		mhi_pm_sys_err_handler(mhi_cntrl);
		break;
	}

exit_intvec:

	return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
{
	struct mhi_controller *mhi_cntrl = dev;

	/* Wake up events waiting for state change */
	wake_up_all(&mhi_cntrl->state_event);

	return IRQ_WAKE_THREAD;
}

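/*
 * For event rings the host is the consumer, so recycling an element
 * advances both pointers: wp (and the context wp visible to the device)
 * hands the slot back to the device, while rp moves past the event that
 * was just processed.
 */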
static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
					struct mhi_ring *ring)
{
	dma_addr_t ctxt_wp;

	/* Update the WP */
	ring->wp += ring->el_size;
	ctxt_wp = *ring->ctxt_wp + ring->el_size;

	if (ring->wp >= (ring->base + ring->len)) {
		ring->wp = ring->base;
		ctxt_wp = ring->iommu_base;
	}

	*ring->ctxt_wp = ctxt_wp;

	/* Update the RP */
	ring->rp += ring->el_size;
	if (ring->rp >= (ring->base + ring->len))
		ring->rp = ring->base;

	/* Update to all cores */
	smp_wmb();
}

static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
			    struct mhi_tre *event,
			    struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct mhi_result result;
	unsigned long flags = 0;
	u32 ev_code;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;

	/*
	 * If it's a DB Event then we need to grab the lock
	 * with preemption disabled and as a write because we
	 * have to update the DB register and another thread
	 * could be doing the same.
	 */
	if (ev_code >= MHI_EV_CC_OOB)
		write_lock_irqsave(&mhi_chan->lock, flags);
	else
		read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_tx_event;

	switch (ev_code) {
	case MHI_EV_CC_OVERFLOW:
	case MHI_EV_CC_EOB:
	case MHI_EV_CC_EOT:
	{
		dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
		struct mhi_tre *local_rp, *ev_tre;
		void *dev_rp;
		struct mhi_buf_info *buf_info;
		u16 xfer_len;

		/* Get the TRB this event points to */
		ev_tre = mhi_to_virtual(tre_ring, ptr);

		dev_rp = ev_tre + 1;
		if (dev_rp >= (tre_ring->base + tre_ring->len))
			dev_rp = tre_ring->base;

		result.dir = mhi_chan->dir;

		local_rp = tre_ring->rp;
		while (local_rp != dev_rp) {
			buf_info = buf_ring->rp;
			/* If it's the last TRE, get length from the event */
			if (local_rp == ev_tre)
				xfer_len = MHI_TRE_GET_EV_LEN(event);
			else
				xfer_len = buf_info->len;

			/* Unmap if it's not pre-mapped by client */
			if (likely(!buf_info->pre_mapped))
				mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

			result.buf_addr = buf_info->cb_buf;

			/* truncate to buf len if xfer_len is larger */
			result.bytes_xferd =
				min_t(u16, xfer_len, buf_info->len);
			mhi_del_ring_element(mhi_cntrl, buf_ring);
			mhi_del_ring_element(mhi_cntrl, tre_ring);
			local_rp = tre_ring->rp;

			/* notify client */
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

			if (mhi_chan->dir == DMA_TO_DEVICE)
				atomic_dec(&mhi_cntrl->pending_pkts);

			/*
			 * Recycle the buffer if buffer is pre-allocated,
			 * if there is an error, not much we can do apart
			 * from dropping the packet
			 */
			if (mhi_chan->pre_alloc) {
				if (mhi_queue_buf(mhi_chan->mhi_dev,
						  mhi_chan->dir,
						  buf_info->cb_buf,
						  buf_info->len, MHI_EOT)) {
					dev_err(dev,
						"Error recycling buffer for chan:%d\n",
						mhi_chan->chan);
					kfree(buf_info->cb_buf);
				}
			}
		}
		break;
	} /* CC_EOT */
	case MHI_EV_CC_OOB:
	case MHI_EV_CC_DB_MODE:
	{
		unsigned long pm_lock_flags;

		mhi_chan->db_cfg.db_mode = 1;
		read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags);
		if (tre_ring->wp != tre_ring->rp &&
		    MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		}
		read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags);
		break;
	}
	case MHI_EV_CC_BAD_TRE:
	default:
		dev_err(dev, "Unknown event 0x%x\n", ev_code);
		break;
	} /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */

end_process_tx_event:
	if (ev_code >= MHI_EV_CC_OOB)
		write_unlock_irqrestore(&mhi_chan->lock, flags);
	else
		read_unlock_bh(&mhi_chan->lock);

	return 0;
}

static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
			   struct mhi_tre *event,
			   struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_buf_info *buf_info;
	struct mhi_result result;
	int ev_code;
	u32 cookie; /* offset to local descriptor */
	u16 xfer_len;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	ev_code = MHI_TRE_GET_EV_CODE(event);
	cookie = MHI_TRE_GET_EV_COOKIE(event);
	xfer_len = MHI_TRE_GET_EV_LEN(event);

	/* Received out-of-bounds cookie */
	WARN_ON(cookie >= buf_ring->len);

	buf_info = buf_ring->base + cookie;

	result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
		-EOVERFLOW : 0;

	/* truncate to buf len if xfer_len is larger */
	result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
	result.buf_addr = buf_info->cb_buf;
	result.dir = mhi_chan->dir;

	read_lock_bh(&mhi_chan->lock);

	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
		goto end_process_rsc_event;

	WARN_ON(!buf_info->used);

	/* notify the client */
	mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);

	/*
	 * Note: We're arbitrarily incrementing RP even though the completion
	 * packet we just processed might not correspond to that element. We
	 * can do this because the device is guaranteed to cache descriptors
	 * in the order it receives them, so even though this completion
	 * event is for a different descriptor we can reuse all descriptors
	 * in between.
	 * Example:
	 * Transfer Ring has descriptors: A, B, C, D
	 * Last descriptor the host queued is D (WP) and the first descriptor
	 * the host queued is A (RP).
	 * The completion event we just serviced is descriptor C.
	 * Then we can safely queue descriptors to replace A, B, and C
	 * even though host did not receive any completions.
	 */
	mhi_del_ring_element(mhi_cntrl, tre_ring);
	buf_info->used = false;

end_process_rsc_event:
	read_unlock_bh(&mhi_chan->lock);

	return 0;
}

static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
				       struct mhi_tre *tre)
{
	dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
	struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *mhi_ring = &cmd_ring->ring;
	struct mhi_tre *cmd_pkt;
	struct mhi_chan *mhi_chan;
	u32 chan;

	cmd_pkt = mhi_to_virtual(mhi_ring, ptr);

	chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
	mhi_chan = &mhi_cntrl->mhi_chan[chan];
	write_lock_bh(&mhi_chan->lock);
	mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
	complete(&mhi_chan->completion);
	write_unlock_bh(&mhi_chan->lock);

	mhi_del_ring_element(mhi_cntrl, mhi_ring);
}

int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
			     struct mhi_event *mhi_event,
			     u32 event_quota)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 chan;
	int count = 0;

	/*
	 * This is a quick check to avoid unnecessary event processing
	 * in case MHI is already in error state, but it's still possible
	 * to transition to error state while processing events
	 */
	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		switch (type) {
		case MHI_PKT_TYPE_BW_REQ_EVENT:
		{
			struct mhi_link_info *link_info;

			link_info = &mhi_cntrl->mhi_link_info;
			write_lock_irq(&mhi_cntrl->pm_lock);
			link_info->target_link_speed =
				MHI_TRE_GET_EV_LINKSPEED(local_rp);
			link_info->target_link_width =
				MHI_TRE_GET_EV_LINKWIDTH(local_rp);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_dbg(dev, "Received BW_REQ event\n");
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
			break;
		}
		case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
		{
			enum mhi_state new_state;

			new_state = MHI_TRE_GET_EV_STATE(local_rp);

			dev_dbg(dev, "State change event to state: %s\n",
				TO_MHI_STATE_STR(new_state));

			switch (new_state) {
			case MHI_STATE_M0:
				mhi_pm_m0_transition(mhi_cntrl);
				break;
			case MHI_STATE_M1:
				mhi_pm_m1_transition(mhi_cntrl);
				break;
			case MHI_STATE_M3:
				mhi_pm_m3_transition(mhi_cntrl);
				break;
			case MHI_STATE_SYS_ERR:
			{
				enum mhi_pm_state new_state;

				dev_dbg(dev, "System error detected\n");
				write_lock_irq(&mhi_cntrl->pm_lock);
				new_state = mhi_tryset_pm_state(mhi_cntrl,
							MHI_PM_SYS_ERR_DETECT);
				write_unlock_irq(&mhi_cntrl->pm_lock);
				if (new_state == MHI_PM_SYS_ERR_DETECT)
					mhi_pm_sys_err_handler(mhi_cntrl);
				break;
			}
			default:
				dev_err(dev, "Invalid state: %s\n",
					TO_MHI_STATE_STR(new_state));
			}

			break;
		}
		case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
			mhi_process_cmd_completion(mhi_cntrl, local_rp);
			break;
		case MHI_PKT_TYPE_EE_EVENT:
		{
			enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
			enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);

			dev_dbg(dev, "Received EE event: %s\n",
				TO_MHI_EXEC_STR(event));
			switch (event) {
			case MHI_EE_SBL:
				st = DEV_ST_TRANSITION_SBL;
				break;
			case MHI_EE_WFW:
			case MHI_EE_AMSS:
				st = DEV_ST_TRANSITION_MISSION_MODE;
				break;
			case MHI_EE_RDDM:
				mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
				write_lock_irq(&mhi_cntrl->pm_lock);
				mhi_cntrl->ee = event;
				write_unlock_irq(&mhi_cntrl->pm_lock);
				wake_up_all(&mhi_cntrl->state_event);
				break;
			default:
				dev_err(dev,
					"Unhandled EE event: 0x%x\n", type);
			}
			if (st != DEV_ST_TRANSITION_MAX)
				mhi_queue_state_transition(mhi_cntrl, st);

			break;
		}
		case MHI_PKT_TYPE_TX_EVENT:
			chan = MHI_TRE_GET_EV_CHID(local_rp);

			WARN_ON(chan >= mhi_cntrl->max_chan);

			/*
			 * Only process the event ring elements whose channel
			 * ID is within the maximum supported range.
			 */
			if (chan < mhi_cntrl->max_chan) {
				mhi_chan = &mhi_cntrl->mhi_chan[chan];
				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			}
			break;
		default:
			dev_err(dev, "Unhandled event type: %d\n", type);
			break;
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;
		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
		count++;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}

int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
				struct mhi_event *mhi_event,
				u32 event_quota)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring = &mhi_event->ring;
	struct mhi_event_ctxt *er_ctxt =
		&mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
	int count = 0;
	u32 chan;
	struct mhi_chan *mhi_chan;

	if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
		return -EIO;

	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
	local_rp = ev_ring->rp;

	while (dev_rp != local_rp && event_quota > 0) {
		enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);

		chan = MHI_TRE_GET_EV_CHID(local_rp);

		WARN_ON(chan >= mhi_cntrl->max_chan);

		/*
		 * Only process the event ring elements whose channel
		 * ID is within the maximum supported range.
		 */
		if (chan < mhi_cntrl->max_chan) {
			mhi_chan = &mhi_cntrl->mhi_chan[chan];

			if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
				parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			} else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
				parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
				event_quota--;
			}
		}

		mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
		local_rp = ev_ring->rp;
		dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
		count++;
	}
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_er_db(mhi_event);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return count;
}

void mhi_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;

	/* process all pending events */
	spin_lock_bh(&mhi_event->lock);
	mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
	spin_unlock_bh(&mhi_event->lock);
}

void mhi_ctrl_ev_task(unsigned long data)
{
	struct mhi_event *mhi_event = (struct mhi_event *)data;
	struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_state state;
	enum mhi_pm_state pm_state = 0;
	int ret;

	/*
	 * We can check PM state w/o a lock here because there is no way
	 * the PM state can change from reg access valid to no access while
	 * this thread is being executed.
	 */
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		/*
		 * We may have a pending event but are not allowed to
		 * process it since we are probably in a suspended state,
		 * so trigger a resume.
		 */
		mhi_trigger_resume(mhi_cntrl);

		return;
	}

	/* Process ctrl events */
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);

	/*
	 * We received an IRQ but no events to process, maybe device went to
	 * SYS_ERR state? Check the state to confirm.
	 */
	if (!ret) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		state = mhi_get_mhi_state(mhi_cntrl);
		if (state == MHI_STATE_SYS_ERR) {
			dev_dbg(dev, "System error detected\n");
			pm_state = mhi_tryset_pm_state(mhi_cntrl,
						       MHI_PM_SYS_ERR_DETECT);
		}
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (pm_state == MHI_PM_SYS_ERR_DETECT)
			mhi_pm_sys_err_handler(mhi_cntrl);
	}
}

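/*
 * The ring is considered full when advancing wp by one element would
 * land on rp; the slot kept empty is what distinguishes a full ring
 * from an empty one (wp == rp). For example, in a hypothetical
 * 4-element ring at most 3 elements can be outstanding at once.
 */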
static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
			     struct mhi_ring *ring)
{
	void *tmp = ring->wp + ring->el_size;

	if (tmp >= (ring->base + ring->len))
		tmp = ring->base;

	return (tmp == ring->rp);
}

static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
		     enum dma_data_direction dir, enum mhi_flags mflags)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
	unsigned long flags;
	int ret;

	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
		return -EIO;

	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);

	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
	if (unlikely(ret)) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
	if (unlikely(ret))
		goto exit_unlock;

	/* trigger M3 exit if necessary */
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	/* Assert dev_wake (to exit/prevent M1/M2) */
	mhi_cntrl->wake_toggle(mhi_cntrl);

	if (mhi_chan->dir == DMA_TO_DEVICE)
		atomic_inc(&mhi_cntrl->pending_pkts);

	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_chan_db(mhi_cntrl, mhi_chan);

exit_unlock:
	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

	return ret;
}

int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_buf_info buf_info = { };

	buf_info.v_addr = skb->data;
	buf_info.cb_buf = skb;
	buf_info.len = len;

	if (unlikely(mhi_chan->pre_alloc))
		return -EINVAL;

	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);

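/*
 * A minimal client sketch (hypothetical, not from this file): queue a
 * receive buffer on the DL channel and let the channel's xfer_cb hand
 * the skb back on completion (rx_len is a driver-chosen buffer size):
 *
 *	struct sk_buff *skb = alloc_skb(rx_len, GFP_KERNEL);
 *
 *	if (skb)
 *		ret = mhi_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, rx_len,
 *				    MHI_EOT);
 */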
int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
							     mhi_dev->dl_chan;
	struct mhi_buf_info buf_info = { };

	buf_info.p_addr = mhi_buf->dma_addr;
	buf_info.cb_buf = mhi_buf;
	buf_info.pre_mapped = true;
	buf_info.len = len;

	if (unlikely(mhi_chan->pre_alloc))
		return -EINVAL;

	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_dma);

int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
		struct mhi_buf_info *info, enum mhi_flags flags)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_tre *mhi_tre;
	struct mhi_buf_info *buf_info;
	int eot, eob, chain, bei;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;

	buf_info = buf_ring->wp;
	WARN_ON(buf_info->used);
	buf_info->pre_mapped = info->pre_mapped;
	if (info->pre_mapped)
		buf_info->p_addr = info->p_addr;
	else
		buf_info->v_addr = info->v_addr;
	buf_info->cb_buf = info->cb_buf;
	buf_info->wp = tre_ring->wp;
	buf_info->dir = mhi_chan->dir;
	buf_info->len = info->len;

	if (!info->pre_mapped) {
		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
		if (ret)
			return ret;
	}

	eob = !!(flags & MHI_EOB);
	eot = !!(flags & MHI_EOT);
	chain = !!(flags & MHI_CHAIN);
	bei = !!(mhi_chan->intmod);

	mhi_tre = tre_ring->wp;
	mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
	mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
	mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);

	/* increment WP */
	mhi_add_ring_element(mhi_cntrl, tre_ring);
	mhi_add_ring_element(mhi_cntrl, buf_ring);

	return 0;
}

int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
		  void *buf, size_t len, enum mhi_flags mflags)
{
	struct mhi_buf_info buf_info = { };

	buf_info.v_addr = buf;
	buf_info.cb_buf = buf;
	buf_info.len = len;

	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);

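/*
 * A minimal client sketch (hypothetical, not from this file):
 * mhi_queue_buf() is the raw-pointer variant; pairing it with
 * mhi_queue_is_full() avoids an -EAGAIN on a busy ring:
 *
 *	if (!mhi_queue_is_full(mhi_dev, DMA_TO_DEVICE))
 *		ret = mhi_queue_buf(mhi_dev, DMA_TO_DEVICE, buf, len,
 *				    MHI_EOT);
 */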
bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
		mhi_dev->ul_chan : mhi_dev->dl_chan;
	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

	return mhi_is_ring_full(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_queue_is_full);

int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
		 struct mhi_chan *mhi_chan,
		 enum mhi_cmd_type cmd)
{
	struct mhi_tre *cmd_tre = NULL;
	struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
	struct mhi_ring *ring = &mhi_cmd->ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int chan = 0;

	if (mhi_chan)
		chan = mhi_chan->chan;

	spin_lock_bh(&mhi_cmd->lock);
	if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
		spin_unlock_bh(&mhi_cmd->lock);
		return -ENOMEM;
	}

	/* prepare the cmd tre */
	cmd_tre = ring->wp;
	switch (cmd) {
	case MHI_CMD_RESET_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
		break;
	case MHI_CMD_START_CHAN:
		cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
		cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
		cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
		break;
	default:
		dev_err(dev, "Command not supported\n");
		break;
	}

	/* queue to hardware */
	mhi_add_ring_element(mhi_cntrl, ring);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
		mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	spin_unlock_bh(&mhi_cmd->lock);

	return 0;
}

static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
				    struct mhi_chan *mhi_chan)
{
	int ret;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);

	/* no more processing events for this channel */
	mutex_lock(&mhi_chan->mutex);
	write_lock_irq(&mhi_chan->lock);
	if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
	    mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
		write_unlock_irq(&mhi_chan->lock);
		mutex_unlock(&mhi_chan->mutex);
		return;
	}

	mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
	write_unlock_irq(&mhi_chan->lock);

	reinit_completion(&mhi_chan->completion);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		goto error_invalid_state;
	}

	mhi_cntrl->wake_toggle(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);
	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
	if (ret)
		goto error_invalid_state;

	/* even if it fails we will still reset */
	ret = wait_for_completion_timeout(&mhi_chan->completion,
					  msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
		dev_err(dev,
			"Failed to receive cmd completion, still resetting\n");

error_invalid_state:
	if (!mhi_chan->offload_ch) {
		mhi_reset_chan(mhi_cntrl, mhi_chan);
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
	}
	dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
	mutex_unlock(&mhi_chan->mutex);
}

int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
			struct mhi_chan *mhi_chan)
{
	int ret = 0;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);

	if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
		dev_err(dev,
			"Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
			TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
			mhi_chan->name);
		return -ENOTCONN;
	}

	mutex_lock(&mhi_chan->mutex);

	/* If channel is not in disabled state, do not allow it to start */
	if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
		ret = -EIO;
		dev_dbg(dev, "channel: %d is not in disabled state\n",
			mhi_chan->chan);
		goto error_init_chan;
	}

	/* For offload channels, the client manages the channel context */
	if (!mhi_chan->offload_ch) {
		ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_init_chan;
	}

	reinit_completion(&mhi_chan->completion);
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		ret = -EIO;
		goto error_pm_state;
	}

	mhi_cntrl->wake_toggle(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->runtime_get(mhi_cntrl);
	mhi_cntrl->runtime_put(mhi_cntrl);

	ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
	if (ret)
		goto error_pm_state;

	ret = wait_for_completion_timeout(&mhi_chan->completion,
					  msecs_to_jiffies(mhi_cntrl->timeout_ms));
	if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
		ret = -EIO;
		goto error_pm_state;
	}

	write_lock_irq(&mhi_chan->lock);
	mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
	write_unlock_irq(&mhi_chan->lock);

	/* Pre-allocate buffer for xfer ring */
	if (mhi_chan->pre_alloc) {
		int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
						       &mhi_chan->tre_ring);
		size_t len = mhi_cntrl->buffer_len;

		while (nr_el--) {
			void *buf;
			struct mhi_buf_info info = { };
			buf = kmalloc(len, GFP_KERNEL);
			if (!buf) {
				ret = -ENOMEM;
				goto error_pre_alloc;
			}

			/* Prepare transfer descriptors */
			info.v_addr = buf;
			info.cb_buf = buf;
			info.len = len;
			ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
			if (ret) {
				kfree(buf);
				goto error_pre_alloc;
			}
		}

		read_lock_bh(&mhi_cntrl->pm_lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
			read_lock_irq(&mhi_chan->lock);
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
			read_unlock_irq(&mhi_chan->lock);
		}
		read_unlock_bh(&mhi_cntrl->pm_lock);
	}

	mutex_unlock(&mhi_chan->mutex);

	dev_dbg(dev, "Chan: %d successfully moved to start state\n",
		mhi_chan->chan);

	return 0;

error_pm_state:
	if (!mhi_chan->offload_ch)
		mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

error_init_chan:
	mutex_unlock(&mhi_chan->mutex);

	return ret;

error_pre_alloc:
	mutex_unlock(&mhi_chan->mutex);
	__mhi_unprepare_channel(mhi_cntrl, mhi_chan);

	return ret;
}

static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
				  struct mhi_event *mhi_event,
				  struct mhi_event_ctxt *er_ctxt,
				  int chan)
{
	struct mhi_tre *dev_rp, *local_rp;
	struct mhi_ring *ev_ring;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long flags;

	dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);

	ev_ring = &mhi_event->ring;

	/* mark all stale events related to channel as STALE event */
	spin_lock_irqsave(&mhi_event->lock, flags);
	dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);

	local_rp = ev_ring->rp;
	while (dev_rp != local_rp) {
		if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
		    chan == MHI_TRE_GET_EV_CHID(local_rp))
			local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
					MHI_PKT_TYPE_STALE_EVENT);
		local_rp++;
		if (local_rp == (ev_ring->base + ev_ring->len))
			local_rp = ev_ring->base;
	}

	dev_dbg(dev, "Finished marking events as stale events\n");
	spin_unlock_irqrestore(&mhi_event->lock, flags);
}

static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
				struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring, *tre_ring;
	struct mhi_result result;

	/* Reset any pending buffers */
	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	result.transaction_status = -ENOTCONN;
	result.bytes_xferd = 0;
	while (tre_ring->rp != tre_ring->wp) {
		struct mhi_buf_info *buf_info = buf_ring->rp;

		if (mhi_chan->dir == DMA_TO_DEVICE)
			atomic_dec(&mhi_cntrl->pending_pkts);

		if (!buf_info->pre_mapped)
			mhi_cntrl->unmap_single(mhi_cntrl, buf_info);

		mhi_del_ring_element(mhi_cntrl, buf_ring);
		mhi_del_ring_element(mhi_cntrl, tre_ring);

		if (mhi_chan->pre_alloc) {
			kfree(buf_info->cb_buf);
		} else {
			result.buf_addr = buf_info->cb_buf;
			mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
		}
	}
}

void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
{
	struct mhi_event *mhi_event;
	struct mhi_event_ctxt *er_ctxt;
	int chan = mhi_chan->chan;

	/* Nothing to reset, client doesn't queue buffers */
	if (mhi_chan->offload_ch)
		return;

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];

	mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);

	mhi_reset_data_chan(mhi_cntrl, mhi_chan);

	read_unlock_bh(&mhi_cntrl->pm_lock);
}

/* Move channel to start state */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
{
	int ret, dir;
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
		if (ret)
			goto error_open_chan;
	}

	return 0;

error_open_chan:
	for (--dir; dir >= 0; dir--) {
		mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
		if (!mhi_chan)
			continue;

		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);

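/*
 * A typical client lifecycle sketch (hypothetical driver code, not from
 * this file): start both channels in probe, exchange data, then tear
 * down in remove:
 *
 *	ret = mhi_prepare_for_transfer(mhi_dev);
 *	if (ret)
 *		return ret;
 *	...
 *	mhi_unprepare_from_transfer(mhi_dev);
 */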
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	int dir;

	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
		if (!mhi_chan)
			continue;

		__mhi_unprepare_channel(mhi_cntrl, mhi_chan);
	}
}
EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);

int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
	struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
	int ret;

	spin_lock_bh(&mhi_event->lock);
	ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
	spin_unlock_bh(&mhi_event->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_poll);
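
/*
 * A polled-mode sketch (hypothetical): a NAPI-style client loop can
 * drain up to budget events from the DL channel's event ring:
 *
 *	int rx = mhi_poll(mhi_dev, budget);
 *
 * The return value is whatever the ring's process_event() callback
 * returns, i.e. the number of events processed or a negative errno.
 */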