// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"

/*
 * Not all MHI state transitions are synchronous. Transitions like Linkdown,
 * SYS_ERR, and shutdown can happen anytime asynchronously. This function will
 * transition to a new state only if we're allowed to.
 *
 * Priority increases as we go down. For instance, from any state in L0, the
 * transition can be made to states in L1, L2 and L3. A notable exception to
 * this rule is state DISABLE. From DISABLE state we can only transition to
 * POR state. Also, while in L2 state, the host cannot jump back to previous
 * L1 or L0 states.
 *
 * Valid transitions:
 * L0: DISABLE <--> POR
 *     POR <--> POR
 *     POR -> M0 -> M2 --> M0
 *     POR -> FW_DL_ERR
 *     FW_DL_ERR <--> FW_DL_ERR
 *     M0 <--> M0
 *     M0 -> FW_DL_ERR
 *     M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
 * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
 * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
 *     SHUTDOWN_PROCESS -> DISABLE
 * L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
 *     LD_ERR_FATAL_DETECT -> DISABLE
 */
static struct mhi_pm_transitions const dev_state_transitions[] = {
	/* L0 States */
	{
		MHI_PM_DISABLE,
		MHI_PM_POR
	},
	{
		MHI_PM_POR,
		MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M0,
		MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
		MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
	},
	{
		MHI_PM_M2,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_ENTER,
		MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3,
		MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_M3_EXIT,
		MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_FW_DL_ERR,
		MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
		MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L1 States */
	{
		MHI_PM_SYS_ERR_DETECT,
		MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	{
		MHI_PM_SYS_ERR_PROCESS,
		MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
		MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L2 States */
	{
		MHI_PM_SHUTDOWN_PROCESS,
		MHI_PM_DISABLE | MHI_PM_LD_ERR_FATAL_DETECT
	},
	/* L3 States */
	{
		MHI_PM_LD_ERR_FATAL_DETECT,
		MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_DISABLE
	},
};
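
/*
 * Illustrative note, not taken from the MHI spec: each MHI_PM_* value is a
 * single bit, so find_last_bit() on the current pm_state yields the bit
 * position, which doubles as that state's row index in the table above.
 * Assuming (hypothetically) MHI_PM_M0 is BIT(2), a sketch of the lookup:
 *
 *	unsigned long cur_state = MHI_PM_M0;
 *	int index = find_last_bit(&cur_state, 32);	// index == 2
 *
 *	// dev_state_transitions[2].from_state == MHI_PM_M0
 *	// MHI_PM_M3_ENTER is in .to_states, so M0 -> M3_ENTER is allowed,
 *	// while a direct M0 -> M3 request would be rejected.
 */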

/* Must be called with mhi_cntrl->pm_lock held in write mode */
enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cntrl,
						   enum mhi_pm_state state)
{
	unsigned long cur_state = mhi_cntrl->pm_state;
	int index = find_last_bit(&cur_state, 32);

	if (unlikely(index >= ARRAY_SIZE(dev_state_transitions)))
		return cur_state;

	if (unlikely(dev_state_transitions[index].from_state != cur_state))
		return cur_state;

	if (unlikely(!(dev_state_transitions[index].to_states & state)))
		return cur_state;

	mhi_cntrl->pm_state = state;
	return mhi_cntrl->pm_state;
}
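
/*
 * Typical caller pattern, shown here only as a usage sketch (this exact
 * sequence recurs throughout this file): take pm_lock in write mode,
 * attempt the transition, then compare the returned state against the
 * requested one:
 *
 *	enum mhi_pm_state cur_state;
 *
 *	write_lock_irq(&mhi_cntrl->pm_lock);
 *	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
 *	write_unlock_irq(&mhi_cntrl->pm_lock);
 *	if (cur_state != MHI_PM_M0)
 *		return -EIO;	// transition was rejected
 */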

void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl, enum mhi_state state)
{
	if (state == MHI_STATE_RESET) {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
	} else {
		mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				    MHICTRL_MHISTATE_MASK,
				    MHICTRL_MHISTATE_SHIFT, state);
	}
}

/* NOP for backward compatibility, host allowed to ring DB in M2 state */
static void mhi_toggle_dev_wake_nop(struct mhi_controller *mhi_cntrl)
{
}

static void mhi_toggle_dev_wake(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->wake_get(mhi_cntrl, false);
	mhi_cntrl->wake_put(mhi_cntrl, true);
}

/* Handle device ready state transition */
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	enum mhi_pm_state cur_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 interval_us = 25000; /* poll register field every 25 milliseconds */
	int ret, i;

	/* Check if device entered error state */
	if (MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device link is not accessible\n");
		return -EIO;
	}

	/* Wait for RESET to be cleared and READY bit to be set by the device */
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
				 interval_us);
	if (ret) {
		dev_err(dev, "Device failed to clear MHI Reset\n");
		return ret;
	}

	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
				 MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT, 1,
				 interval_us);
	if (ret) {
		dev_err(dev, "Device failed to enter MHI Ready\n");
		return ret;
	}

	dev_dbg(dev, "Device in READY State\n");
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
	mhi_cntrl->dev_state = MHI_STATE_READY;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_POR) {
		dev_err(dev, "Error moving to state %s from %s\n",
			to_mhi_pm_state_str(MHI_PM_POR),
			to_mhi_pm_state_str(cur_state));
		return -EIO;
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
		dev_err(dev, "Device registers not accessible\n");
		goto error_mmio;
	}

	/* Configure MMIO registers */
	ret = mhi_init_mmio(mhi_cntrl);
	if (ret) {
		dev_err(dev, "Error configuring MMIO registers\n");
		goto error_mmio;
	}

	/* Add elements to all SW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if this is an offload or HW event */
		if (mhi_event->offload_ev || mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update all cores */
		smp_wmb();

		/* Ring the event ring db */
		spin_lock_irq(&mhi_event->lock);
		mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	/* Set MHI to M0 state */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;

error_mmio:
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return -EIO;
}

int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_chan *mhi_chan;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M0;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_M0)) {
		dev_err(dev, "Unable to transition to M0 state\n");
		return -EIO;
	}
	mhi_cntrl->M0++;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, true);

	/* Ring all event rings and CMD ring only if we're in mission mode */
	if (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) {
		struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
		struct mhi_cmd *mhi_cmd =
			&mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];

		for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
			if (mhi_event->offload_ev)
				continue;

			spin_lock_irq(&mhi_event->lock);
			mhi_ring_er_db(mhi_event);
			spin_unlock_irq(&mhi_event->lock);
		}

		/* Only ring primary cmd ring if ring is not empty */
		spin_lock_irq(&mhi_cmd->lock);
		if (mhi_cmd->ring.rp != mhi_cmd->ring.wp)
			mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
		spin_unlock_irq(&mhi_cmd->lock);
	}

	/* Ring channel DB registers */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		struct mhi_ring *tre_ring = &mhi_chan->tre_ring;

		if (mhi_chan->db_cfg.reset_req) {
			write_lock_irq(&mhi_chan->lock);
			mhi_chan->db_cfg.db_mode = true;
			write_unlock_irq(&mhi_chan->lock);
		}

		read_lock_irq(&mhi_chan->lock);

		/* Only ring DB if ring is not empty */
		if (tre_ring->base && tre_ring->wp != tre_ring->rp)
			mhi_ring_chan_db(mhi_cntrl, mhi_chan);
		read_unlock_irq(&mhi_chan->lock);
	}

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/*
 * After receiving the MHI state change event from the device indicating the
 * transition to M1 state, the host can transition the device to M2 state
 * for keeping it in low power state.
 */
void mhi_pm_m1_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M2);
	if (state == MHI_PM_M2) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M2);
		mhi_cntrl->dev_state = MHI_STATE_M2;

		write_unlock_irq(&mhi_cntrl->pm_lock);

		mhi_cntrl->M2++;
		wake_up_all(&mhi_cntrl->state_event);

		/* If there are any pending resources, exit M2 immediately */
		if (unlikely(atomic_read(&mhi_cntrl->pending_pkts) ||
			     atomic_read(&mhi_cntrl->dev_wake))) {
			dev_dbg(dev,
				"Exiting M2, pending_pkts: %d dev_wake: %d\n",
				atomic_read(&mhi_cntrl->pending_pkts),
				atomic_read(&mhi_cntrl->dev_wake));
			read_lock_bh(&mhi_cntrl->pm_lock);
			mhi_cntrl->wake_get(mhi_cntrl, true);
			mhi_cntrl->wake_put(mhi_cntrl, true);
			read_unlock_bh(&mhi_cntrl->pm_lock);
		} else {
			mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_IDLE);
		}
	} else {
		write_unlock_irq(&mhi_cntrl->pm_lock);
	}
}

/* MHI M3 completion handler */
int mhi_pm_m3_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	write_lock_irq(&mhi_cntrl->pm_lock);
	mhi_cntrl->dev_state = MHI_STATE_M3;
	state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (state != MHI_PM_M3) {
		dev_err(dev, "Unable to transition to M3 state\n");
		return -EIO;
	}
	mhi_cntrl->M3++;

	wake_up_all(&mhi_cntrl->state_event);

	return 0;
}

/* Handle device Mission Mode transition */
static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_ee_type ee = MHI_EE_MAX, current_ee = mhi_cntrl->ee;
	int i, ret;

	dev_dbg(dev, "Processing Mission Mode transition\n");

	write_lock_irq(&mhi_cntrl->pm_lock);
	if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
		ee = mhi_get_exec_env(mhi_cntrl);

	if (!MHI_IN_MISSION_MODE(ee)) {
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
		write_unlock_irq(&mhi_cntrl->pm_lock);
		wake_up_all(&mhi_cntrl->state_event);
		return -EIO;
	}
	mhi_cntrl->ee = ee;
	write_unlock_irq(&mhi_cntrl->pm_lock);

	wake_up_all(&mhi_cntrl->state_event);

	device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
			      mhi_destroy_device);
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);

	/* Force MHI to be in M0 state before continuing */
	ret = __mhi_device_get_sync(mhi_cntrl);
	if (ret)
		return ret;

	read_lock_bh(&mhi_cntrl->pm_lock);

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		ret = -EIO;
		goto error_mission_mode;
	}

	/* Add elements to all HW event rings */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev || !mhi_event->hw_ring)
			continue;

		ring->wp = ring->base + ring->len - ring->el_size;
		*ring->ctxt_wp = ring->iommu_base + ring->len - ring->el_size;
		/* Update to all cores */
		smp_wmb();

		spin_lock_irq(&mhi_event->lock);
		if (MHI_DB_ACCESS_VALID(mhi_cntrl))
			mhi_ring_er_db(mhi_event);
		spin_unlock_irq(&mhi_event->lock);
	}

	read_unlock_bh(&mhi_cntrl->pm_lock);

	/*
	 * The MHI devices are only created when the client device switches its
	 * Execution Environment (EE) to either SBL or AMSS states
	 */
	mhi_create_devices(mhi_cntrl);

	read_lock_bh(&mhi_cntrl->pm_lock);

error_mission_mode:
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return ret;
}

/* Handle shutdown transitions */
static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Processing disable transition with PM state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state));

	mutex_lock(&mhi_cntrl->pm_mutex);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
					 MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
					 25000);
		if (ret)
			dev_err(dev, "Device failed to clear MHI Reset\n");

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Move to disable state */
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_DISABLE);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	if (unlikely(cur_state != MHI_PM_DISABLE))
		dev_err(dev, "Error moving from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_DISABLE));

	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Handle system error transitions */
static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
{
	enum mhi_pm_state cur_state, prev_state;
	enum dev_st_transition next_state;
	struct mhi_event *mhi_event;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event_ctxt *er_ctxt;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret, i;

	dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));

	/* We must notify MHI control driver so it can clean up first */
	mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_SYS_ERROR);

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	prev_state = mhi_cntrl->pm_state;
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_SYS_ERR_PROCESS);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	if (cur_state != MHI_PM_SYS_ERR_PROCESS) {
		dev_err(dev, "Failed to transition from PM state: %s to: %s\n",
			to_mhi_pm_state_str(cur_state),
			to_mhi_pm_state_str(MHI_PM_SYS_ERR_PROCESS));
		goto exit_sys_error_transition;
	}

	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	/* Wake up threads waiting for state transition */
	wake_up_all(&mhi_cntrl->state_event);

	/* Trigger MHI RESET so that the device will not access host memory */
	if (MHI_REG_ACCESS_VALID(prev_state)) {
		u32 in_reset = -1;
		unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

		dev_dbg(dev, "Triggering MHI Reset in device\n");
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);

		/* Wait for the reset bit to be cleared by the device */
		ret = wait_event_timeout(mhi_cntrl->state_event,
					 mhi_read_reg_field(mhi_cntrl,
							    mhi_cntrl->regs,
							    MHICTRL,
							    MHICTRL_RESET_MASK,
							    MHICTRL_RESET_SHIFT,
							    &in_reset) ||
					 !in_reset, timeout);
		if (!ret || in_reset) {
			dev_err(dev, "Device failed to exit MHI Reset state\n");
			goto exit_sys_error_transition;
		}

		/*
		 * Device will clear BHI_INTVEC as a part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	dev_dbg(dev,
		"Waiting for all pending event ring processing to complete\n");
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;
		tasklet_kill(&mhi_event->task);
	}

	/* Release lock and wait for all pending threads to complete */
	mutex_unlock(&mhi_cntrl->pm_mutex);
	dev_dbg(dev, "Waiting for all pending threads to complete\n");
	wake_up_all(&mhi_cntrl->state_event);

	dev_dbg(dev, "Reset all active channels and remove MHI devices\n");
	device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device);

	mutex_lock(&mhi_cntrl->pm_mutex);

	WARN_ON(atomic_read(&mhi_cntrl->dev_wake));
	WARN_ON(atomic_read(&mhi_cntrl->pending_pkts));

	/* Reset the ev rings and cmd rings */
	dev_dbg(dev, "Resetting EV CTXT and CMD CTXT\n");
	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_cntrl->mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->rp = ring->base;
		ring->wp = ring->base;
		cmd_ctxt->rp = cmd_ctxt->rbase;
		cmd_ctxt->wp = cmd_ctxt->rbase;
	}

	mhi_event = mhi_cntrl->mhi_event;
	er_ctxt = mhi_cntrl->mhi_ctxt->er_ctxt;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
	     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip offload events */
		if (mhi_event->offload_ev)
			continue;

		ring->rp = ring->base;
		ring->wp = ring->base;
		er_ctxt->rp = er_ctxt->rbase;
		er_ctxt->wp = er_ctxt->rbase;
	}

	/* Transition to next state */
	if (MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) {
		write_lock_irq(&mhi_cntrl->pm_lock);
		cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_POR);
		write_unlock_irq(&mhi_cntrl->pm_lock);
		if (cur_state != MHI_PM_POR) {
			dev_err(dev, "Error moving to state %s from %s\n",
				to_mhi_pm_state_str(MHI_PM_POR),
				to_mhi_pm_state_str(cur_state));
			goto exit_sys_error_transition;
		}
		next_state = DEV_ST_TRANSITION_PBL;
	} else {
		next_state = DEV_ST_TRANSITION_READY;
	}

	mhi_queue_state_transition(mhi_cntrl, next_state);

exit_sys_error_transition:
	dev_dbg(dev, "Exiting with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	mutex_unlock(&mhi_cntrl->pm_mutex);
}

/* Queue a new work item and schedule work */
int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
			       enum dev_st_transition state)
{
	struct state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
	unsigned long flags;

	if (!item)
		return -ENOMEM;

	item->state = state;
	spin_lock_irqsave(&mhi_cntrl->transition_lock, flags);
	list_add_tail(&item->node, &mhi_cntrl->transition_list);
	spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags);

	queue_work(mhi_cntrl->hiprio_wq, &mhi_cntrl->st_worker);

	return 0;
}
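
/*
 * Usage sketch (illustrative only): other parts of the core queue
 * transitions from atomic context and let mhi_pm_st_worker() below do the
 * heavy lifting, e.g. from an interrupt handler once the device reports a
 * mission mode execution environment:
 *
 *	mhi_queue_state_transition(mhi_cntrl,
 *				   DEV_ST_TRANSITION_MISSION_MODE);
 *
 * The list item above is allocated with GFP_ATOMIC precisely so that this
 * is safe to call from IRQ or tasklet context.
 */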

/* SYS_ERR worker */
void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	/* skip if controller supports RDDM */
	if (mhi_cntrl->rddm_image) {
		dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
		return;
	}

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_SYS_ERR);
}

/* Device State Transition worker */
void mhi_pm_st_worker(struct work_struct *work)
{
	struct state_transition *itr, *tmp;
	LIST_HEAD(head);
	struct mhi_controller *mhi_cntrl = container_of(work,
							struct mhi_controller,
							st_worker);
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	spin_lock_irq(&mhi_cntrl->transition_lock);
	list_splice_tail_init(&mhi_cntrl->transition_list, &head);
	spin_unlock_irq(&mhi_cntrl->transition_lock);

	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		dev_dbg(dev, "Handling state transition: %s\n",
			TO_DEV_STATE_TRANS_STR(itr->state));

		switch (itr->state) {
		case DEV_ST_TRANSITION_PBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state))
				mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_fw_load_handler(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SBL:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_SBL;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			/*
			 * The MHI devices are only created when the client
			 * device switches its Execution Environment (EE) to
			 * either SBL or AMSS states
			 */
			mhi_create_devices(mhi_cntrl);
			if (mhi_cntrl->fbc_download)
				mhi_download_amss_image(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_MISSION_MODE:
			mhi_pm_mission_mode_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_FP:
			write_lock_irq(&mhi_cntrl->pm_lock);
			mhi_cntrl->ee = MHI_EE_FP;
			write_unlock_irq(&mhi_cntrl->pm_lock);
			mhi_create_devices(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_READY:
			mhi_ready_state_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_SYS_ERR:
			mhi_pm_sys_error_transition(mhi_cntrl);
			break;
		case DEV_ST_TRANSITION_DISABLE:
			mhi_pm_disable_transition(mhi_cntrl);
			break;
		default:
			break;
		}
		kfree(itr);
	}
}

int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state new_state;
	int ret;

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return -EINVAL;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	/* Return busy if there are any pending resources */
	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts))
		return -EBUSY;

	/* Take MHI out of M2 state */
	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_get(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 mhi_cntrl->dev_state == MHI_STATE_M1 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	read_lock_bh(&mhi_cntrl->pm_lock);
	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Could not enter M0/M1 state");
		return -EIO;
	}

	write_lock_irq(&mhi_cntrl->pm_lock);

	if (atomic_read(&mhi_cntrl->dev_wake) ||
	    atomic_read(&mhi_cntrl->pending_pkts)) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		return -EBUSY;
	}

	dev_dbg(dev, "Allowing M3 transition\n");
	new_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_ENTER);
	if (new_state != MHI_PM_M3_ENTER) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_ENTER),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M3 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
	write_unlock_irq(&mhi_cntrl->pm_lock);
	dev_dbg(dev, "Waiting for M3 completion\n");

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M3 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M3 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Notify clients about entering LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
		mutex_unlock(&itr->mutex);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_suspend);

int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
{
	struct mhi_chan *itr, *tmp;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	enum mhi_pm_state cur_state;
	int ret;

	dev_dbg(dev, "Entered with PM state: %s, MHI state: %s\n",
		to_mhi_pm_state_str(mhi_cntrl->pm_state),
		TO_MHI_STATE_STR(mhi_cntrl->dev_state));

	if (mhi_cntrl->pm_state == MHI_PM_DISABLE)
		return 0;

	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))
		return -EIO;

	if (mhi_get_mhi_state(mhi_cntrl) != MHI_STATE_M3)
		return -EINVAL;

	/* Notify clients about exiting LPM */
	list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
		mutex_lock(&itr->mutex);
		if (itr->mhi_dev)
			mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
		mutex_unlock(&itr->mutex);
	}

	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_tryset_pm_state(mhi_cntrl, MHI_PM_M3_EXIT);
	if (cur_state != MHI_PM_M3_EXIT) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		dev_err(dev,
			"Error setting to PM state: %s from: %s\n",
			to_mhi_pm_state_str(MHI_PM_M3_EXIT),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	/* Set MHI to M0 and wait for completion */
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->dev_state == MHI_STATE_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		dev_err(dev,
			"Did not enter M0 state, MHI state: %s, PM state: %s\n",
			TO_MHI_STATE_STR(mhi_cntrl->dev_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mhi_pm_resume);
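
/*
 * Illustrative only: a controller (glue) driver would typically wire
 * mhi_pm_suspend()/mhi_pm_resume() into its own PM callbacks. The callback
 * names below are hypothetical and not part of this file:
 *
 *	static int __maybe_unused my_glue_suspend(struct device *dev)
 *	{
 *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *		return mhi_pm_suspend(mhi_cntrl);	// may return -EBUSY
 *	}
 *
 *	static int __maybe_unused my_glue_resume(struct device *dev)
 *	{
 *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *		return mhi_pm_resume(mhi_cntrl);
 *	}
 *
 * A -EBUSY return from mhi_pm_suspend() means clients still hold wake votes
 * or packets are in flight, so the suspend attempt should be retried or
 * aborted.
 */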

int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
{
	int ret;

	/* Wake up the device */
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}
	mhi_cntrl->wake_get(mhi_cntrl, true);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->pm_state == MHI_PM_M0 ||
				 MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));

	if (!ret || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
		read_lock_bh(&mhi_cntrl->pm_lock);
		mhi_cntrl->wake_put(mhi_cntrl, false);
		read_unlock_bh(&mhi_cntrl->pm_lock);
		return -EIO;
	}

	return 0;
}

/* Assert device wake db */
static void mhi_assert_dev_wake(struct mhi_controller *mhi_cntrl, bool force)
{
	unsigned long flags;

	/*
	 * If force flag is set, then increment the wake count value and
	 * ring wake db
	 */
	if (unlikely(force)) {
		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		atomic_inc(&mhi_cntrl->dev_wake);
		if (MHI_WAKE_DB_FORCE_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	} else {
		/*
		 * If resources are already requested, then just increment
		 * the wake count value and return
		 */
		if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, 1, 0)))
			return;

		spin_lock_irqsave(&mhi_cntrl->wlock, flags);
		if ((atomic_inc_return(&mhi_cntrl->dev_wake) == 1) &&
		    MHI_WAKE_DB_SET_VALID(mhi_cntrl->pm_state) &&
		    !mhi_cntrl->wake_set) {
			mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 1);
			mhi_cntrl->wake_set = true;
		}
		spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
	}
}

/* De-assert device wake db */
static void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl,
				  bool override)
{
	unsigned long flags;

	/*
	 * Only continue if there is a single resource, else just decrement
	 * and return
	 */
	if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
		return;

	spin_lock_irqsave(&mhi_cntrl->wlock, flags);
	if ((atomic_dec_return(&mhi_cntrl->dev_wake) == 0) &&
	    MHI_WAKE_DB_CLEAR_VALID(mhi_cntrl->pm_state) && !override &&
	    mhi_cntrl->wake_set) {
		mhi_write_db(mhi_cntrl, mhi_cntrl->wake_db, 0);
		mhi_cntrl->wake_set = false;
	}
	spin_unlock_irqrestore(&mhi_cntrl->wlock, flags);
}
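
/*
 * Explanatory note on the vote accounting above (commentary, not from the
 * MHI spec): only the 0 -> 1 transition of dev_wake rings the wake doorbell
 * and only the 1 -> 0 transition clears it; atomic_add_unless() keeps the
 * common case (voting while other votes are already held) lock-free:
 *
 *	mhi_cntrl->wake_get(mhi_cntrl, false);	// 0 -> 1: rings wake db
 *	mhi_cntrl->wake_get(mhi_cntrl, false);	// 1 -> 2: counter only
 *	mhi_cntrl->wake_put(mhi_cntrl, false);	// 2 -> 1: counter only
 *	mhi_cntrl->wake_put(mhi_cntrl, false);	// 1 -> 0: clears wake db
 */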

int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
{
	enum mhi_state state;
	enum mhi_ee_type current_ee;
	enum dev_st_transition next_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 val;
	int ret;

	dev_info(dev, "Requested to power ON\n");

	/* Supply default wake routines if not provided by controller driver */
	if (!mhi_cntrl->wake_get || !mhi_cntrl->wake_put ||
	    !mhi_cntrl->wake_toggle) {
		mhi_cntrl->wake_get = mhi_assert_dev_wake;
		mhi_cntrl->wake_put = mhi_deassert_dev_wake;
		mhi_cntrl->wake_toggle = (mhi_cntrl->db_access & MHI_PM_M2) ?
			mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
	}

	mutex_lock(&mhi_cntrl->pm_mutex);
	mhi_cntrl->pm_state = MHI_PM_DISABLE;

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto error_setup_irq;

	/* Setup BHI offset & INTVEC */
	write_lock_irq(&mhi_cntrl->pm_lock);
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &val);
	if (ret) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		goto error_bhi_offset;
	}

	mhi_cntrl->bhi = mhi_cntrl->regs + val;

	/* Setup BHIE offset */
	if (mhi_cntrl->fbc_download) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF, &val);
		if (ret) {
			write_unlock_irq(&mhi_cntrl->pm_lock);
			dev_err(dev, "Error reading BHIE offset\n");
			goto error_bhi_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + val;
	}

	mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	mhi_cntrl->pm_state = MHI_PM_POR;
	mhi_cntrl->ee = MHI_EE_MAX;
	current_ee = mhi_get_exec_env(mhi_cntrl);
	write_unlock_irq(&mhi_cntrl->pm_lock);

	/* Confirm that the device is in valid exec env */
	if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
		dev_err(dev, "Not a valid EE for power on\n");
		ret = -EIO;
		goto error_bhi_offset;
	}

	state = mhi_get_mhi_state(mhi_cntrl);
	if (state == MHI_STATE_SYS_ERR) {
		mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
		ret = wait_event_timeout(mhi_cntrl->state_event,
				MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
					mhi_read_reg_field(mhi_cntrl,
							   mhi_cntrl->regs,
							   MHICTRL,
							   MHICTRL_RESET_MASK,
							   MHICTRL_RESET_SHIFT,
							   &val) ||
					!val,
				msecs_to_jiffies(mhi_cntrl->timeout_ms));
		if (!ret) {
			ret = -EIO;
			dev_info(dev, "Failed to reset MHI due to syserr state\n");
			goto error_bhi_offset;
		}

		/*
		 * Device clears BHI_INTVEC as part of RESET processing,
		 * hence re-program it
		 */
		mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
	}

	/* Transition to next state */
	next_state = MHI_IN_PBL(current_ee) ?
		DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;

	mhi_queue_state_transition(mhi_cntrl, next_state);

	mutex_unlock(&mhi_cntrl->pm_mutex);

	dev_info(dev, "Power on setup success\n");

	return 0;

error_bhi_offset:
	mhi_deinit_free_irq(mhi_cntrl);

error_setup_irq:
	mhi_cntrl->pm_state = MHI_PM_DISABLE;
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_async_power_up);

void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
	enum mhi_pm_state cur_state, transition_state;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;

	mutex_lock(&mhi_cntrl->pm_mutex);
	write_lock_irq(&mhi_cntrl->pm_lock);
	cur_state = mhi_cntrl->pm_state;
	if (cur_state == MHI_PM_DISABLE) {
		write_unlock_irq(&mhi_cntrl->pm_lock);
		mutex_unlock(&mhi_cntrl->pm_mutex);
		return; /* Already powered down */
	}

	/* If it's not a graceful shutdown, force MHI to linkdown state */
	transition_state = (graceful) ? MHI_PM_SHUTDOWN_PROCESS :
			   MHI_PM_LD_ERR_FATAL_DETECT;

	cur_state = mhi_tryset_pm_state(mhi_cntrl, transition_state);
	if (cur_state != transition_state) {
		dev_err(dev, "Failed to move to state: %s from: %s\n",
			to_mhi_pm_state_str(transition_state),
			to_mhi_pm_state_str(mhi_cntrl->pm_state));
		/* Force link down or error fatal detected state */
		mhi_cntrl->pm_state = MHI_PM_LD_ERR_FATAL_DETECT;
	}

	/* mark device inactive to avoid any further host processing */
	mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
	mhi_cntrl->dev_state = MHI_STATE_RESET;

	wake_up_all(&mhi_cntrl->state_event);

	write_unlock_irq(&mhi_cntrl->pm_lock);
	mutex_unlock(&mhi_cntrl->pm_mutex);

	mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE);

	/* Wait for shutdown to complete */
	flush_work(&mhi_cntrl->st_worker);

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_power_down);

int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
{
	int ret = mhi_async_power_up(mhi_cntrl);

	if (ret)
		return ret;

	wait_event_timeout(mhi_cntrl->state_event,
			   MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
			   MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
			   msecs_to_jiffies(mhi_cntrl->timeout_ms));

	ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
	if (ret)
		mhi_power_down(mhi_cntrl, false);

	return ret;
}
EXPORT_SYMBOL(mhi_sync_power_up);
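
/*
 * End-to-end usage sketch (hypothetical controller driver, assuming the
 * controller was already registered with mhi_register_controller()):
 *
 *	ret = mhi_sync_power_up(mhi_cntrl);	// blocks until mission mode
 *	if (ret)
 *		return ret;			// timed out or error state
 *	...
 *	mhi_power_down(mhi_cntrl, true);	// graceful shutdown
 *
 * mhi_async_power_up() can be used instead when the caller does not want to
 * block waiting for the device to reach mission mode.
 */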

int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int ret;

	/* Check if device is already in RDDM */
	if (mhi_cntrl->ee == MHI_EE_RDDM)
		return 0;

	dev_dbg(dev, "Triggering SYS_ERR to force RDDM state\n");
	mhi_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);

	/* Wait for RDDM event */
	ret = wait_event_timeout(mhi_cntrl->state_event,
				 mhi_cntrl->ee == MHI_EE_RDDM,
				 msecs_to_jiffies(mhi_cntrl->timeout_ms));
	ret = ret ? 0 : -EIO;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_force_rddm_mode);

void mhi_device_get(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake++;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_get(mhi_cntrl, true);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_get);

int mhi_device_get_sync(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int ret;

	ret = __mhi_device_get_sync(mhi_cntrl);
	if (!ret)
		mhi_dev->dev_wake++;

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_device_get_sync);

void mhi_device_put(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_dev->dev_wake--;
	read_lock_bh(&mhi_cntrl->pm_lock);
	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
		mhi_trigger_resume(mhi_cntrl);

	mhi_cntrl->wake_put(mhi_cntrl, false);
	read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
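
/*
 * Client usage sketch (illustrative only): wake votes must be balanced. A
 * client driver holding a vote around device I/O might do:
 *
 *	ret = mhi_device_get_sync(mhi_dev);	// vote and wait for M0
 *	if (ret)
 *		return ret;
 *	... perform transfers ...
 *	mhi_device_put(mhi_dev);		// drop the vote
 *
 * mhi_device_get() is the non-blocking variant: it votes and triggers a
 * resume if needed, but does not wait for the device to reach M0.
 */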