// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

static DEFINE_IDA(mhi_controller_ida);

const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PRIMARY BOOTLOADER",
	[MHI_EE_SBL] = "SECONDARY BOOTLOADER",
	[MHI_EE_AMSS] = "MISSION MODE",
	[MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
	[MHI_EE_WFW] = "WLAN FIRMWARE",
	[MHI_EE_PTHRU] = "PASS THROUGH",
	[MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
	[MHI_EE_FP] = "FLASH PROGRAMMER",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
	[DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
	[DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};

const char * const mhi_state_str[MHI_STATE_MAX] = {
	[MHI_STATE_RESET] = "RESET",
	[MHI_STATE_READY] = "READY",
	[MHI_STATE_M0] = "M0",
	[MHI_STATE_M1] = "M1",
	[MHI_STATE_M2] = "M2",
	[MHI_STATE_M3] = "M3",
	[MHI_STATE_M3_FAST] = "M3 FAST",
	[MHI_STATE_BHI] = "BHI",
	[MHI_STATE_SYS_ERR] = "SYS ERROR",
};

const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
	[MHI_CH_STATE_TYPE_RESET] = "RESET",
	[MHI_CH_STATE_TYPE_STOP] = "STOP",
	[MHI_CH_STATE_TYPE_START] = "START",
};

static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POWER ON RESET",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
};

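/*
 * The runtime MHI_PM_* state values are one-hot bit masks, so the helper
 * below maps a state to a string via its highest set bit rather than its
 * raw value. For example (illustrative), a state value of BIT(2) resolves
 * to index 2 in mhi_pm_state_str[], i.e. "M0".
 */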
const char *to_mhi_pm_state_str(enum mhi_pm_state state)
{
	int index = find_last_bit((unsigned long *)&state, 32);

	if (index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n",
			mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
				"OEMPKHASH[%d]: 0x%x\n", i,
				mhi_cntrl->oem_pk_hash[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);

/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}

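/*
 * Illustrative example of the alignment scheme above (values hypothetical):
 * for len = 0x1000, alloc_size becomes 0x1fff, which guarantees that a
 * 0x1000-aligned block of 0x1000 bytes exists inside the allocation. If
 * dma_handle were 0x80000234, iommu_base would round up to 0x80001000 and
 * base would point at the matching offset within pre_aligned. Note the
 * rounding mask assumes len is a power of two.
 */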
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
	int i, ret;

	/* if controller driver has set irq_flags, use it */
	if (mhi_cntrl->irq_flags)
		irq_flags = mhi_cntrl->irq_flags;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   irq_flags,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  irq_flags,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}
	}

	return 0;

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}

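/*
 * Note (assumption based on the handlers named above): irq[0] (BHI_INTVEC)
 * is requested as a threaded interrupt because state-change handling may
 * sleep, while event ring interrupts use plain handlers that defer work to
 * the tasklets initialized in mhi_register_controller() below. The error
 * path walks back only over IRQs that were actually requested.
 */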
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}

int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = chan_ctxt->chcfg;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
		chan_ctxt->chcfg = tmp;

		chan_ctxt->chtype = mhi_chan->type;
		chan_ctxt->erindex = mhi_chan->er_index;

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = er_ctxt->intmod;
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
		er_ctxt->intmod = tmp;

		er_ctxt->ertype = MHI_ER_TYPE_VALID;
		er_ctxt->msivec = mhi_event->irq;
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_tre);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = ring->iommu_base;
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = ring->len;
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_tre);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = ring->iommu_base;
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = ring->len;
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}

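/*
 * mhi_init_mmio() below programs the context base/limit registers from a
 * declarative table of { offset, mask, shift, val } tuples; an entry with
 * a zero offset terminates the table, which is what the final write loop
 * keys off. The error unwind above mirrors allocation order in reverse.
 */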
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 mask;
		u32 shift;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
			mhi_cntrl->total_ev_rings,
		},
		{
			MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
			mhi_cntrl->hw_ev_rings,
		},
		{
			MHICTRLBASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{ 0, 0, 0 }
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
				 CHDBOFF_CHDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
				 ERDBOFF_ERDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
				    reg_info[i].mask, reg_info[i].shift,
				    reg_info[i].val);

	return 0;
}

void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	if (!chan_ctxt->rbase) /* Already uninitialized */
		return;

	mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	tre_ring->ctxt_wp = NULL;
	chan_ctxt->rbase = 0;
	chan_ctxt->rlen = 0;
	chan_ctxt->rp = 0;
	chan_ctxt->wp = 0;

	tmp = chan_ctxt->chcfg;
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
	chan_ctxt->chcfg = tmp;

	/* Update to all cores */
	smp_wmb();
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_tre);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = chan_ctxt->chcfg;
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
	chan_ctxt->chcfg = tmp;

	chan_ctxt->rbase = tre_ring->iommu_base;
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = tre_ring->len;
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}

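/*
 * Illustrative pairing of the two helpers above (hypothetical caller,
 * e.g. channel prepare/unprepare in the core):
 *
 *	ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
 *	if (ret)
 *		return ret;
 *	... queue transfers on the channel ...
 *	mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
 */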
static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:
	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}

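/*
 * A minimal, hypothetical event ring entry as a controller driver might
 * define it for the parsing above (field values illustrative only):
 *
 *	static const struct mhi_event_config example_event = {
 *		.num_elements = 32,
 *		.irq_moderation_ms = 0,
 *		.irq = 1,
 *		.channel = U32_MAX,	(no dedicated channel)
 *		.mode = MHI_DB_BRST_DISABLE,
 *		.data_type = MHI_ER_CTRL,
 *	};
 */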
static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so to avoid any possible memory allocation failures, vzalloc is
	 * used instead of kzalloc
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length should be bigger
		 * than the transfer ring length due to internal logical
		 * channels in the device, so the host can queue more buffers
		 * than the transfer ring length. For example, RSC channels
		 * should have a larger local channel length than transfer
		 * ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction. So, if it is not defined then assign the channel
		 * direction to chtype
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;

		/*
		 * If the MHI host allocates buffers, then the channel
		 * direction should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and directionless channels must be offload
		 * channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid Door bell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}

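/*
 * A minimal, hypothetical channel entry matching the parsing above (values
 * illustrative only):
 *
 *	static const struct mhi_channel_config example_chan = {
 *		.name = "LOOPBACK",
 *		.num = 0,
 *		.num_elements = 64,
 *		.event_ring = 1,
 *		.dir = DMA_TO_DEVICE,
 *		.ee_mask = BIT(MHI_EE_AMSS),
 *		.doorbell = MHI_DB_BRST_DISABLE,
 *	};
 */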
static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}

int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs || !mhi_cntrl->irq)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_event;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* used in setting bei field of TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto err_destroy_wq;

	mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
					SOC_HW_VERSION_FAM_NUM_SHFT;
	mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
					SOC_HW_VERSION_DEV_NUM_SHFT;
	mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
					SOC_HW_VERSION_MAJOR_VER_SHFT;
	mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
					SOC_HW_VERSION_MINOR_VER_SHFT;

	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto err_ida_free;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
	mhi_dev->name = dev_name(&mhi_dev->dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_release_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

err_release_dev:
	put_device(&mhi_dev->dev);
err_ida_free:
	ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_event:
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);

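/*
 * Sketch of a controller driver's registration path (hypothetical; a real
 * user is drivers/bus/mhi/pci_generic.c). All callbacks checked at the top
 * of mhi_register_controller() must be populated first:
 *
 *	mhi_cntrl = mhi_alloc_controller();
 *	mhi_cntrl->cntrl_dev = &pdev->dev;
 *	mhi_cntrl->regs = iomem_base;
 *	mhi_cntrl->irq = irq_array;
 *	mhi_cntrl->nr_irqs = nr_irqs;
 *	mhi_cntrl->runtime_get = ...; mhi_cntrl->runtime_put = ...;
 *	mhi_cntrl->status_cb = ...;
 *	mhi_cntrl->read_reg = ...; mhi_cntrl->write_reg = ...;
 *	ret = mhi_register_controller(mhi_cntrl, &config);
 */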
void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_destroy_debugfs(mhi_cntrl);

	destroy_workqueue(mhi_cntrl->hiprio_wq);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	/*
	 * Allocate RDDM table if specified; this table is used for debugging
	 */
	if (mhi_cntrl->rddm_size) {
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);

		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);

		if (mhi_cntrl->rddm_image)
			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
	}

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

error_reg_offset:
	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_deinit_dev_ctxt(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

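/*
 * Typical lifecycle, as implied by the two helpers above (the power up/down
 * APIs live in pm.c): mhi_prepare_for_power_up() once before the first
 * power up, then mhi_unprepare_after_power_down() after the final power
 * down, which frees the BHIE tables and the device context.
 */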
static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with it is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;

	if (mhi_cntrl->mhi_dev) {
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* for MHI controller device, parent is the bus device (e.g. pci device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}

static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If the channel supports LPM notifications then status_cb
		 * should be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If the channel supports LPM notifications then status_cb
		 * should be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}

static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
		     ch_state[dir] == MHI_CH_STATE_STOP) &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

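/*
 * A minimal, hypothetical client driver registration. The id_table must be
 * zero-terminated so that mhi_match() below can stop at id->chan[0] == 0:
 *
 *	static const struct mhi_device_id example_ids[] = {
 *		{ .chan = "LOOPBACK" },
 *		{},
 *	};
 *	static struct mhi_driver example_driver = {
 *		.id_table = example_ids,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.ul_xfer_cb = example_ul_cb,
 *		.dl_xfer_cb = example_dl_cb,
 *		.driver = { .name = "example" },
 *	};
 *	module_mhi_driver(example_driver);
 */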
static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
					mhi_dev->name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	mhi_debugfs_init();
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");