// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

static DEFINE_IDA(mhi_controller_ida);

const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PRIMARY BOOTLOADER",
	[MHI_EE_SBL] = "SECONDARY BOOTLOADER",
	[MHI_EE_AMSS] = "MISSION MODE",
	[MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
	[MHI_EE_WFW] = "WLAN FIRMWARE",
	[MHI_EE_PTHRU] = "PASS THROUGH",
	[MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
	[MHI_EE_FP] = "FLASH PROGRAMMER",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
	[DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
	[DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};

const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
	[MHI_CH_STATE_TYPE_RESET] = "RESET",
	[MHI_CH_STATE_TYPE_STOP] = "STOP",
	[MHI_CH_STATE_TYPE_START] = "START",
};

static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POWER ON RESET",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
};

const char *to_mhi_pm_state_str(u32 state)
{
	int index;

	if (state)
		index = __fls(state);

	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}
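
/*
 * Illustrative note: MHI PM states are single-bit masks, so __fls()
 * above recovers the table index. Assuming the usual bitmask macros,
 * to_mhi_pm_state_str(MHI_PM_M2) returns "M2", while a zero or
 * out-of-range state reads back as "Invalid State".
 */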

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return sysfs_emit(buf, "Serial Number: %u\n",
			mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
		cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
				i, mhi_cntrl->oem_pk_hash[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static ssize_t soc_reset_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	mhi_soc_reset(mhi_cntrl);
	return count;
}
static DEVICE_ATTR_WO(soc_reset);

static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	&dev_attr_soc_reset.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);

/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}
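
/*
 * Worked example (illustrative): for a ring of len 0x1000 the
 * allocation above is 0x1FFF bytes, so whatever dma_handle comes back,
 * (dma_handle + 0xFFF) & ~0xFFF is a 0x1000-aligned iommu_base with a
 * full 0x1000 bytes still inside the buffer. Note that the mask
 * arithmetic aligns to len only when len is a power of two.
 */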

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
	int i, ret;

	/* if controller driver has set irq_flags, use it */
	if (mhi_cntrl->irq_flags)
		irq_flags = mhi_cntrl->irq_flags;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   irq_flags,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  irq_flags,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}
	}

	return 0;

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}

void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}

int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = le32_to_cpu(chan_ctxt->chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
		chan_ctxt->chcfg = cpu_to_le32(tmp);

		chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
		chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = le32_to_cpu(er_ctxt->intmod);
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
		er_ctxt->intmod = cpu_to_le32(tmp);

		er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
		er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}
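
/*
 * Note on the unwind above: each error label only releases what was
 * allocated before the failure point, walking the command and event
 * rings backwards so a partially initialized context never leaks.
 */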

int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 val;
	} reg_info[] = {
		{ CCABAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr) },
		{ CCABAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr) },
		{ ECABAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr) },
		{ ECABAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr) },
		{ CRCBAP_HIGHER, upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr) },
		{ CRCBAP_LOWER, lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr) },
		{ MHICTRLBASE_HIGHER, upper_32_bits(mhi_cntrl->iova_start) },
		{ MHICTRLBASE_LOWER, lower_32_bits(mhi_cntrl->iova_start) },
		{ MHIDATABASE_HIGHER, upper_32_bits(mhi_cntrl->iova_start) },
		{ MHIDATABASE_LOWER, lower_32_bits(mhi_cntrl->iova_start) },
		{ MHICTRLLIMIT_HIGHER, upper_32_bits(mhi_cntrl->iova_stop) },
		{ MHICTRLLIMIT_LOWER, lower_32_bits(mhi_cntrl->iova_stop) },
		{ MHIDATALIMIT_HIGHER, upper_32_bits(mhi_cntrl->iova_stop) },
		{ MHIDATALIMIT_LOWER, lower_32_bits(mhi_cntrl->iova_stop) },
		{ 0, 0 }
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
			      reg_info[i].val);

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
				  mhi_cntrl->total_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
				  mhi_cntrl->hw_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	return 0;
}
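
/*
 * Doorbell layout, for reference: every channel and event ring owns an
 * 8-byte doorbell register, so channel n rings at base + CHDBOFF + 8 * n
 * and event ring n at base + ERDBOFF + 8 * n, which is what the two
 * loops above compute by stepping val in increments of 8.
 */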

void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	if (!chan_ctxt->rbase) /* Already uninitialized */
		return;

	dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	tre_ring->ctxt_wp = NULL;
	chan_ctxt->rbase = 0;
	chan_ctxt->rlen = 0;
	chan_ctxt->rp = 0;
	chan_ctxt->wp = 0;

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	/* Update to all cores */
	smp_wmb();
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_ring_element);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}

static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:

	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}
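
/*
 * Illustrative input for parse_ev_cfg() (made-up values, not part of
 * this file); a controller driver describes each ring roughly like:
 *
 *	static const struct mhi_event_config example_events[] = {
 *		{
 *			.num_elements = 32,
 *			.irq_moderation_ms = 1,
 *			.irq = 1,
 *			.channel = U32_MAX,
 *			.mode = MHI_DB_BRST_DISABLE,
 *			.data_type = MHI_ER_CTRL,
 *		},
 *	};
 *
 * A channel of U32_MAX marks a ring with no dedicated channel.
 */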

static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	u32 chan;
	int i;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so to avoid any possible memory allocation failures, vzalloc is
	 * used instead of kzalloc
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length should be bigger
		 * than the transfer ring length due to internal logical
		 * channels in the device, so the host can queue many more
		 * buffers than the transfer ring length. For example, RSC
		 * channels should have a larger local channel length than
		 * transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction. If it is not defined, assign the channel
		 * direction to chtype.
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->wake_capable = ch_cfg->wake_capable;

		/*
		 * If the MHI host allocates buffers, then the channel
		 * direction should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and direction-less channels must be
		 * offload channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid doorbell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}
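
/*
 * Illustrative input for parse_ch_cfg() (made-up values): a matching
 * channel entry from a controller driver might look like:
 *
 *	static const struct mhi_channel_config example_channels[] = {
 *		{
 *			.num = 20,
 *			.name = "IPCR",
 *			.num_elements = 8,
 *			.event_ring = 1,
 *			.dir = DMA_TO_DEVICE,
 *			.ee_mask = BIT(MHI_EE_AMSS),
 *			.doorbell = MHI_DB_BRST_DISABLE,
 *		},
 *	};
 */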

static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}

int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
	    !mhi_cntrl->irq || !mhi_cntrl->reg_len)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_event;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* used in setting bei field of TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto err_destroy_wq;

	mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
	mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
	mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
	mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);

	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto err_ida_free;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
	mhi_dev->name = dev_name(&mhi_dev->dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_release_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

err_release_dev:
	put_device(&mhi_dev->dev);
err_ida_free:
	ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_event:
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);
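
/*
 * Sketch of the registration flow from a controller driver's probe()
 * (hypothetical callback names; see the in-tree PCI glue driver for a
 * real example):
 *
 *	struct mhi_controller *mhi_cntrl = mhi_alloc_controller();
 *
 *	mhi_cntrl->cntrl_dev = &pdev->dev;
 *	mhi_cntrl->regs = iomem_base;
 *	mhi_cntrl->reg_len = iomem_len;
 *	mhi_cntrl->irq = irq_table;
 *	mhi_cntrl->nr_irqs = num_irqs;
 *	mhi_cntrl->read_reg = my_read_reg;
 *	mhi_cntrl->write_reg = my_write_reg;
 *	mhi_cntrl->runtime_get = my_runtime_get;
 *	mhi_cntrl->runtime_put = my_runtime_put;
 *	mhi_cntrl->status_cb = my_status_cb;
 *
 *	ret = mhi_register_controller(mhi_cntrl, &my_config);
 *
 * Every callback checked at the top of mhi_register_controller() must
 * be populated or the call fails with -EINVAL.
 */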

void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_destroy_debugfs(mhi_cntrl);

	destroy_workqueue(mhi_cntrl->hiprio_wq);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhi_off, bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
	if (ret) {
		dev_err(dev, "Error getting BHI offset\n");
		goto error_reg_offset;
	}

	if (bhi_off >= mhi_cntrl->reg_len) {
		dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
			bhi_off, mhi_cntrl->reg_len);
		ret = -EINVAL;
		goto error_reg_offset;
	}
	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;

	if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		if (bhie_off >= mhi_cntrl->reg_len) {
			dev_err(dev,
				"BHIe offset: 0x%x is out of range: 0x%zx\n",
				bhie_off, mhi_cntrl->reg_len);
			ret = -EINVAL;
			goto error_reg_offset;
		}
		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
	}

	if (mhi_cntrl->rddm_size) {
		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);
		/*
		 * Allocate RDDM table for debugging purposes, if specified
		 */
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);
		if (mhi_cntrl->rddm_image) {
			ret = mhi_rddm_prepare(mhi_cntrl,
					       mhi_cntrl->rddm_image);
			if (ret) {
				mhi_free_bhie_table(mhi_cntrl,
						    mhi_cntrl->rddm_image);
				goto error_reg_offset;
			}
		}
	}

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

error_reg_offset:
	mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
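
/*
 * Typical power sequencing for a controller driver (a sketch; the
 * ordering is by convention, not enforced here): call
 * mhi_prepare_for_power_up() to map BHI/BHIe and allocate the device
 * context, then mhi_sync_power_up() or mhi_async_power_up(); on
 * teardown, mhi_power_down() followed by
 * mhi_unprepare_after_power_down() below.
 */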

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_cntrl->bhi = NULL;
	mhi_cntrl->bhie = NULL;

	mhi_deinit_dev_ctxt(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with them is NULL. This scenario occurs during
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;

	if (mhi_cntrl->mhi_dev) {
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* for the MHI controller device, parent is the bus device (e.g. PCI device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}

static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If the channel supports LPM notifications, status_cb
		 * should be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If the channel supports LPM notifications, status_cb
		 * should be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by the client,
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}

static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
		     ch_state[dir] == MHI_CH_STATE_STOP) &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);
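
/*
 * Minimal client driver skeleton (illustrative; the channel name and
 * callback names are made up). Both probe and remove are mandatory, as
 * checked above:
 *
 *	static const struct mhi_device_id example_id_table[] = {
 *		{ .chan = "EXAMPLE" },
 *		{},
 *	};
 *
 *	static struct mhi_driver example_driver = {
 *		.id_table = example_id_table,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.ul_xfer_cb = example_ul_cb,
 *		.dl_xfer_cb = example_dl_cb,
 *		.driver = {
 *			.name = "example_mhi_client",
 *		},
 *	};
 *	module_mhi_driver(example_driver);
 */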

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
					mhi_dev->name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	mhi_debugfs_init();
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");