// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PBL",
	[MHI_EE_SBL] = "SBL",
	[MHI_EE_AMSS] = "AMSS",
	[MHI_EE_RDDM] = "RDDM",
	[MHI_EE_WFW] = "WFW",
	[MHI_EE_PTHRU] = "PASS THRU",
	[MHI_EE_EDL] = "EDL",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
};

const char * const mhi_state_str[MHI_STATE_MAX] = {
	[MHI_STATE_RESET] = "RESET",
	[MHI_STATE_READY] = "READY",
	[MHI_STATE_M0] = "M0",
	[MHI_STATE_M1] = "M1",
	[MHI_STATE_M2] = "M2",
	[MHI_STATE_M3] = "M3",
	[MHI_STATE_M3_FAST] = "M3_FAST",
	[MHI_STATE_BHI] = "BHI",
	[MHI_STATE_SYS_ERR] = "SYS_ERR",
};

static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POR",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
};

const char *to_mhi_pm_state_str(enum mhi_pm_state state)
{
	int index = find_last_bit((unsigned long *)&state, 32);

	if (index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}

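/*
 * Note: MHI PM states are stored as single-bit masks, so find_last_bit()
 * turns the mask into its bit position, which doubles as the index into
 * mhi_pm_state_str[] above (e.g. a state of BIT(5) maps to "M3"). This
 * assumes the MHI_PM_* mask definitions keep the same ordering as the
 * index enum used by the string table.
 */
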
/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}

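/*
 * Worked example (illustrative numbers only): with len = 0x1000 and a
 * dma_handle of 0x80000420, alloc_size becomes 0x1fff, iommu_base rounds
 * up to 0x80001000, and base points 0xbe0 bytes past pre_aligned. The
 * len - 1 over-allocation guarantees a len-aligned window always fits,
 * assuming len is a power of two (the mask math above requires it).
 */
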
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   IRQF_SHARED | IRQF_NO_SUSPEND,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  IRQF_SHARED | IRQF_NO_SUSPEND,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}
	}

	return 0;

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}

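/*
 * The error path above unwinds in reverse registration order so that only
 * IRQs that were actually requested get freed; offload event rings never
 * requested one and are skipped on both the setup and teardown paths.
 */
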
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}

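/*
 * mhi_init_dev_ctxt() allocates the channel, event, and command context
 * arrays in DMA-coherent memory and carves out the rings they describe;
 * mhi_deinit_dev_ctxt() above is its exact inverse. The device reads ring
 * pointers from these contexts directly, which is why coherent memory is
 * used here.
 */
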
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = chan_ctxt->chcfg;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
		chan_ctxt->chcfg = tmp;

		chan_ctxt->chtype = mhi_chan->type;
		chan_ctxt->erindex = mhi_chan->er_index;

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = er_ctxt->intmod;
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
		er_ctxt->intmod = tmp;

		er_ctxt->ertype = MHI_ER_TYPE_VALID;
		er_ctxt->msivec = mhi_event->irq;
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_tre);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = ring->iommu_base;
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = ring->len;
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_tre);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = ring->iommu_base;
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = ring->len;
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}

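/*
 * mhi_init_mmio() below programs the device-visible registers: the
 * context array base addresses (CCABAP/ECABAP/CRCBAP), the ring counts
 * in MHICFG, and the addressable IOVA window. The offset/mask/value
 * tuples are collected in a local table and written in a single pass at
 * the end of the function.
 */
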
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 mask;
		u32 shift;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
			mhi_cntrl->total_ev_rings,
		},
		{
			MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
			mhi_cntrl->hw_ev_rings,
		},
		{
			MHICTRLBASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{ 0, 0, 0 }
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
				 CHDBOFF_CHDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
				 ERDBOFF_ERDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers; the zero-offset sentinel ends the table */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
				    reg_info[i].mask, reg_info[i].shift,
				    reg_info[i].val);

	return 0;
}

void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	chan_ctxt->rbase = 0;
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_tre);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = chan_ctxt->chcfg;
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
	chan_ctxt->chcfg = tmp;

	chan_ctxt->rbase = tre_ring->iommu_base;
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = tre_ring->len;
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}

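/*
 * A note on the smp_wmb() above: it publishes the ring and context
 * initialization to other CPUs before the channel is used, since doorbell
 * rings and event processing may run on a different core than the one
 * that set the rings up. The pairing read-side ordering is assumed to
 * live with the consumers of these fields.
 */
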
static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_event_config *event_cfg;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	/* We need an IRQ for each event ring, plus one additional IRQ for BHI */
	mhi_cntrl->nr_irqs_req = mhi_cntrl->total_ev_rings + 1;

	return 0;

error_ev_cfg:

	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}

static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			struct mhi_controller_config *config)
{
	struct mhi_channel_config *ch_cfg;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so to avoid any possible memory allocation failures, vzalloc is
	 * used here
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length should be bigger
		 * than the transfer ring length due to internal logical
		 * channels in the device, so the host can queue many more
		 * buffers than the transfer ring length. For example, RSC
		 * channels should have a larger local channel length than
		 * the transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction. So, if it is not defined, assign the channel
		 * direction to chtype
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->auto_start = ch_cfg->auto_start;

		/*
		 * If the MHI host allocates buffers, then the channel
		 * direction should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and directionless channels must be offload
		 * channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid doorbell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}

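/*
 * Illustrative example (hypothetical values, not part of this file's
 * logic): a controller driver would describe its channels with entries
 * like the following, which parse_ch_cfg() above translates into
 * struct mhi_chan state.
 *
 *	static const struct mhi_channel_config example_channels[] = {
 *		{
 *			.num = 20,
 *			.name = "IPCR",
 *			.num_elements = 64,
 *			.event_ring = 1,
 *			.dir = DMA_TO_DEVICE,
 *			.ee_mask = BIT(MHI_EE_AMSS),
 *			.doorbell = MHI_DB_BRST_DISABLE,
 *		},
 *	};
 */
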
static int parse_config(struct mhi_controller *mhi_cntrl,
			struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}

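/*
 * parse_config() applies two fallbacks worth noting: a zero
 * config->timeout_ms becomes MHI_TIMEOUT_MS, and a zero buf_len becomes
 * MHI_MAX_MTU, so controller drivers may leave both fields unset.
 */
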
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl)
		return -EINVAL;

	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto error_alloc_cmd;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
	INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto error_alloc_dev;

	mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
					SOC_HW_VERSION_FAM_NUM_SHFT;
	mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
					SOC_HW_VERSION_DEV_NUM_SHFT;
	mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
					SOC_HW_VERSION_MAJOR_VER_SHFT;
	mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
					SOC_HW_VERSION_MINOR_VER_SHFT;

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_alloc_dev;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto error_add_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	return 0;

error_add_dev:
	put_device(&mhi_dev->dev);

error_alloc_dev:
	kfree(mhi_cntrl->mhi_cmd);

error_alloc_cmd:
	vfree(mhi_cntrl->mhi_chan);
	kfree(mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);

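/*
 * Note for controller drivers: mhi_register_controller() returns -EINVAL
 * unless runtime_get, runtime_put, status_cb, read_reg, and write_reg
 * are all populated, so these callbacks must be wired up before
 * registering.
 */
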
void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	/*
	 * Allocate the RDDM table if specified; this table is for debugging
	 * purposes
	 */
	if (mhi_cntrl->rddm_size) {
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);

		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);

		if (mhi_cntrl->rddm_image)
			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
	}

	mhi_cntrl->pre_init = true;

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

error_reg_offset:
	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_deinit_dev_ctxt(mhi_cntrl);
	mhi_cntrl->pre_init = false;
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with them is NULL. This scenario will happen during
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;
	dev->parent = mhi_cntrl->cntrl_dev;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}

static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If the channel supports LPM notifications then status_cb
		 * should be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
		if (ul_chan->auto_start) {
			ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
			if (ret)
				goto exit_probe;
		}
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If the channel supports LPM notifications then status_cb
		 * should be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by the client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	if (dl_chan && dl_chan->auto_start)
		mhi_prepare_channel(mhi_cntrl, dl_chan);

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}

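/*
 * Probe runs with the device held out of low power mode: it is bracketed
 * by mhi_device_get_sync()/mhi_device_put() above so that auto-start
 * channel preparation can safely touch the device.
 */
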
static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);

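/*
 * Illustrative usage (hypothetical names, shown as a sketch only): a
 * client driver binds to a channel pair by name through the id table,
 * which mhi_match() below compares against mhi_dev->chan_name.
 *
 *	static const struct mhi_device_id example_id_table[] = {
 *		{ .chan = "IPCR" },
 *		{},
 *	};
 *
 *	static struct mhi_driver example_driver = {
 *		.id_table = example_id_table,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.ul_xfer_cb = example_ul_cb,
 *		.dl_xfer_cb = example_dl_cb,
 *		.driver = {
 *			.name = "example_mhi_client",
 *		},
 *	};
 *	module_mhi_driver(example_driver);
 */
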
void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->chan_name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->chan_name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
};

static int __init mhi_init(void)
{
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");