// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"
const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PBL",
	[MHI_EE_SBL] = "SBL",
	[MHI_EE_AMSS] = "AMSS",
	[MHI_EE_RDDM] = "RDDM",
	[MHI_EE_WFW] = "WFW",
	[MHI_EE_PTHRU] = "PASS THRU",
	[MHI_EE_EDL] = "EDL",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};
const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION_MODE",
	[DEV_ST_TRANSITION_SYS_ERR] = "SYS_ERR",
	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};
const char * const mhi_state_str[MHI_STATE_MAX] = {
	[MHI_STATE_RESET] = "RESET",
	[MHI_STATE_READY] = "READY",
	[MHI_STATE_M0] = "M0",
	[MHI_STATE_M1] = "M1",
	[MHI_STATE_M2] = "M2",
	[MHI_STATE_M3] = "M3",
	[MHI_STATE_M3_FAST] = "M3_FAST",
	[MHI_STATE_BHI] = "BHI",
	[MHI_STATE_SYS_ERR] = "SYS_ERR",
};
static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POR",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "FW DL Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS_ERR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS_ERR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
};
const char *to_mhi_pm_state_str(enum mhi_pm_state state)
{
	int index = find_last_bit((unsigned long *)&state, 32);
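	/*
	 * The runtime PM state values are single-bit masks, so a state
	 * defined as BIT(n) resolves to index n of mhi_pm_state_str[]
	 * above.
	 */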

	if (index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}
/* MHI protocol requires the transfer ring to be aligned with ring length */
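/*
 * A worked example of the math below, assuming len is a power of two
 * (e.g. a 4 KB ring, len = 0x1000): alloc_size = len + (len - 1) =
 * 0x1fff guarantees a len-aligned address exists inside the allocation.
 * A dma_handle of 0x80001234 rounds up to iommu_base = 0x80002000, and
 * the CPU-side base is advanced by the same 0xdcc bytes so both views
 * point at the aligned ring.
 */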
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = mhi_alloc_coherent(mhi_cntrl, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}
void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}
int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, ret;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   IRQF_SHARED | IRQF_NO_SUSPEND,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  IRQF_SHARED | IRQF_NO_SUSPEND,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}
	}

	return 0;

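/*
 * Unwind in reverse order: on failure, i and mhi_event point at the
 * ring whose request_irq() failed, so step back before freeing, and
 * skip offload rings, which never requested an IRQ.
 */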
error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}
void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}
int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = mhi_alloc_coherent(mhi_cntrl,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;
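		/*
		 * chcfg packs channel state, burst mode and poll config
		 * into a single word; each field below is updated
		 * read-modify-write so the neighboring fields are
		 * preserved.
		 */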
		tmp = chan_ctxt->chcfg;
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= (mhi_chan->db_cfg.brstmode << CHAN_CTX_BRSTMODE_SHIFT);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= (mhi_chan->db_cfg.pollcfg << CHAN_CTX_POLLCFG_SHIFT);
		chan_ctxt->chcfg = tmp;

		chan_ctxt->chtype = mhi_chan->type;
		chan_ctxt->erindex = mhi_chan->er_index;

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}
	/* Setup event context */
	mhi_ctxt->er_ctxt = mhi_alloc_coherent(mhi_cntrl,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;
		tmp = er_ctxt->intmod;
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= (mhi_event->intmod << EV_CTX_INTMODT_SHIFT);
		er_ctxt->intmod = tmp;

		er_ctxt->ertype = MHI_ER_TYPE_VALID;
		er_ctxt->msivec = mhi_event->irq;
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_tre);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = ring->iommu_base;
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = ring->len;
		ring->ctxt_wp = &er_ctxt->wp;
	}
	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = mhi_alloc_coherent(mhi_cntrl,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_tre);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = ring->iommu_base;
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = ring->len;
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;
error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		mhi_free_coherent(mhi_cntrl, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	mhi_free_coherent(mhi_cntrl, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 mask;
		u32 shift;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICFG, MHICFG_NER_MASK, MHICFG_NER_SHIFT,
			mhi_cntrl->total_ev_rings,
		},
		{
			MHICFG, MHICFG_NHWER_MASK, MHICFG_NHWER_SHIFT,
			mhi_cntrl->hw_ev_rings,
		},
		{
			MHICTRLBASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER, U32_MAX, 0,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER, U32_MAX, 0,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{ 0, 0, 0 }
	};
	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, CHDBOFF, CHDBOFF_CHDBOFF_MASK,
				 CHDBOFF_CHDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
	mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg_field(mhi_cntrl, base, ERDBOFF, ERDBOFF_ERDBOFF_MASK,
				 ERDBOFF_ERDBOFF_SHIFT, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
				    reg_info[i].mask, reg_info[i].shift,
				    reg_info[i].val);

	return 0;
}
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	chan_ctxt->rbase = 0;
}
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_tre);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		mhi_free_coherent(mhi_cntrl, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = chan_ctxt->chcfg;
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= (MHI_CH_STATE_ENABLED << CHAN_CTX_CHSTATE_SHIFT);
	chan_ctxt->chcfg = tmp;

	chan_ctxt->rbase = tre_ring->iommu_base;
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = tre_ring->len;
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}
static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_event_config *event_cfg;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	/* We need IRQ for each event ring + additional one for BHI */
	mhi_cntrl->nr_irqs_req = mhi_cntrl->total_ev_rings + 1;

	return 0;

error_ev_cfg:
	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}
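/*
 * A minimal sketch of an event ring description consumed by
 * parse_ev_cfg() above; the values are illustrative only:
 *
 *	static struct mhi_event_config example_event = {
 *		.num_elements = 32,
 *		.irq_moderation_ms = 0,
 *		.irq = 1,
 *		.channel = U32_MAX,	(no dedicated channel)
 *		.mode = MHI_DB_BRST_DISABLE,
 *		.data_type = MHI_ER_CTRL,
 *		.hardware_event = false,
 *		.client_managed = false,
 *		.offload_channel = false,
 *	};
 */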
static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			struct mhi_controller_config *config)
{
	struct mhi_channel_config *ch_cfg;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so vzalloc is used here to avoid possible allocation failures
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length should be bigger
		 * than the transfer ring length due to internal logical
		 * channels in the device, so that the host can queue more
		 * buffers than the transfer ring length allows. For example,
		 * RSC channels should have a larger local channel length
		 * than the transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction, so if it is not defined then assign the
		 * channel direction to chtype
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->auto_start = ch_cfg->auto_start;

		/*
		 * If the MHI host allocates buffers, then the channel
		 * direction should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and direction-less channels must be
		 * offload channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid doorbell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}
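/*
 * A minimal sketch of a channel description consumed by parse_ch_cfg()
 * above; the name and values are illustrative only:
 *
 *	static const struct mhi_channel_config example_chan = {
 *		.name = "EXAMPLE",
 *		.num = 0,
 *		.num_elements = 64,
 *		.event_ring = 0,
 *		.dir = DMA_TO_DEVICE,
 *		.ee_mask = BIT(MHI_EE_AMSS),
 *		.doorbell = MHI_DB_BRST_DISABLE,
 *		.offload_channel = false,
 *	};
 */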
static int parse_config(struct mhi_controller *mhi_cntrl,
			struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
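/*
 * Putting the pieces together, a controller driver would describe its
 * topology along these lines before registering (a hedged sketch;
 * example_chans and example_events are hypothetical arrays):
 *
 *	static struct mhi_controller_config example_config = {
 *		.max_channels = 128,
 *		.timeout_ms = 2000,
 *		.num_channels = ARRAY_SIZE(example_chans),
 *		.ch_cfg = example_chans,
 *		.num_events = ARRAY_SIZE(example_events),
 *		.event_cfg = example_events,
 *	};
 */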
int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl)
		return -EINVAL;

	if (!mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto error_alloc_cmd;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* used in setting bei field of TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto error_alloc_dev;

	mhi_cntrl->family_number = (soc_info & SOC_HW_VERSION_FAM_NUM_BMSK) >>
					SOC_HW_VERSION_FAM_NUM_SHFT;
	mhi_cntrl->device_number = (soc_info & SOC_HW_VERSION_DEV_NUM_BMSK) >>
					SOC_HW_VERSION_DEV_NUM_SHFT;
	mhi_cntrl->major_version = (soc_info & SOC_HW_VERSION_MAJOR_VER_BMSK) >>
					SOC_HW_VERSION_MAJOR_VER_SHFT;
	mhi_cntrl->minor_version = (soc_info & SOC_HW_VERSION_MINOR_VER_BMSK) >>
					SOC_HW_VERSION_MINOR_VER_SHFT;

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_alloc_dev;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "%s", dev_name(mhi_cntrl->cntrl_dev));

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto error_add_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	return 0;

error_add_dev:
	put_device(&mhi_dev->dev);

error_alloc_dev:
	kfree(mhi_cntrl->mhi_cmd);

error_alloc_cmd:
	vfree(mhi_cntrl->mhi_chan);
	kfree(mhi_cntrl->mhi_event);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);
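/*
 * Typical call sequence from a controller driver, as a hedged sketch
 * (the callback names below are the fields validated at the top of
 * mhi_register_controller(); everything about the hypothetical
 * controller itself is illustrative):
 *
 *	mhi_cntrl->cntrl_dev = &pdev->dev;
 *	mhi_cntrl->regs = mmio_base;
 *	mhi_cntrl->irq = irq_table;
 *	mhi_cntrl->runtime_get = example_runtime_get;
 *	mhi_cntrl->runtime_put = example_runtime_put;
 *	mhi_cntrl->status_cb = example_status_cb;
 *	mhi_cntrl->read_reg = example_read_reg;
 *	mhi_cntrl->write_reg = example_write_reg;
 *
 *	ret = mhi_register_controller(mhi_cntrl, &example_config);
 */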
void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	/*
	 * Allocate RDDM table if specified; this table is for debugging
	 * purposes
	 */
	if (mhi_cntrl->rddm_size) {
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);

		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto bhie_error;
		}

		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);

		if (mhi_cntrl->rddm_image)
			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
	}

	mhi_cntrl->pre_init = true;

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

bhie_error:
	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_deinit_dev_ctxt(mhi_cntrl);
	mhi_cntrl->pre_init = false;
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);
static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with it is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}
struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;
	dev->parent = mhi_cntrl->cntrl_dev;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}
static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If channel supports LPM notifications then status_cb
		 * should be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
		if (ul_chan->auto_start) {
			ret = mhi_prepare_channel(mhi_cntrl, ul_chan);
			if (ret)
				goto exit_probe;
		}
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If channel supports LPM notifications then status_cb
		 * should be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by the client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	if (dl_chan && dl_chan->auto_start)
		mhi_prepare_channel(mhi_cntrl, dl_chan);

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);
	mhi_device_put(mhi_dev);

	return ret;
}
static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Set the channel state to disabled */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if (ch_state[dir] == MHI_CH_STATE_ENABLED &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	read_lock_bh(&mhi_cntrl->pm_lock);
	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);
	read_unlock_bh(&mhi_cntrl->pm_lock);

	return 0;
}
int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);
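/*
 * Skeleton of a client driver built on this interface (a hedged
 * sketch; the names are illustrative and the chan string must match a
 * channel name provided by the controller configuration):
 *
 *	static const struct mhi_device_id example_id_table[] = {
 *		{ .chan = "EXAMPLE" },
 *		{},
 *	};
 *
 *	static struct mhi_driver example_driver = {
 *		.id_table = example_id_table,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.ul_xfer_cb = example_ul_cb,
 *		.dl_xfer_cb = example_dl_cb,
 *		.driver = {
 *			.name = "example_mhi_client",
 *		},
 *	};
 *
 * A module would then call mhi_driver_register(&example_driver) from
 * its init path and mhi_driver_unregister() on exit.
 */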
void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->chan_name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->chan_name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}
struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
};

static int __init mhi_init(void)
{
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");