bus: mhi: core: remove redundant initialization of variables state and ee
[linux-2.6-microblaze.git] drivers/bus/mhi/core/main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4  *
5  */
6
7 #include <linux/device.h>
8 #include <linux/dma-direction.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/interrupt.h>
11 #include <linux/list.h>
12 #include <linux/mhi.h>
13 #include <linux/module.h>
14 #include <linux/skbuff.h>
15 #include <linux/slab.h>
16 #include "internal.h"
17
18 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
19                               void __iomem *base, u32 offset, u32 *out)
20 {
21         return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
22 }
23
24 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
25                                     void __iomem *base, u32 offset,
26                                     u32 mask, u32 shift, u32 *out)
27 {
28         u32 tmp;
29         int ret;
30
31         ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
32         if (ret)
33                 return ret;
34
35         *out = (tmp & mask) >> shift;
36
37         return 0;
38 }
39
40 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
41                    u32 offset, u32 val)
42 {
43         mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
44 }
45
46 void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
47                          u32 offset, u32 mask, u32 shift, u32 val)
48 {
49         int ret;
50         u32 tmp;
51
52         ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
53         if (ret)
54                 return;
55
56         tmp &= ~mask;
57         tmp |= (val << shift);
58         mhi_write_reg(mhi_cntrl, base, offset, tmp);
59 }
60
61 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
62                   dma_addr_t db_val)
63 {
64         mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
65         mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
66 }
67
68 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
69                      struct db_cfg *db_cfg,
70                      void __iomem *db_addr,
71                      dma_addr_t db_val)
72 {
73         if (db_cfg->db_mode) {
74                 db_cfg->db_val = db_val;
75                 mhi_write_db(mhi_cntrl, db_addr, db_val);
76                 db_cfg->db_mode = 0;
77         }
78 }
79
80 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
81                              struct db_cfg *db_cfg,
82                              void __iomem *db_addr,
83                              dma_addr_t db_val)
84 {
85         db_cfg->db_val = db_val;
86         mhi_write_db(mhi_cntrl, db_addr, db_val);
87 }
88
89 void mhi_ring_er_db(struct mhi_event *mhi_event)
90 {
91         struct mhi_ring *ring = &mhi_event->ring;
92
93         mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
94                                      ring->db_addr, *ring->ctxt_wp);
95 }
96
97 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
98 {
99         dma_addr_t db;
100         struct mhi_ring *ring = &mhi_cmd->ring;
101
102         db = ring->iommu_base + (ring->wp - ring->base);
103         *ring->ctxt_wp = db;
104         mhi_write_db(mhi_cntrl, ring->db_addr, db);
105 }
106
107 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
108                       struct mhi_chan *mhi_chan)
109 {
110         struct mhi_ring *ring = &mhi_chan->tre_ring;
111         dma_addr_t db;
112
113         db = ring->iommu_base + (ring->wp - ring->base);
114
115         /*
116          * Writes to the new ring element must be visible to the hardware
117          * before letting the device know there is a new element to fetch.
118          */
119         dma_wmb();
120         *ring->ctxt_wp = db;
121
122         mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
123                                     ring->db_addr, db);
124 }
125
126 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
127 {
128         u32 exec;
129         int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
130
131         return (ret) ? MHI_EE_MAX : exec;
132 }
133 EXPORT_SYMBOL_GPL(mhi_get_exec_env);
134
135 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
136 {
137         u32 state;
138         int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
139                                      MHISTATUS_MHISTATE_MASK,
140                                      MHISTATUS_MHISTATE_SHIFT, &state);
141         return ret ? MHI_STATE_MAX : state;
142 }
143 EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
144
145 void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
146 {
147         if (mhi_cntrl->reset) {
148                 mhi_cntrl->reset(mhi_cntrl);
149                 return;
150         }
151
152         /* Generic MHI SoC reset */
153         mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
154                       MHI_SOC_RESET_REQ);
155 }
156 EXPORT_SYMBOL_GPL(mhi_soc_reset);
157
158 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
159                          struct mhi_buf_info *buf_info)
160 {
161         buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
162                                           buf_info->v_addr, buf_info->len,
163                                           buf_info->dir);
164         if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
165                 return -ENOMEM;
166
167         return 0;
168 }
169
170 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
171                           struct mhi_buf_info *buf_info)
172 {
173         void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
174                                        &buf_info->p_addr, GFP_ATOMIC);
175
176         if (!buf)
177                 return -ENOMEM;
178
179         if (buf_info->dir == DMA_TO_DEVICE)
180                 memcpy(buf, buf_info->v_addr, buf_info->len);
181
182         buf_info->bb_addr = buf;
183
184         return 0;
185 }
186
187 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
188                             struct mhi_buf_info *buf_info)
189 {
190         dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
191                          buf_info->dir);
192 }
193
194 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
195                              struct mhi_buf_info *buf_info)
196 {
197         if (buf_info->dir == DMA_FROM_DEVICE)
198                 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
199
200         mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
201                           buf_info->p_addr);
202 }
203
204 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
205                                       struct mhi_ring *ring)
206 {
207         int nr_el;
208
209         if (ring->wp < ring->rp) {
210                 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
211         } else {
212                 nr_el = (ring->rp - ring->base) / ring->el_size;
213                 nr_el += ((ring->base + ring->len - ring->wp) /
214                           ring->el_size) - 1;
215         }
216
217         return nr_el;
218 }
219
220 static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
221 {
222         return (addr - ring->iommu_base) + ring->base;
223 }
224
225 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
226                                  struct mhi_ring *ring)
227 {
228         ring->wp += ring->el_size;
229         if (ring->wp >= (ring->base + ring->len))
230                 ring->wp = ring->base;
231         /* Ensure the WP update is ordered for other CPUs */
232         smp_wmb();
233 }
234
235 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
236                                  struct mhi_ring *ring)
237 {
238         ring->rp += ring->el_size;
239         if (ring->rp >= (ring->base + ring->len))
240                 ring->rp = ring->base;
241         /* Ensure the RP update is ordered for other CPUs */
242         smp_wmb();
243 }
244
245 int mhi_destroy_device(struct device *dev, void *data)
246 {
247         struct mhi_chan *ul_chan, *dl_chan;
248         struct mhi_device *mhi_dev;
249         struct mhi_controller *mhi_cntrl;
250         enum mhi_ee_type ee = MHI_EE_MAX;
251
252         if (dev->bus != &mhi_bus_type)
253                 return 0;
254
255         mhi_dev = to_mhi_device(dev);
256         mhi_cntrl = mhi_dev->mhi_cntrl;
257
258         /* Only destroy virtual devices that are attached to the bus */
259         if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
260                 return 0;
261
262         ul_chan = mhi_dev->ul_chan;
263         dl_chan = mhi_dev->dl_chan;
264
265         /*
266          * If an execution environment is specified, remove only those devices
267          * that started in it (based on the channel ee_mask), as we move on to
268          * a different execution environment
269          */
270         if (data)
271                 ee = *(enum mhi_ee_type *)data;
272
273         /*
274          * For the suspend and resume case, this function will get called
275          * without mhi_unregister_controller(). Hence, we need to drop the
276          * references to mhi_dev created for ul and dl channels. We can
277          * be sure that there will be no instances of mhi_dev left after
278          * this.
279          */
280         if (ul_chan) {
281                 if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee)))
282                         return 0;
283
284                 put_device(&ul_chan->mhi_dev->dev);
285         }
286
287         if (dl_chan) {
288                 if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee)))
289                         return 0;
290
291                 put_device(&dl_chan->mhi_dev->dev);
292         }
293
294         dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
295                  mhi_dev->name);
296
297         /* Notify the client and remove the device from MHI bus */
298         device_del(dev);
299         put_device(dev);
300
301         return 0;
302 }
303
304 int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
305                                 enum dma_data_direction dir)
306 {
307         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
308         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
309                 mhi_dev->ul_chan : mhi_dev->dl_chan;
310         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
311
312         return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
313 }
314 EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
315
316 void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
317 {
318         struct mhi_driver *mhi_drv;
319
320         if (!mhi_dev->dev.driver)
321                 return;
322
323         mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
324
325         if (mhi_drv->status_cb)
326                 mhi_drv->status_cb(mhi_dev, cb_reason);
327 }
328 EXPORT_SYMBOL_GPL(mhi_notify);
329
330 /* Bind MHI channels to MHI devices */
331 void mhi_create_devices(struct mhi_controller *mhi_cntrl)
332 {
333         struct mhi_chan *mhi_chan;
334         struct mhi_device *mhi_dev;
335         struct device *dev = &mhi_cntrl->mhi_dev->dev;
336         int i, ret;
337
338         mhi_chan = mhi_cntrl->mhi_chan;
339         for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
340                 if (!mhi_chan->configured || mhi_chan->mhi_dev ||
341                     !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
342                         continue;
343                 mhi_dev = mhi_alloc_device(mhi_cntrl);
344                 if (IS_ERR(mhi_dev))
345                         return;
346
347                 mhi_dev->dev_type = MHI_DEVICE_XFER;
348                 switch (mhi_chan->dir) {
349                 case DMA_TO_DEVICE:
350                         mhi_dev->ul_chan = mhi_chan;
351                         mhi_dev->ul_chan_id = mhi_chan->chan;
352                         break;
353                 case DMA_FROM_DEVICE:
354                         /* We use dl_chan as offload channels */
355                         mhi_dev->dl_chan = mhi_chan;
356                         mhi_dev->dl_chan_id = mhi_chan->chan;
357                         break;
358                 default:
359                         dev_err(dev, "Direction not supported\n");
360                         put_device(&mhi_dev->dev);
361                         return;
362                 }
363
364                 get_device(&mhi_dev->dev);
365                 mhi_chan->mhi_dev = mhi_dev;
366
367                 /* Check if the next channel has the same name (UL/DL pair) */
368                 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
369                         if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
370                                 i++;
371                                 mhi_chan++;
372                                 if (mhi_chan->dir == DMA_TO_DEVICE) {
373                                         mhi_dev->ul_chan = mhi_chan;
374                                         mhi_dev->ul_chan_id = mhi_chan->chan;
375                                 } else {
376                                         mhi_dev->dl_chan = mhi_chan;
377                                         mhi_dev->dl_chan_id = mhi_chan->chan;
378                                 }
379                                 get_device(&mhi_dev->dev);
380                                 mhi_chan->mhi_dev = mhi_dev;
381                         }
382                 }
383
384                 /* Channel name is the same for both UL and DL */
385                 mhi_dev->name = mhi_chan->name;
386                 dev_set_name(&mhi_dev->dev, "%s_%s",
387                              dev_name(&mhi_cntrl->mhi_dev->dev),
388                              mhi_dev->name);
389
390                 /* Init wakeup source if available */
391                 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
392                         device_init_wakeup(&mhi_dev->dev, true);
393
394                 ret = device_add(&mhi_dev->dev);
395                 if (ret)
396                         put_device(&mhi_dev->dev);
397         }
398 }
399
400 irqreturn_t mhi_irq_handler(int irq_number, void *dev)
401 {
402         struct mhi_event *mhi_event = dev;
403         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
404         struct mhi_event_ctxt *er_ctxt =
405                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
406         struct mhi_ring *ev_ring = &mhi_event->ring;
407         void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
408
409         /* Only proceed if event ring has pending events */
410         if (ev_ring->rp == dev_rp)
411                 return IRQ_HANDLED;
412
413         /* For a client-managed event ring, notify the client of pending data */
414         if (mhi_event->cl_manage) {
415                 struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
416                 struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
417
418                 if (mhi_dev)
419                         mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
420         } else {
421                 tasklet_schedule(&mhi_event->task);
422         }
423
424         return IRQ_HANDLED;
425 }
426
427 irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
428 {
429         struct mhi_controller *mhi_cntrl = priv;
430         struct device *dev = &mhi_cntrl->mhi_dev->dev;
431         enum mhi_state state;
432         enum mhi_pm_state pm_state = 0;
433         enum mhi_ee_type ee;
434
435         write_lock_irq(&mhi_cntrl->pm_lock);
436         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
437                 write_unlock_irq(&mhi_cntrl->pm_lock);
438                 goto exit_intvec;
439         }
440
441         state = mhi_get_mhi_state(mhi_cntrl);
442         ee = mhi_get_exec_env(mhi_cntrl);
443         dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
444                 TO_MHI_EXEC_STR(mhi_cntrl->ee),
445                 TO_MHI_STATE_STR(mhi_cntrl->dev_state),
446                 TO_MHI_EXEC_STR(ee), TO_MHI_STATE_STR(state));
447
448         if (state == MHI_STATE_SYS_ERR) {
449                 dev_dbg(dev, "System error detected\n");
450                 pm_state = mhi_tryset_pm_state(mhi_cntrl,
451                                                MHI_PM_SYS_ERR_DETECT);
452         }
453         write_unlock_irq(&mhi_cntrl->pm_lock);
454
455         if (pm_state != MHI_PM_SYS_ERR_DETECT || ee == mhi_cntrl->ee)
456                 goto exit_intvec;
457
458         switch (ee) {
459         case MHI_EE_RDDM:
460                 /* proceed if power down is not already in progress */
461                 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
462                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
463                         mhi_cntrl->ee = ee;
464                         wake_up_all(&mhi_cntrl->state_event);
465                 }
466                 break;
467         case MHI_EE_PBL:
468         case MHI_EE_EDL:
469         case MHI_EE_PTHRU:
470                 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
471                 mhi_cntrl->ee = ee;
472                 wake_up_all(&mhi_cntrl->state_event);
473                 mhi_pm_sys_err_handler(mhi_cntrl);
474                 break;
475         default:
476                 wake_up_all(&mhi_cntrl->state_event);
477                 mhi_pm_sys_err_handler(mhi_cntrl);
478                 break;
479         }
480
481 exit_intvec:
482
483         return IRQ_HANDLED;
484 }
485
486 irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
487 {
488         struct mhi_controller *mhi_cntrl = dev;
489
490         /* Wake up events waiting for state change */
491         wake_up_all(&mhi_cntrl->state_event);
492
493         return IRQ_WAKE_THREAD;
494 }
495
496 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
497                                         struct mhi_ring *ring)
498 {
499         dma_addr_t ctxt_wp;
500
501         /* Update the WP */
502         ring->wp += ring->el_size;
503         ctxt_wp = *ring->ctxt_wp + ring->el_size;
504
505         if (ring->wp >= (ring->base + ring->len)) {
506                 ring->wp = ring->base;
507                 ctxt_wp = ring->iommu_base;
508         }
509
510         *ring->ctxt_wp = ctxt_wp;
511
512         /* Update the RP */
513         ring->rp += ring->el_size;
514         if (ring->rp >= (ring->base + ring->len))
515                 ring->rp = ring->base;
516
517         /* Ensure the ring updates are ordered for other CPUs */
518         smp_wmb();
519 }
520
521 static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
522                             struct mhi_tre *event,
523                             struct mhi_chan *mhi_chan)
524 {
525         struct mhi_ring *buf_ring, *tre_ring;
526         struct device *dev = &mhi_cntrl->mhi_dev->dev;
527         struct mhi_result result;
528         unsigned long flags = 0;
529         u32 ev_code;
530
531         ev_code = MHI_TRE_GET_EV_CODE(event);
532         buf_ring = &mhi_chan->buf_ring;
533         tre_ring = &mhi_chan->tre_ring;
534
535         result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
536                 -EOVERFLOW : 0;
537
538         /*
539          * If it's a DB event, we need to grab the lock as a writer
540          * with preemption disabled because we have to update the DB
541          * register and there is a chance that another thread could be
542          * doing the same.
543          */
544         if (ev_code >= MHI_EV_CC_OOB)
545                 write_lock_irqsave(&mhi_chan->lock, flags);
546         else
547                 read_lock_bh(&mhi_chan->lock);
548
549         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
550                 goto end_process_tx_event;
551
552         switch (ev_code) {
553         case MHI_EV_CC_OVERFLOW:
554         case MHI_EV_CC_EOB:
555         case MHI_EV_CC_EOT:
556         {
557                 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
558                 struct mhi_tre *local_rp, *ev_tre;
559                 void *dev_rp;
560                 struct mhi_buf_info *buf_info;
561                 u16 xfer_len;
562
563                 /* Get the TRE this event points to */
564                 ev_tre = mhi_to_virtual(tre_ring, ptr);
565
566                 dev_rp = ev_tre + 1;
567                 if (dev_rp >= (tre_ring->base + tre_ring->len))
568                         dev_rp = tre_ring->base;
569
570                 result.dir = mhi_chan->dir;
571
572                 local_rp = tre_ring->rp;
573                 while (local_rp != dev_rp) {
574                         buf_info = buf_ring->rp;
575                         /* If it's the last TRE, get length from the event */
576                         if (local_rp == ev_tre)
577                                 xfer_len = MHI_TRE_GET_EV_LEN(event);
578                         else
579                                 xfer_len = buf_info->len;
580
581                         /* Unmap if it's not pre-mapped by client */
582                         if (likely(!buf_info->pre_mapped))
583                                 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
584
585                         result.buf_addr = buf_info->cb_buf;
586
587                         /* truncate to buf len if xfer_len is larger */
588                         result.bytes_xferd =
589                                 min_t(u16, xfer_len, buf_info->len);
590                         mhi_del_ring_element(mhi_cntrl, buf_ring);
591                         mhi_del_ring_element(mhi_cntrl, tre_ring);
592                         local_rp = tre_ring->rp;
593
594                         /* notify client */
595                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
596
597                         if (mhi_chan->dir == DMA_TO_DEVICE)
598                                 atomic_dec(&mhi_cntrl->pending_pkts);
599
600                         /*
601                          * Recycle the buffer if it is pre-allocated. If
602                          * recycling fails, there is not much we can do
603                          * apart from dropping the packet.
604                          */
605                         if (mhi_chan->pre_alloc) {
606                                 if (mhi_queue_buf(mhi_chan->mhi_dev,
607                                                   mhi_chan->dir,
608                                                   buf_info->cb_buf,
609                                                   buf_info->len, MHI_EOT)) {
610                                         dev_err(dev,
611                                                 "Error recycling buffer for chan:%d\n",
612                                                 mhi_chan->chan);
613                                         kfree(buf_info->cb_buf);
614                                 }
615                         }
616                 }
617                 break;
618         } /* CC_EOT */
619         case MHI_EV_CC_OOB:
620         case MHI_EV_CC_DB_MODE:
621         {
622                 unsigned long flags;
623
624                 mhi_chan->db_cfg.db_mode = 1;
625                 read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
626                 if (tre_ring->wp != tre_ring->rp &&
627                     MHI_DB_ACCESS_VALID(mhi_cntrl)) {
628                         mhi_ring_chan_db(mhi_cntrl, mhi_chan);
629                 }
630                 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
631                 break;
632         }
633         case MHI_EV_CC_BAD_TRE:
634         default:
635                 dev_err(dev, "Unknown event 0x%x\n", ev_code);
636                 break;
637         } /* switch(MHI_TRE_GET_EV_CODE(event)) */
638
639 end_process_tx_event:
640         if (ev_code >= MHI_EV_CC_OOB)
641                 write_unlock_irqrestore(&mhi_chan->lock, flags);
642         else
643                 read_unlock_bh(&mhi_chan->lock);
644
645         return 0;
646 }
647
648 static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
649                            struct mhi_tre *event,
650                            struct mhi_chan *mhi_chan)
651 {
652         struct mhi_ring *buf_ring, *tre_ring;
653         struct mhi_buf_info *buf_info;
654         struct mhi_result result;
655         int ev_code;
656         u32 cookie; /* offset to local descriptor */
657         u16 xfer_len;
658
659         buf_ring = &mhi_chan->buf_ring;
660         tre_ring = &mhi_chan->tre_ring;
661
662         ev_code = MHI_TRE_GET_EV_CODE(event);
663         cookie = MHI_TRE_GET_EV_COOKIE(event);
664         xfer_len = MHI_TRE_GET_EV_LEN(event);
665
666         /* Received an out-of-bounds cookie */
667         WARN_ON(cookie >= buf_ring->len);
668
669         buf_info = buf_ring->base + cookie;
670
671         result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
672                 -EOVERFLOW : 0;
673
674         /* truncate to buf len if xfer_len is larger */
675         result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
676         result.buf_addr = buf_info->cb_buf;
677         result.dir = mhi_chan->dir;
678
679         read_lock_bh(&mhi_chan->lock);
680
681         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
682                 goto end_process_rsc_event;
683
684         WARN_ON(!buf_info->used);
685
686         /* notify the client */
687         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
688
689         /*
690          * Note: We're arbitrarily incrementing RP even though the completion
691          * packet we just processed might not correspond to it. We can do this
692          * because the device is guaranteed to cache descriptors in the order
693          * it receives them, so even though this completion event refers to a
694          * different descriptor, all descriptors in between can be re-used.
695          * Example:
696          * The transfer ring has descriptors: A, B, C, D
697          * The last descriptor the host queued is D (WP) and the first
698          * descriptor queued is A (RP).
699          * The completion event we just serviced is for descriptor C.
700          * Then we can safely queue new buffers to replace A, B, and C
701          * even though the host has not received completions for A and B.
702          */
703         mhi_del_ring_element(mhi_cntrl, tre_ring);
704         buf_info->used = false;
705
706 end_process_rsc_event:
707         read_unlock_bh(&mhi_chan->lock);
708
709         return 0;
710 }
711
712 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
713                                        struct mhi_tre *tre)
714 {
715         dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
716         struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
717         struct mhi_ring *mhi_ring = &cmd_ring->ring;
718         struct mhi_tre *cmd_pkt;
719         struct mhi_chan *mhi_chan;
720         u32 chan;
721
722         cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
723
724         chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
725         mhi_chan = &mhi_cntrl->mhi_chan[chan];
726         write_lock_bh(&mhi_chan->lock);
727         mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
728         complete(&mhi_chan->completion);
729         write_unlock_bh(&mhi_chan->lock);
730
731         mhi_del_ring_element(mhi_cntrl, mhi_ring);
732 }
733
734 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
735                              struct mhi_event *mhi_event,
736                              u32 event_quota)
737 {
738         struct mhi_tre *dev_rp, *local_rp;
739         struct mhi_ring *ev_ring = &mhi_event->ring;
740         struct mhi_event_ctxt *er_ctxt =
741                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
742         struct mhi_chan *mhi_chan;
743         struct device *dev = &mhi_cntrl->mhi_dev->dev;
744         u32 chan;
745         int count = 0;
746
747         /*
748          * This is a quick check to avoid unnecessary event processing
749          * in case MHI is already in error state, but it's still possible
750          * to transition to error state while processing events
751          */
752         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
753                 return -EIO;
754
755         dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
756         local_rp = ev_ring->rp;
757
758         while (dev_rp != local_rp) {
759                 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
760
761                 switch (type) {
762                 case MHI_PKT_TYPE_BW_REQ_EVENT:
763                 {
764                         struct mhi_link_info *link_info;
765
766                         link_info = &mhi_cntrl->mhi_link_info;
767                         write_lock_irq(&mhi_cntrl->pm_lock);
768                         link_info->target_link_speed =
769                                 MHI_TRE_GET_EV_LINKSPEED(local_rp);
770                         link_info->target_link_width =
771                                 MHI_TRE_GET_EV_LINKWIDTH(local_rp);
772                         write_unlock_irq(&mhi_cntrl->pm_lock);
773                         dev_dbg(dev, "Received BW_REQ event\n");
774                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
775                         break;
776                 }
777                 case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
778                 {
779                         enum mhi_state new_state;
780
781                         new_state = MHI_TRE_GET_EV_STATE(local_rp);
782
783                         dev_dbg(dev, "State change event to state: %s\n",
784                                 TO_MHI_STATE_STR(new_state));
785
786                         switch (new_state) {
787                         case MHI_STATE_M0:
788                                 mhi_pm_m0_transition(mhi_cntrl);
789                                 break;
790                         case MHI_STATE_M1:
791                                 mhi_pm_m1_transition(mhi_cntrl);
792                                 break;
793                         case MHI_STATE_M3:
794                                 mhi_pm_m3_transition(mhi_cntrl);
795                                 break;
796                         case MHI_STATE_SYS_ERR:
797                         {
798                                 enum mhi_pm_state new_state;
799
800                                 dev_dbg(dev, "System error detected\n");
801                                 write_lock_irq(&mhi_cntrl->pm_lock);
802                                 new_state = mhi_tryset_pm_state(mhi_cntrl,
803                                                         MHI_PM_SYS_ERR_DETECT);
804                                 write_unlock_irq(&mhi_cntrl->pm_lock);
805                                 if (new_state == MHI_PM_SYS_ERR_DETECT)
806                                         mhi_pm_sys_err_handler(mhi_cntrl);
807                                 break;
808                         }
809                         default:
810                                 dev_err(dev, "Invalid state: %s\n",
811                                         TO_MHI_STATE_STR(new_state));
812                         }
813
814                         break;
815                 }
816                 case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
817                         mhi_process_cmd_completion(mhi_cntrl, local_rp);
818                         break;
819                 case MHI_PKT_TYPE_EE_EVENT:
820                 {
821                         enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
822                         enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
823
824                         dev_dbg(dev, "Received EE event: %s\n",
825                                 TO_MHI_EXEC_STR(event));
826                         switch (event) {
827                         case MHI_EE_SBL:
828                                 st = DEV_ST_TRANSITION_SBL;
829                                 break;
830                         case MHI_EE_WFW:
831                         case MHI_EE_AMSS:
832                                 st = DEV_ST_TRANSITION_MISSION_MODE;
833                                 break;
834                         case MHI_EE_RDDM:
835                                 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
836                                 write_lock_irq(&mhi_cntrl->pm_lock);
837                                 mhi_cntrl->ee = event;
838                                 write_unlock_irq(&mhi_cntrl->pm_lock);
839                                 wake_up_all(&mhi_cntrl->state_event);
840                                 break;
841                         default:
842                                 dev_err(dev,
843                                         "Unhandled EE event: 0x%x\n", event);
844                         }
845                         if (st != DEV_ST_TRANSITION_MAX)
846                                 mhi_queue_state_transition(mhi_cntrl, st);
847
848                         break;
849                 }
850                 case MHI_PKT_TYPE_TX_EVENT:
851                         chan = MHI_TRE_GET_EV_CHID(local_rp);
852
853                         WARN_ON(chan >= mhi_cntrl->max_chan);
854
855                         /*
856                          * Only process the event ring elements whose channel
857                          * ID is within the maximum supported range.
858                          */
859                         if (chan < mhi_cntrl->max_chan) {
860                                 mhi_chan = &mhi_cntrl->mhi_chan[chan];
861                                 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
862                                 event_quota--;
863                         }
864                         break;
865                 default:
866                         dev_err(dev, "Unhandled event type: %d\n", type);
867                         break;
868                 }
869
870                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
871                 local_rp = ev_ring->rp;
872                 dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
873                 count++;
874         }
875
876         read_lock_bh(&mhi_cntrl->pm_lock);
877         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
878                 mhi_ring_er_db(mhi_event);
879         read_unlock_bh(&mhi_cntrl->pm_lock);
880
881         return count;
882 }
883
884 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
885                                 struct mhi_event *mhi_event,
886                                 u32 event_quota)
887 {
888         struct mhi_tre *dev_rp, *local_rp;
889         struct mhi_ring *ev_ring = &mhi_event->ring;
890         struct mhi_event_ctxt *er_ctxt =
891                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
892         int count = 0;
893         u32 chan;
894         struct mhi_chan *mhi_chan;
895
896         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
897                 return -EIO;
898
899         dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
900         local_rp = ev_ring->rp;
901
902         while (dev_rp != local_rp && event_quota > 0) {
903                 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
904
905                 chan = MHI_TRE_GET_EV_CHID(local_rp);
906
907                 WARN_ON(chan >= mhi_cntrl->max_chan);
908
909                 /*
910                  * Only process the event ring elements whose channel
911                  * ID is within the maximum supported range.
912                  */
913                 if (chan < mhi_cntrl->max_chan) {
914                         mhi_chan = &mhi_cntrl->mhi_chan[chan];
915
916                         if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
917                                 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
918                                 event_quota--;
919                         } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
920                                 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
921                                 event_quota--;
922                         }
923                 }
924
925                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
926                 local_rp = ev_ring->rp;
927                 dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
928                 count++;
929         }
930         read_lock_bh(&mhi_cntrl->pm_lock);
931         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
932                 mhi_ring_er_db(mhi_event);
933         read_unlock_bh(&mhi_cntrl->pm_lock);
934
935         return count;
936 }
937
938 void mhi_ev_task(unsigned long data)
939 {
940         struct mhi_event *mhi_event = (struct mhi_event *)data;
941         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
942
943         /* process all pending events */
944         spin_lock_bh(&mhi_event->lock);
945         mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
946         spin_unlock_bh(&mhi_event->lock);
947 }
948
949 void mhi_ctrl_ev_task(unsigned long data)
950 {
951         struct mhi_event *mhi_event = (struct mhi_event *)data;
952         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
953         struct device *dev = &mhi_cntrl->mhi_dev->dev;
954         enum mhi_state state;
955         enum mhi_pm_state pm_state = 0;
956         int ret;
957
958         /*
959          * We can check PM state w/o a lock here because there is no way
960          * PM state can change from reg access valid to no access while this
961          * thread is executing.
962          */
963         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
964                 /*
965                  * We may have a pending event but are not allowed to
966                  * process it since we are probably in a suspended state,
967                  * so trigger a resume.
968                  */
969                 mhi_trigger_resume(mhi_cntrl);
970
971                 return;
972         }
973
974         /* Process ctrl events */
975         ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
976
977         /*
978          * We received an IRQ but no events to process; maybe the device went to
979          * SYS_ERR state? Check the state to confirm.
980          */
981         if (!ret) {
982                 write_lock_irq(&mhi_cntrl->pm_lock);
983                 state = mhi_get_mhi_state(mhi_cntrl);
984                 if (state == MHI_STATE_SYS_ERR) {
985                         dev_dbg(dev, "System error detected\n");
986                         pm_state = mhi_tryset_pm_state(mhi_cntrl,
987                                                        MHI_PM_SYS_ERR_DETECT);
988                 }
989                 write_unlock_irq(&mhi_cntrl->pm_lock);
990                 if (pm_state == MHI_PM_SYS_ERR_DETECT)
991                         mhi_pm_sys_err_handler(mhi_cntrl);
992         }
993 }
994
995 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
996                              struct mhi_ring *ring)
997 {
998         void *tmp = ring->wp + ring->el_size;
999
1000         if (tmp >= (ring->base + ring->len))
1001                 tmp = ring->base;
1002
1003         return (tmp == ring->rp);
1004 }
1005
1006 static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
1007                      enum dma_data_direction dir, enum mhi_flags mflags)
1008 {
1009         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1010         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1011                                                              mhi_dev->dl_chan;
1012         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1013         unsigned long flags;
1014         int ret;
1015
1016         if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1017                 return -EIO;
1018
1019         read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1020
1021         ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
1022         if (unlikely(ret)) {
1023                 ret = -ENOMEM;
1024                 goto exit_unlock;
1025         }
1026
1027         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
1028         if (unlikely(ret))
1029                 goto exit_unlock;
1030
1031         /* trigger M3 exit if necessary */
1032         if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1033                 mhi_trigger_resume(mhi_cntrl);
1034
1035         /* Assert dev_wake (to exit/prevent M1/M2) */
1036         mhi_cntrl->wake_toggle(mhi_cntrl);
1037
1038         if (mhi_chan->dir == DMA_TO_DEVICE)
1039                 atomic_inc(&mhi_cntrl->pending_pkts);
1040
1041         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1042                 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1043
1044 exit_unlock:
1045         read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1046
1047         return ret;
1048 }
1049
1050 int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1051                   struct sk_buff *skb, size_t len, enum mhi_flags mflags)
1052 {
1053         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1054                                                              mhi_dev->dl_chan;
1055         struct mhi_buf_info buf_info = { };
1056
1057         buf_info.v_addr = skb->data;
1058         buf_info.cb_buf = skb;
1059         buf_info.len = len;
1060
1061         if (unlikely(mhi_chan->pre_alloc))
1062                 return -EINVAL;
1063
1064         return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1065 }
1066 EXPORT_SYMBOL_GPL(mhi_queue_skb);
1067
1068 int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1069                   struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
1070 {
1071         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1072                                                              mhi_dev->dl_chan;
1073         struct mhi_buf_info buf_info = { };
1074
1075         buf_info.p_addr = mhi_buf->dma_addr;
1076         buf_info.cb_buf = mhi_buf;
1077         buf_info.pre_mapped = true;
1078         buf_info.len = len;
1079
1080         if (unlikely(mhi_chan->pre_alloc))
1081                 return -EINVAL;
1082
1083         return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1084 }
1085 EXPORT_SYMBOL_GPL(mhi_queue_dma);
1086
1087 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1088                         struct mhi_buf_info *info, enum mhi_flags flags)
1089 {
1090         struct mhi_ring *buf_ring, *tre_ring;
1091         struct mhi_tre *mhi_tre;
1092         struct mhi_buf_info *buf_info;
1093         int eot, eob, chain, bei;
1094         int ret;
1095
1096         buf_ring = &mhi_chan->buf_ring;
1097         tre_ring = &mhi_chan->tre_ring;
1098
1099         buf_info = buf_ring->wp;
1100         WARN_ON(buf_info->used);
1101         buf_info->pre_mapped = info->pre_mapped;
1102         if (info->pre_mapped)
1103                 buf_info->p_addr = info->p_addr;
1104         else
1105                 buf_info->v_addr = info->v_addr;
1106         buf_info->cb_buf = info->cb_buf;
1107         buf_info->wp = tre_ring->wp;
1108         buf_info->dir = mhi_chan->dir;
1109         buf_info->len = info->len;
1110
1111         if (!info->pre_mapped) {
1112                 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1113                 if (ret)
1114                         return ret;
1115         }
1116
1117         eob = !!(flags & MHI_EOB);
1118         eot = !!(flags & MHI_EOT);
1119         chain = !!(flags & MHI_CHAIN);
1120         bei = !!(mhi_chan->intmod);
1121
1122         mhi_tre = tre_ring->wp;
1123         mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1124         mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
1125         mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
1126
1127         /* increment WP */
1128         mhi_add_ring_element(mhi_cntrl, tre_ring);
1129         mhi_add_ring_element(mhi_cntrl, buf_ring);
1130
1131         return 0;
1132 }
1133
1134 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1135                   void *buf, size_t len, enum mhi_flags mflags)
1136 {
1137         struct mhi_buf_info buf_info = { };
1138
1139         buf_info.v_addr = buf;
1140         buf_info.cb_buf = buf;
1141         buf_info.len = len;
1142
1143         return mhi_queue(mhi_dev, &buf_info, dir, mflags);
1144 }
1145 EXPORT_SYMBOL_GPL(mhi_queue_buf);
1146
1147 bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
1148 {
1149         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1150         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
1151                                         mhi_dev->ul_chan : mhi_dev->dl_chan;
1152         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1153
1154         return mhi_is_ring_full(mhi_cntrl, tre_ring);
1155 }
1156 EXPORT_SYMBOL_GPL(mhi_queue_is_full);
1157
1158 int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1159                  struct mhi_chan *mhi_chan,
1160                  enum mhi_cmd_type cmd)
1161 {
1162         struct mhi_tre *cmd_tre = NULL;
1163         struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1164         struct mhi_ring *ring = &mhi_cmd->ring;
1165         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1166         int chan = 0;
1167
1168         if (mhi_chan)
1169                 chan = mhi_chan->chan;
1170
1171         spin_lock_bh(&mhi_cmd->lock);
1172         if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1173                 spin_unlock_bh(&mhi_cmd->lock);
1174                 return -ENOMEM;
1175         }
1176
1177         /* prepare the cmd tre */
1178         cmd_tre = ring->wp;
1179         switch (cmd) {
1180         case MHI_CMD_RESET_CHAN:
1181                 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
1182                 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
1183                 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
1184                 break;
1185         case MHI_CMD_START_CHAN:
1186                 cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
1187                 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
1188                 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
1189                 break;
1190         default:
1191                 dev_err(dev, "Command not supported\n");
1192                 break;
1193         }
1194
1195         /* queue to hardware */
1196         mhi_add_ring_element(mhi_cntrl, ring);
1197         read_lock_bh(&mhi_cntrl->pm_lock);
1198         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1199                 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1200         read_unlock_bh(&mhi_cntrl->pm_lock);
1201         spin_unlock_bh(&mhi_cmd->lock);
1202
1203         return 0;
1204 }
1205
1206 static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1207                                     struct mhi_chan *mhi_chan)
1208 {
1209         int ret;
1210         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1211
1212         dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
1213
1214         /* no more event processing for this channel */
1215         mutex_lock(&mhi_chan->mutex);
1216         write_lock_irq(&mhi_chan->lock);
1217         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
1218             mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
1219                 write_unlock_irq(&mhi_chan->lock);
1220                 mutex_unlock(&mhi_chan->mutex);
1221                 return;
1222         }
1223
1224         mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1225         write_unlock_irq(&mhi_chan->lock);
1226
1227         reinit_completion(&mhi_chan->completion);
1228         read_lock_bh(&mhi_cntrl->pm_lock);
1229         if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1230                 read_unlock_bh(&mhi_cntrl->pm_lock);
1231                 goto error_invalid_state;
1232         }
1233
1234         mhi_cntrl->wake_toggle(mhi_cntrl);
1235         read_unlock_bh(&mhi_cntrl->pm_lock);
1236
1237         mhi_cntrl->runtime_get(mhi_cntrl);
1238         mhi_cntrl->runtime_put(mhi_cntrl);
1239         ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
1240         if (ret)
1241                 goto error_invalid_state;
1242
1243         /* even if it fails we will still reset */
1244         ret = wait_for_completion_timeout(&mhi_chan->completion,
1245                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1246         if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
1247                 dev_err(dev,
1248                         "Failed to receive cmd completion, still resetting\n");
1249
1250 error_invalid_state:
1251         if (!mhi_chan->offload_ch) {
1252                 mhi_reset_chan(mhi_cntrl, mhi_chan);
1253                 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1254         }
1255         dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
1256         mutex_unlock(&mhi_chan->mutex);
1257 }
1258
1259 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1260                         struct mhi_chan *mhi_chan)
1261 {
1262         int ret = 0;
1263         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1264
1265         dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
1266
1267         if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1268                 dev_err(dev,
1269                         "Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
1270                         TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
1271                         mhi_chan->name);
1272                 return -ENOTCONN;
1273         }
1274
1275         mutex_lock(&mhi_chan->mutex);
1276
1277         /* If the channel is not in the disabled state, do not allow it to start */
1278         if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
1279                 ret = -EIO;
1280                 dev_dbg(dev, "channel: %d is not in disabled state\n",
1281                         mhi_chan->chan);
1282                 goto error_init_chan;
1283         }
1284
1285         /* Check if the client manages the channel context for offload channels */
1286         if (!mhi_chan->offload_ch) {
1287                 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1288                 if (ret)
1289                         goto error_init_chan;
1290         }
1291
1292         reinit_completion(&mhi_chan->completion);
1293         read_lock_bh(&mhi_cntrl->pm_lock);
1294         if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1295                 read_unlock_bh(&mhi_cntrl->pm_lock);
1296                 ret = -EIO;
1297                 goto error_pm_state;
1298         }
1299
1300         mhi_cntrl->wake_toggle(mhi_cntrl);
1301         read_unlock_bh(&mhi_cntrl->pm_lock);
1302         mhi_cntrl->runtime_get(mhi_cntrl);
1303         mhi_cntrl->runtime_put(mhi_cntrl);
1304
1305         ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
1306         if (ret)
1307                 goto error_pm_state;
1308
1309         ret = wait_for_completion_timeout(&mhi_chan->completion,
1310                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1311         if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
1312                 ret = -EIO;
1313                 goto error_pm_state;
1314         }
1315
1316         write_lock_irq(&mhi_chan->lock);
1317         mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
1318         write_unlock_irq(&mhi_chan->lock);
1319
1320         /* Pre-allocate buffers for the xfer ring */
1321         if (mhi_chan->pre_alloc) {
1322                 int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1323                                                        &mhi_chan->tre_ring);
1324                 size_t len = mhi_cntrl->buffer_len;
1325
1326                 while (nr_el--) {
1327                         void *buf;
1328                         struct mhi_buf_info info = { };
1329                         buf = kmalloc(len, GFP_KERNEL);
1330                         if (!buf) {
1331                                 ret = -ENOMEM;
1332                                 goto error_pre_alloc;
1333                         }
1334
1335                         /* Prepare transfer descriptors */
1336                         info.v_addr = buf;
1337                         info.cb_buf = buf;
1338                         info.len = len;
1339                         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1340                         if (ret) {
1341                                 kfree(buf);
1342                                 goto error_pre_alloc;
1343                         }
1344                 }
1345
1346                 read_lock_bh(&mhi_cntrl->pm_lock);
1347                 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1348                         read_lock_irq(&mhi_chan->lock);
1349                         mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1350                         read_unlock_irq(&mhi_chan->lock);
1351                 }
1352                 read_unlock_bh(&mhi_cntrl->pm_lock);
1353         }
1354
1355         mutex_unlock(&mhi_chan->mutex);
1356
1357         dev_dbg(dev, "Chan: %d successfully moved to start state\n",
1358                 mhi_chan->chan);
1359
1360         return 0;
1361
1362 error_pm_state:
1363         if (!mhi_chan->offload_ch)
1364                 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1365
1366 error_init_chan:
1367         mutex_unlock(&mhi_chan->mutex);
1368
1369         return ret;
1370
1371 error_pre_alloc:
1372         mutex_unlock(&mhi_chan->mutex);
1373         __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1374
1375         return ret;
1376 }
1377
1378 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1379                                   struct mhi_event *mhi_event,
1380                                   struct mhi_event_ctxt *er_ctxt,
1381                                   int chan)
1382
1383 {
1384         struct mhi_tre *dev_rp, *local_rp;
1385         struct mhi_ring *ev_ring;
1386         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1387         unsigned long flags;
1388
1389         dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
1390
1391         ev_ring = &mhi_event->ring;
1392
1393         /* Mark all pending events related to the channel as STALE events */
1394         spin_lock_irqsave(&mhi_event->lock, flags);
1395         dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
1396
1397         local_rp = ev_ring->rp;
1398         while (dev_rp != local_rp) {
1399                 if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
1400                     chan == MHI_TRE_GET_EV_CHID(local_rp))
1401                         local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
1402                                         MHI_PKT_TYPE_STALE_EVENT);
1403                 local_rp++;
1404                 if (local_rp == (ev_ring->base + ev_ring->len))
1405                         local_rp = ev_ring->base;
1406         }
1407
1408         dev_dbg(dev, "Finished marking events as stale events\n");
1409         spin_unlock_irqrestore(&mhi_event->lock, flags);
1410 }
1411
1412 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1413                                 struct mhi_chan *mhi_chan)
1414 {
1415         struct mhi_ring *buf_ring, *tre_ring;
1416         struct mhi_result result;
1417
1418         /* Reset any pending buffers */
1419         buf_ring = &mhi_chan->buf_ring;
1420         tre_ring = &mhi_chan->tre_ring;
1421         result.transaction_status = -ENOTCONN;
1422         result.bytes_xferd = 0;
1423         while (tre_ring->rp != tre_ring->wp) {
1424                 struct mhi_buf_info *buf_info = buf_ring->rp;
1425
1426                 if (mhi_chan->dir == DMA_TO_DEVICE)
1427                         atomic_dec(&mhi_cntrl->pending_pkts);
1428
1429                 if (!buf_info->pre_mapped)
1430                         mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1431
1432                 mhi_del_ring_element(mhi_cntrl, buf_ring);
1433                 mhi_del_ring_element(mhi_cntrl, tre_ring);
1434
1435                 if (mhi_chan->pre_alloc) {
1436                         kfree(buf_info->cb_buf);
1437                 } else {
1438                         result.buf_addr = buf_info->cb_buf;
1439                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1440                 }
1441         }
1442 }
1443
1444 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1445 {
1446         struct mhi_event *mhi_event;
1447         struct mhi_event_ctxt *er_ctxt;
1448         int chan = mhi_chan->chan;
1449
1450         /* Nothing to reset, client doesn't queue buffers */
1451         if (mhi_chan->offload_ch)
1452                 return;
1453
1454         read_lock_bh(&mhi_cntrl->pm_lock);
1455         mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1456         er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1457
1458         mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1459
1460         mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1461
1462         read_unlock_bh(&mhi_cntrl->pm_lock);
1463 }
1464
1465 /* Move channel to start state */
1466 int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
1467 {
1468         int ret, dir;
1469         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1470         struct mhi_chan *mhi_chan;
1471
1472         for (dir = 0; dir < 2; dir++) {
1473                 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1474                 if (!mhi_chan)
1475                         continue;
1476
1477                 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
1478                 if (ret)
1479                         goto error_open_chan;
1480         }
1481
1482         return 0;
1483
1484 error_open_chan:
1485         for (--dir; dir >= 0; dir--) {
1486                 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1487                 if (!mhi_chan)
1488                         continue;
1489
1490                 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1491         }
1492
1493         return ret;
1494 }
1495 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
1496
1497 void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
1498 {
1499         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1500         struct mhi_chan *mhi_chan;
1501         int dir;
1502
1503         for (dir = 0; dir < 2; dir++) {
1504                 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1505                 if (!mhi_chan)
1506                         continue;
1507
1508                 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1509         }
1510 }
1511 EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
1512
1513 int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
1514 {
1515         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1516         struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
1517         struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1518         int ret;
1519
1520         spin_lock_bh(&mhi_event->lock);
1521         ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
1522         spin_unlock_bh(&mhi_event->lock);
1523
1524         return ret;
1525 }
1526 EXPORT_SYMBOL_GPL(mhi_poll);