linux-2.6-microblaze.git: drivers/bus/mhi/core/main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4  *
5  */
6
7 #include <linux/device.h>
8 #include <linux/dma-direction.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/interrupt.h>
11 #include <linux/list.h>
12 #include <linux/mhi.h>
13 #include <linux/module.h>
14 #include <linux/skbuff.h>
15 #include <linux/slab.h>
16 #include "internal.h"
17
18 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
19                               void __iomem *base, u32 offset, u32 *out)
20 {
21         return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out);
22 }
23
24 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
25                                     void __iomem *base, u32 offset,
26                                     u32 mask, u32 shift, u32 *out)
27 {
28         u32 tmp;
29         int ret;
30
31         ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
32         if (ret)
33                 return ret;
34
35         *out = (tmp & mask) >> shift;
36
37         return 0;
38 }
39
40 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
41                    u32 offset, u32 val)
42 {
43         mhi_cntrl->write_reg(mhi_cntrl, base + offset, val);
44 }
45
46 void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
47                          u32 offset, u32 mask, u32 shift, u32 val)
48 {
49         int ret;
50         u32 tmp;
51
52         ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
53         if (ret)
54                 return;
55
56         tmp &= ~mask;
57         tmp |= (val << shift);
58         mhi_write_reg(mhi_cntrl, base, offset, tmp);
59 }
60
61 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
62                   dma_addr_t db_val)
63 {
64         mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
65         mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
66 }
67
68 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
69                      struct db_cfg *db_cfg,
70                      void __iomem *db_addr,
71                      dma_addr_t db_val)
72 {
73         if (db_cfg->db_mode) {
74                 db_cfg->db_val = db_val;
75                 mhi_write_db(mhi_cntrl, db_addr, db_val);
76                 db_cfg->db_mode = 0;
77         }
78 }
79
80 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
81                              struct db_cfg *db_cfg,
82                              void __iomem *db_addr,
83                              dma_addr_t db_val)
84 {
85         db_cfg->db_val = db_val;
86         mhi_write_db(mhi_cntrl, db_addr, db_val);
87 }
88
89 void mhi_ring_er_db(struct mhi_event *mhi_event)
90 {
91         struct mhi_ring *ring = &mhi_event->ring;
92
93         mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
94                                      ring->db_addr, *ring->ctxt_wp);
95 }
96
97 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
98 {
99         dma_addr_t db;
100         struct mhi_ring *ring = &mhi_cmd->ring;
101
102         db = ring->iommu_base + (ring->wp - ring->base);
103         *ring->ctxt_wp = db;
104         mhi_write_db(mhi_cntrl, ring->db_addr, db);
105 }
106
107 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
108                       struct mhi_chan *mhi_chan)
109 {
110         struct mhi_ring *ring = &mhi_chan->tre_ring;
111         dma_addr_t db;
112
113         db = ring->iommu_base + (ring->wp - ring->base);
114         *ring->ctxt_wp = db;
115         mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
116                                     ring->db_addr, db);
117 }
118
119 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
120 {
121         u32 exec;
122         int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
123
124         return (ret) ? MHI_EE_MAX : exec;
125 }
126 EXPORT_SYMBOL_GPL(mhi_get_exec_env);
127
128 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
129 {
130         u32 state;
131         int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
132                                      MHISTATUS_MHISTATE_MASK,
133                                      MHISTATUS_MHISTATE_SHIFT, &state);
134         return ret ? MHI_STATE_MAX : state;
135 }
136 EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
137
138 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
139                          struct mhi_buf_info *buf_info)
140 {
141         buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
142                                           buf_info->v_addr, buf_info->len,
143                                           buf_info->dir);
144         if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
145                 return -ENOMEM;
146
147         return 0;
148 }
149
150 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
151                           struct mhi_buf_info *buf_info)
152 {
153         void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
154                                        &buf_info->p_addr, GFP_ATOMIC);
155
156         if (!buf)
157                 return -ENOMEM;
158
159         if (buf_info->dir == DMA_TO_DEVICE)
160                 memcpy(buf, buf_info->v_addr, buf_info->len);
161
162         buf_info->bb_addr = buf;
163
164         return 0;
165 }
166
167 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
168                             struct mhi_buf_info *buf_info)
169 {
170         dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
171                          buf_info->dir);
172 }
173
174 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
175                              struct mhi_buf_info *buf_info)
176 {
177         if (buf_info->dir == DMA_FROM_DEVICE)
178                 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
179
180         mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
181                           buf_info->p_addr);
182 }
183
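/* Number of free elements in the ring; one slot is always kept empty */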
184 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
185                                       struct mhi_ring *ring)
186 {
187         int nr_el;
188
189         if (ring->wp < ring->rp) {
190                 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
191         } else {
192                 nr_el = (ring->rp - ring->base) / ring->el_size;
193                 nr_el += ((ring->base + ring->len - ring->wp) /
194                           ring->el_size) - 1;
195         }
196
197         return nr_el;
198 }
199
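/*
 * Translate a ring address from the device's view (IOMMU address) to
 * a host virtual address.
 */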
200 static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
201 {
202         return (addr - ring->iommu_base) + ring->base;
203 }
204
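/* Advance the ring write pointer by one element, wrapping at the end */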
205 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
206                                  struct mhi_ring *ring)
207 {
208         ring->wp += ring->el_size;
209         if (ring->wp >= (ring->base + ring->len))
210                 ring->wp = ring->base;
211         /* Make the WP update visible to other cores */
212         smp_wmb();
213 }
214
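/* Advance the ring read pointer by one element, wrapping at the end */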
215 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
216                                  struct mhi_ring *ring)
217 {
218         ring->rp += ring->el_size;
219         if (ring->rp >= (ring->base + ring->len))
220                 ring->rp = ring->base;
221         /* Make the RP update visible to other cores */
222         smp_wmb();
223 }
224
225 int mhi_destroy_device(struct device *dev, void *data)
226 {
227         struct mhi_device *mhi_dev;
228         struct mhi_controller *mhi_cntrl;
229
230         if (dev->bus != &mhi_bus_type)
231                 return 0;
232
233         mhi_dev = to_mhi_device(dev);
234         mhi_cntrl = mhi_dev->mhi_cntrl;
235
236         /* Only destroy virtual devices that are attached to the bus */
237         if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
238                 return 0;
239
240         /*
241          * For the suspend and resume case, this function will get called
242          * without mhi_unregister_controller(). Hence, we need to drop the
243          * references to mhi_dev created for ul and dl channels. We can
244          * be sure that there will be no instances of mhi_dev left after
245          * this.
246          */
247         if (mhi_dev->ul_chan)
248                 put_device(&mhi_dev->ul_chan->mhi_dev->dev);
249
250         if (mhi_dev->dl_chan)
251                 put_device(&mhi_dev->dl_chan->mhi_dev->dev);
252
253         dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
254                  mhi_dev->name);
255
256         /* Notify the client and remove the device from MHI bus */
257         device_del(dev);
258         put_device(dev);
259
260         return 0;
261 }
262
263 void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
264 {
265         struct mhi_driver *mhi_drv;
266
267         if (!mhi_dev->dev.driver)
268                 return;
269
270         mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
271
272         if (mhi_drv->status_cb)
273                 mhi_drv->status_cb(mhi_dev, cb_reason);
274 }
275 EXPORT_SYMBOL_GPL(mhi_notify);
276
277 /* Bind MHI channels to MHI devices */
278 void mhi_create_devices(struct mhi_controller *mhi_cntrl)
279 {
280         struct mhi_chan *mhi_chan;
281         struct mhi_device *mhi_dev;
282         struct device *dev = &mhi_cntrl->mhi_dev->dev;
283         int i, ret;
284
285         mhi_chan = mhi_cntrl->mhi_chan;
286         for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
287                 if (!mhi_chan->configured || mhi_chan->mhi_dev ||
288                     !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
289                         continue;
290                 mhi_dev = mhi_alloc_device(mhi_cntrl);
291                 if (IS_ERR(mhi_dev))
292                         return;
293
294                 mhi_dev->dev_type = MHI_DEVICE_XFER;
295                 switch (mhi_chan->dir) {
296                 case DMA_TO_DEVICE:
297                         mhi_dev->ul_chan = mhi_chan;
298                         mhi_dev->ul_chan_id = mhi_chan->chan;
299                         break;
300                 case DMA_FROM_DEVICE:
301                         /* We use dl_chan as offload channels */
302                         mhi_dev->dl_chan = mhi_chan;
303                         mhi_dev->dl_chan_id = mhi_chan->chan;
304                         break;
305                 default:
306                         dev_err(dev, "Direction not supported\n");
307                         put_device(&mhi_dev->dev);
308                         return;
309                 }
310
311                 get_device(&mhi_dev->dev);
312                 mhi_chan->mhi_dev = mhi_dev;
313
314                 /* Check next channel if it matches */
315                 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
316                         if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
317                                 i++;
318                                 mhi_chan++;
319                                 if (mhi_chan->dir == DMA_TO_DEVICE) {
320                                         mhi_dev->ul_chan = mhi_chan;
321                                         mhi_dev->ul_chan_id = mhi_chan->chan;
322                                 } else {
323                                         mhi_dev->dl_chan = mhi_chan;
324                                         mhi_dev->dl_chan_id = mhi_chan->chan;
325                                 }
326                                 get_device(&mhi_dev->dev);
327                                 mhi_chan->mhi_dev = mhi_dev;
328                         }
329                 }
330
331                 /* The channel name is the same for both UL and DL */
332                 mhi_dev->name = mhi_chan->name;
333                 dev_set_name(&mhi_dev->dev, "%s_%s",
334                              dev_name(&mhi_cntrl->mhi_dev->dev),
335                              mhi_dev->name);
336
337                 /* Init wakeup source if available */
338                 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
339                         device_init_wakeup(&mhi_dev->dev, true);
340
341                 ret = device_add(&mhi_dev->dev);
342                 if (ret)
343                         put_device(&mhi_dev->dev);
344         }
345 }
346
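/*
 * IRQ handler for a dedicated event ring: schedules the event tasklet,
 * or notifies the client for client-managed event rings.
 */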
347 irqreturn_t mhi_irq_handler(int irq_number, void *dev)
348 {
349         struct mhi_event *mhi_event = dev;
350         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
351         struct mhi_event_ctxt *er_ctxt =
352                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
353         struct mhi_ring *ev_ring = &mhi_event->ring;
354         void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
355
356         /* Only proceed if event ring has pending events */
357         if (ev_ring->rp == dev_rp)
358                 return IRQ_HANDLED;
359
360         /* For client managed event ring, notify pending data */
361         if (mhi_event->cl_manage) {
362                 struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
363                 struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
364
365                 if (mhi_dev)
366                         mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
367         } else {
368                 tasklet_schedule(&mhi_event->task);
369         }
370
371         return IRQ_HANDLED;
372 }
373
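/*
 * Threaded handler for the controller interrupt: picks up execution
 * environment changes and SYS_ERR conditions reported by the device.
 */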
374 irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
375 {
376         struct mhi_controller *mhi_cntrl = priv;
377         struct device *dev = &mhi_cntrl->mhi_dev->dev;
378         enum mhi_state state = MHI_STATE_MAX;
379         enum mhi_pm_state pm_state = 0;
380         enum mhi_ee_type ee = 0;
381
382         write_lock_irq(&mhi_cntrl->pm_lock);
383         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
384                 write_unlock_irq(&mhi_cntrl->pm_lock);
385                 goto exit_intvec;
386         }
387
388         state = mhi_get_mhi_state(mhi_cntrl);
389         ee = mhi_cntrl->ee;
390         mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
391         dev_dbg(dev, "local ee:%s device ee:%s dev_state:%s\n",
392                 TO_MHI_EXEC_STR(mhi_cntrl->ee), TO_MHI_EXEC_STR(ee),
393                 TO_MHI_STATE_STR(state));
394
395         if (state == MHI_STATE_SYS_ERR) {
396                 dev_dbg(dev, "System error detected\n");
397                 pm_state = mhi_tryset_pm_state(mhi_cntrl,
398                                                MHI_PM_SYS_ERR_DETECT);
399         }
400         write_unlock_irq(&mhi_cntrl->pm_lock);
401
402         /* If the device supports RDDM, don't bother processing the SYS error */
403         if (mhi_cntrl->rddm_image) {
404                 /* host may be performing a device power down already */
405                 if (!mhi_is_active(mhi_cntrl))
406                         goto exit_intvec;
407
408                 if (mhi_cntrl->ee == MHI_EE_RDDM && mhi_cntrl->ee != ee) {
409                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
410                         wake_up_all(&mhi_cntrl->state_event);
411                 }
412                 goto exit_intvec;
413         }
414
415         if (pm_state == MHI_PM_SYS_ERR_DETECT) {
416                 wake_up_all(&mhi_cntrl->state_event);
417
418                 /* For fatal errors, we let controller decide next step */
419                 if (MHI_IN_PBL(ee))
420                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
421                 else
422                         mhi_pm_sys_err_handler(mhi_cntrl);
423         }
424
425 exit_intvec:
426
427         return IRQ_HANDLED;
428 }
429
430 irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
431 {
432         struct mhi_controller *mhi_cntrl = dev;
433
434         /* Wake up tasks waiting for a state change */
435         wake_up_all(&mhi_cntrl->state_event);
436
437         return IRQ_WAKE_THREAD;
438 }
439
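/*
 * Advance both the write pointer (including the context WP visible to
 * the device) and the read pointer of an event ring.
 */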
440 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
441                                         struct mhi_ring *ring)
442 {
443         dma_addr_t ctxt_wp;
444
445         /* Update the WP */
446         ring->wp += ring->el_size;
447         ctxt_wp = *ring->ctxt_wp + ring->el_size;
448
449         if (ring->wp >= (ring->base + ring->len)) {
450                 ring->wp = ring->base;
451                 ctxt_wp = ring->iommu_base;
452         }
453
454         *ring->ctxt_wp = ctxt_wp;
455
456         /* Update the RP */
457         ring->rp += ring->el_size;
458         if (ring->rp >= (ring->base + ring->len))
459                 ring->rp = ring->base;
460
461         /* Update to all cores */
462         smp_wmb();
463 }
464
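/*
 * Process a transfer completion event for a channel: complete the
 * associated buffers, notify the client, and handle OOB/DB_MODE
 * doorbell events.
 */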
465 static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
466                             struct mhi_tre *event,
467                             struct mhi_chan *mhi_chan)
468 {
469         struct mhi_ring *buf_ring, *tre_ring;
470         struct device *dev = &mhi_cntrl->mhi_dev->dev;
471         struct mhi_result result;
472         unsigned long flags = 0;
473         u32 ev_code;
474
475         ev_code = MHI_TRE_GET_EV_CODE(event);
476         buf_ring = &mhi_chan->buf_ring;
477         tre_ring = &mhi_chan->tre_ring;
478
479         result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
480                 -EOVERFLOW : 0;
481
482         /*
483          * If it's a DB Event then we need to grab the lock, with
484          * preemption disabled and as a writer, because we have to
485          * update the DB register and there is a chance that another
486          * thread could be doing the same.
487          */
488         if (ev_code >= MHI_EV_CC_OOB)
489                 write_lock_irqsave(&mhi_chan->lock, flags);
490         else
491                 read_lock_bh(&mhi_chan->lock);
492
493         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
494                 goto end_process_tx_event;
495
496         switch (ev_code) {
497         case MHI_EV_CC_OVERFLOW:
498         case MHI_EV_CC_EOB:
499         case MHI_EV_CC_EOT:
500         {
501                 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
502                 struct mhi_tre *local_rp, *ev_tre;
503                 void *dev_rp;
504                 struct mhi_buf_info *buf_info;
505                 u16 xfer_len;
506
507                 /* Get the TRE this event points to */
508                 ev_tre = mhi_to_virtual(tre_ring, ptr);
509
510                 dev_rp = ev_tre + 1;
511                 if (dev_rp >= (tre_ring->base + tre_ring->len))
512                         dev_rp = tre_ring->base;
513
514                 result.dir = mhi_chan->dir;
515
516                 local_rp = tre_ring->rp;
517                 while (local_rp != dev_rp) {
518                         buf_info = buf_ring->rp;
519                         /* If it's the last TRE, get length from the event */
520                         if (local_rp == ev_tre)
521                                 xfer_len = MHI_TRE_GET_EV_LEN(event);
522                         else
523                                 xfer_len = buf_info->len;
524
525                         /* Unmap if it's not pre-mapped by client */
526                         if (likely(!buf_info->pre_mapped))
527                                 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
528
529                         result.buf_addr = buf_info->cb_buf;
530
531                         /* truncate to buf len if xfer_len is larger */
532                         result.bytes_xferd =
533                                 min_t(u16, xfer_len, buf_info->len);
534                         mhi_del_ring_element(mhi_cntrl, buf_ring);
535                         mhi_del_ring_element(mhi_cntrl, tre_ring);
536                         local_rp = tre_ring->rp;
537
538                         /* notify client */
539                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
540
541                         if (mhi_chan->dir == DMA_TO_DEVICE)
542                                 atomic_dec(&mhi_cntrl->pending_pkts);
543
544                         /*
545                          * Recycle the buffer if it is pre-allocated. If there
546                          * is an error, there is not much we can do apart from
547                          * dropping the packet.
548                          */
549                         if (mhi_chan->pre_alloc) {
550                                 if (mhi_queue_buf(mhi_chan->mhi_dev,
551                                                   mhi_chan->dir,
552                                                   buf_info->cb_buf,
553                                                   buf_info->len, MHI_EOT)) {
554                                         dev_err(dev,
555                                                 "Error recycling buffer for chan:%d\n",
556                                                 mhi_chan->chan);
557                                         kfree(buf_info->cb_buf);
558                                 }
559                         }
560                 }
561                 break;
562         } /* CC_EOT */
563         case MHI_EV_CC_OOB:
564         case MHI_EV_CC_DB_MODE:
565         {
566                 unsigned long flags;
567
568                 mhi_chan->db_cfg.db_mode = 1;
569                 read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
570                 if (tre_ring->wp != tre_ring->rp &&
571                     MHI_DB_ACCESS_VALID(mhi_cntrl)) {
572                         mhi_ring_chan_db(mhi_cntrl, mhi_chan);
573                 }
574                 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
575                 break;
576         }
577         case MHI_EV_CC_BAD_TRE:
578         default:
579                 dev_err(dev, "Unknown event 0x%x\n", ev_code);
580                 break;
581         } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
582
583 end_process_tx_event:
584         if (ev_code >= MHI_EV_CC_OOB)
585                 write_unlock_irqrestore(&mhi_chan->lock, flags);
586         else
587                 read_unlock_bh(&mhi_chan->lock);
588
589         return 0;
590 }
591
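/*
 * Process an RSC transfer completion event; the event cookie is the
 * offset of the buffer descriptor within the buffer ring.
 */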
592 static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
593                            struct mhi_tre *event,
594                            struct mhi_chan *mhi_chan)
595 {
596         struct mhi_ring *buf_ring, *tre_ring;
597         struct mhi_buf_info *buf_info;
598         struct mhi_result result;
599         int ev_code;
600         u32 cookie; /* offset to local descriptor */
601         u16 xfer_len;
602
603         buf_ring = &mhi_chan->buf_ring;
604         tre_ring = &mhi_chan->tre_ring;
605
606         ev_code = MHI_TRE_GET_EV_CODE(event);
607         cookie = MHI_TRE_GET_EV_COOKIE(event);
608         xfer_len = MHI_TRE_GET_EV_LEN(event);
609
610         /* Received an out-of-bounds cookie */
611         WARN_ON(cookie >= buf_ring->len);
612
613         buf_info = buf_ring->base + cookie;
614
615         result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
616                 -EOVERFLOW : 0;
617
618         /* truncate to buf len if xfer_len is larger */
619         result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
620         result.buf_addr = buf_info->cb_buf;
621         result.dir = mhi_chan->dir;
622
623         read_lock_bh(&mhi_chan->lock);
624
625         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
626                 goto end_process_rsc_event;
627
628         WARN_ON(!buf_info->used);
629
630         /* notify the client */
631         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
632
633         /*
634          * Note: We're arbitrarily incrementing RP even though the completion
635          * packet we processed might not be the one RP points to. We can do
636          * this because the device is guaranteed to cache descriptors in the
637          * order it receives them, so even though the completion event is for
638          * a different descriptor we can reuse all descriptors in between.
639          * Example:
640          * The transfer ring has descriptors: A, B, C, D
641          * The last descriptor the host queued is D (WP) and the first
642          * descriptor is A (RP).
643          * The completion event we just serviced is for descriptor C.
644          * We can then safely queue descriptors to replace A, B, and C
645          * even though the host did not receive completions for A and B.
646          */
647         mhi_del_ring_element(mhi_cntrl, tre_ring);
648         buf_info->used = false;
649
650 end_process_rsc_event:
651         read_unlock_bh(&mhi_chan->lock);
652
653         return 0;
654 }
655
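/*
 * Handle a command completion event: record the completion code and
 * wake up the thread waiting on the command.
 */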
656 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
657                                        struct mhi_tre *tre)
658 {
659         dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
660         struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
661         struct mhi_ring *mhi_ring = &cmd_ring->ring;
662         struct mhi_tre *cmd_pkt;
663         struct mhi_chan *mhi_chan;
664         u32 chan;
665
666         cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
667
668         chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
669         mhi_chan = &mhi_cntrl->mhi_chan[chan];
670         write_lock_bh(&mhi_chan->lock);
671         mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
672         complete(&mhi_chan->completion);
673         write_unlock_bh(&mhi_chan->lock);
674
675         mhi_del_ring_element(mhi_cntrl, mhi_ring);
676 }
677
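/*
 * Process events on the control event ring: state change, execution
 * environment change, command completion and bandwidth request events,
 * as well as transfers on the control channels.
 */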
678 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
679                              struct mhi_event *mhi_event,
680                              u32 event_quota)
681 {
682         struct mhi_tre *dev_rp, *local_rp;
683         struct mhi_ring *ev_ring = &mhi_event->ring;
684         struct mhi_event_ctxt *er_ctxt =
685                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
686         struct mhi_chan *mhi_chan;
687         struct device *dev = &mhi_cntrl->mhi_dev->dev;
688         u32 chan;
689         int count = 0;
690
691         /*
692          * This is a quick check to avoid unnecessary event processing
693          * in case MHI is already in the error state; it's still possible
694          * to transition to the error state while processing events.
695          */
696         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
697                 return -EIO;
698
699         dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
700         local_rp = ev_ring->rp;
701
702         while (dev_rp != local_rp) {
703                 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
704
705                 switch (type) {
706                 case MHI_PKT_TYPE_BW_REQ_EVENT:
707                 {
708                         struct mhi_link_info *link_info;
709
710                         link_info = &mhi_cntrl->mhi_link_info;
711                         write_lock_irq(&mhi_cntrl->pm_lock);
712                         link_info->target_link_speed =
713                                 MHI_TRE_GET_EV_LINKSPEED(local_rp);
714                         link_info->target_link_width =
715                                 MHI_TRE_GET_EV_LINKWIDTH(local_rp);
716                         write_unlock_irq(&mhi_cntrl->pm_lock);
717                         dev_dbg(dev, "Received BW_REQ event\n");
718                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
719                         break;
720                 }
721                 case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
722                 {
723                         enum mhi_state new_state;
724
725                         new_state = MHI_TRE_GET_EV_STATE(local_rp);
726
727                         dev_dbg(dev, "State change event to state: %s\n",
728                                 TO_MHI_STATE_STR(new_state));
729
730                         switch (new_state) {
731                         case MHI_STATE_M0:
732                                 mhi_pm_m0_transition(mhi_cntrl);
733                                 break;
734                         case MHI_STATE_M1:
735                                 mhi_pm_m1_transition(mhi_cntrl);
736                                 break;
737                         case MHI_STATE_M3:
738                                 mhi_pm_m3_transition(mhi_cntrl);
739                                 break;
740                         case MHI_STATE_SYS_ERR:
741                         {
742                                 enum mhi_pm_state new_state;
743
744                                 dev_dbg(dev, "System error detected\n");
745                                 write_lock_irq(&mhi_cntrl->pm_lock);
746                                 new_state = mhi_tryset_pm_state(mhi_cntrl,
747                                                         MHI_PM_SYS_ERR_DETECT);
748                                 write_unlock_irq(&mhi_cntrl->pm_lock);
749                                 if (new_state == MHI_PM_SYS_ERR_DETECT)
750                                         mhi_pm_sys_err_handler(mhi_cntrl);
751                                 break;
752                         }
753                         default:
754                                 dev_err(dev, "Invalid state: %s\n",
755                                         TO_MHI_STATE_STR(new_state));
756                         }
757
758                         break;
759                 }
760                 case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
761                         mhi_process_cmd_completion(mhi_cntrl, local_rp);
762                         break;
763                 case MHI_PKT_TYPE_EE_EVENT:
764                 {
765                         enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
766                         enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
767
768                         dev_dbg(dev, "Received EE event: %s\n",
769                                 TO_MHI_EXEC_STR(event));
770                         switch (event) {
771                         case MHI_EE_SBL:
772                                 st = DEV_ST_TRANSITION_SBL;
773                                 break;
774                         case MHI_EE_WFW:
775                         case MHI_EE_AMSS:
776                                 st = DEV_ST_TRANSITION_MISSION_MODE;
777                                 break;
778                         case MHI_EE_RDDM:
779                                 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
780                                 write_lock_irq(&mhi_cntrl->pm_lock);
781                                 mhi_cntrl->ee = event;
782                                 write_unlock_irq(&mhi_cntrl->pm_lock);
783                                 wake_up_all(&mhi_cntrl->state_event);
784                                 break;
785                         default:
786                                 dev_err(dev,
787                                         "Unhandled EE event: 0x%x\n", type);
788                         }
789                         if (st != DEV_ST_TRANSITION_MAX)
790                                 mhi_queue_state_transition(mhi_cntrl, st);
791
792                         break;
793                 }
794                 case MHI_PKT_TYPE_TX_EVENT:
795                         chan = MHI_TRE_GET_EV_CHID(local_rp);
796
797                         WARN_ON(chan >= mhi_cntrl->max_chan);
798
799                         /*
800                          * Only process the event ring elements whose channel
801                          * ID is within the maximum supported range.
802                          */
803                         if (chan < mhi_cntrl->max_chan) {
804                                 mhi_chan = &mhi_cntrl->mhi_chan[chan];
805                                 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
806                                 event_quota--;
807                         }
808                         break;
809                 default:
810                         dev_err(dev, "Unhandled event type: %d\n", type);
811                         break;
812                 }
813
814                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
815                 local_rp = ev_ring->rp;
816                 dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
817                 count++;
818         }
819
820         read_lock_bh(&mhi_cntrl->pm_lock);
821         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
822                 mhi_ring_er_db(mhi_event);
823         read_unlock_bh(&mhi_cntrl->pm_lock);
824
825         return count;
826 }
827
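/* Process up to event_quota transfer completion events on a data event ring */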
828 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
829                                 struct mhi_event *mhi_event,
830                                 u32 event_quota)
831 {
832         struct mhi_tre *dev_rp, *local_rp;
833         struct mhi_ring *ev_ring = &mhi_event->ring;
834         struct mhi_event_ctxt *er_ctxt =
835                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
836         int count = 0;
837         u32 chan;
838         struct mhi_chan *mhi_chan;
839
840         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
841                 return -EIO;
842
843         dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
844         local_rp = ev_ring->rp;
845
846         while (dev_rp != local_rp && event_quota > 0) {
847                 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
848
849                 chan = MHI_TRE_GET_EV_CHID(local_rp);
850
851                 WARN_ON(chan >= mhi_cntrl->max_chan);
852
853                 /*
854                  * Only process the event ring elements whose channel
855                  * ID is within the maximum supported range.
856                  */
857                 if (chan < mhi_cntrl->max_chan) {
858                         mhi_chan = &mhi_cntrl->mhi_chan[chan];
859
860                         if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
861                                 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
862                                 event_quota--;
863                         } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
864                                 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
865                                 event_quota--;
866                         }
867                 }
868
869                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
870                 local_rp = ev_ring->rp;
871                 dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
872                 count++;
873         }
874         read_lock_bh(&mhi_cntrl->pm_lock);
875         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
876                 mhi_ring_er_db(mhi_event);
877         read_unlock_bh(&mhi_cntrl->pm_lock);
878
879         return count;
880 }
881
882 void mhi_ev_task(unsigned long data)
883 {
884         struct mhi_event *mhi_event = (struct mhi_event *)data;
885         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
886
887         /* process all pending events */
888         spin_lock_bh(&mhi_event->lock);
889         mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
890         spin_unlock_bh(&mhi_event->lock);
891 }
892
893 void mhi_ctrl_ev_task(unsigned long data)
894 {
895         struct mhi_event *mhi_event = (struct mhi_event *)data;
896         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
897         struct device *dev = &mhi_cntrl->mhi_dev->dev;
898         enum mhi_state state;
899         enum mhi_pm_state pm_state = 0;
900         int ret;
901
902         /*
903          * We can check PM state w/o a lock here because there is no way
904          * PM state can change from reg access valid to no access while this
905          * thread being executed.
906          */
907         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
908                 /*
909                  * We may have a pending event but are not allowed to
910                  * process it since we are probably in a suspended state,
911                  * so trigger a resume.
912                  */
913                 mhi_trigger_resume(mhi_cntrl);
914
915                 return;
916         }
917
918         /* Process ctrl events */
919         ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
920
921         /*
922          * We received an IRQ but there were no events to process; maybe the
923          * device went to SYS_ERR state. Check the state to confirm.
924          */
925         if (!ret) {
926                 write_lock_irq(&mhi_cntrl->pm_lock);
927                 state = mhi_get_mhi_state(mhi_cntrl);
928                 if (state == MHI_STATE_SYS_ERR) {
929                         dev_dbg(dev, "System error detected\n");
930                         pm_state = mhi_tryset_pm_state(mhi_cntrl,
931                                                        MHI_PM_SYS_ERR_DETECT);
932                 }
933                 write_unlock_irq(&mhi_cntrl->pm_lock);
934                 if (pm_state == MHI_PM_SYS_ERR_DETECT)
935                         mhi_pm_sys_err_handler(mhi_cntrl);
936         }
937 }
938
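/* The ring is full when advancing WP by one element would make it equal to RP */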
939 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
940                              struct mhi_ring *ring)
941 {
942         void *tmp = ring->wp + ring->el_size;
943
944         if (tmp >= (ring->base + ring->len))
945                 tmp = ring->base;
946
947         return (tmp == ring->rp);
948 }
949
950 int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
951                   struct sk_buff *skb, size_t len, enum mhi_flags mflags)
952 {
953         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
954         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
955                                                              mhi_dev->dl_chan;
956         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
957         struct mhi_buf_info buf_info = { };
958         int ret;
959
960         /* If MHI host pre-allocates buffers then client drivers cannot queue */
961         if (mhi_chan->pre_alloc)
962                 return -EINVAL;
963
964         if (mhi_is_ring_full(mhi_cntrl, tre_ring))
965                 return -ENOMEM;
966
967         read_lock_bh(&mhi_cntrl->pm_lock);
968         if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
969                 read_unlock_bh(&mhi_cntrl->pm_lock);
970                 return -EIO;
971         }
972
973         /* we're in M3 or transitioning to M3 */
974         if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
975                 mhi_trigger_resume(mhi_cntrl);
976
977         /* Toggle wake to exit out of M2 */
978         mhi_cntrl->wake_toggle(mhi_cntrl);
979
980         buf_info.v_addr = skb->data;
981         buf_info.cb_buf = skb;
982         buf_info.len = len;
983
984         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
985         if (unlikely(ret)) {
986                 read_unlock_bh(&mhi_cntrl->pm_lock);
987                 return ret;
988         }
989
990         if (mhi_chan->dir == DMA_TO_DEVICE)
991                 atomic_inc(&mhi_cntrl->pending_pkts);
992
993         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
994                 read_lock_bh(&mhi_chan->lock);
995                 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
996                 read_unlock_bh(&mhi_chan->lock);
997         }
998
999         read_unlock_bh(&mhi_cntrl->pm_lock);
1000
1001         return 0;
1002 }
1003 EXPORT_SYMBOL_GPL(mhi_queue_skb);
1004
1005 int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1006                   struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
1007 {
1008         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1009         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1010                                                              mhi_dev->dl_chan;
1011         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1012         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1013         struct mhi_buf_info buf_info = { };
1014         int ret;
1015
1016         /* If MHI host pre-allocates buffers then client drivers cannot queue */
1017         if (mhi_chan->pre_alloc)
1018                 return -EINVAL;
1019
1020         if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1021                 return -ENOMEM;
1022
1023         read_lock_bh(&mhi_cntrl->pm_lock);
1024         if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
1025                 dev_err(dev, "MHI is not in active state, PM state: %s\n",
1026                         to_mhi_pm_state_str(mhi_cntrl->pm_state));
1027                 read_unlock_bh(&mhi_cntrl->pm_lock);
1028
1029                 return -EIO;
1030         }
1031
1032         /* we're in M3 or transitioning to M3 */
1033         if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1034                 mhi_trigger_resume(mhi_cntrl);
1035
1036         /* Toggle wake to exit out of M2 */
1037         mhi_cntrl->wake_toggle(mhi_cntrl);
1038
1039         buf_info.p_addr = mhi_buf->dma_addr;
1040         buf_info.cb_buf = mhi_buf;
1041         buf_info.pre_mapped = true;
1042         buf_info.len = len;
1043
1044         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
1045         if (unlikely(ret)) {
1046                 read_unlock_bh(&mhi_cntrl->pm_lock);
1047                 return ret;
1048         }
1049
1050         if (mhi_chan->dir == DMA_TO_DEVICE)
1051                 atomic_inc(&mhi_cntrl->pending_pkts);
1052
1053         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1054                 read_lock_bh(&mhi_chan->lock);
1055                 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1056                 read_unlock_bh(&mhi_chan->lock);
1057         }
1058
1059         read_unlock_bh(&mhi_cntrl->pm_lock);
1060
1061         return 0;
1062 }
1063 EXPORT_SYMBOL_GPL(mhi_queue_dma);
1064
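/*
 * Build a transfer ring element (TRE) for the given buffer, map the
 * buffer if it is not pre-mapped, and advance the transfer and buffer
 * rings.
 */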
1065 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1066                         struct mhi_buf_info *info, enum mhi_flags flags)
1067 {
1068         struct mhi_ring *buf_ring, *tre_ring;
1069         struct mhi_tre *mhi_tre;
1070         struct mhi_buf_info *buf_info;
1071         int eot, eob, chain, bei;
1072         int ret;
1073
1074         buf_ring = &mhi_chan->buf_ring;
1075         tre_ring = &mhi_chan->tre_ring;
1076
1077         buf_info = buf_ring->wp;
1078         WARN_ON(buf_info->used);
1079         buf_info->pre_mapped = info->pre_mapped;
1080         if (info->pre_mapped)
1081                 buf_info->p_addr = info->p_addr;
1082         else
1083                 buf_info->v_addr = info->v_addr;
1084         buf_info->cb_buf = info->cb_buf;
1085         buf_info->wp = tre_ring->wp;
1086         buf_info->dir = mhi_chan->dir;
1087         buf_info->len = info->len;
1088
1089         if (!info->pre_mapped) {
1090                 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1091                 if (ret)
1092                         return ret;
1093         }
1094
1095         eob = !!(flags & MHI_EOB);
1096         eot = !!(flags & MHI_EOT);
1097         chain = !!(flags & MHI_CHAIN);
1098         bei = !!(mhi_chan->intmod);
1099
1100         mhi_tre = tre_ring->wp;
1101         mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1102         mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
1103         mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
1104
1105         /* increment WP */
1106         mhi_add_ring_element(mhi_cntrl, tre_ring);
1107         mhi_add_ring_element(mhi_cntrl, buf_ring);
1108
1109         return 0;
1110 }
1111
1112 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1113                   void *buf, size_t len, enum mhi_flags mflags)
1114 {
1115         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1116         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1117                                                              mhi_dev->dl_chan;
1118         struct mhi_ring *tre_ring;
1119         struct mhi_buf_info buf_info = { };
1120         unsigned long flags;
1121         int ret;
1122
1123         /*
1124          * This check is here only as a guard; it's always possible for MHI
1125          * to enter the error state while executing the rest of the function,
1126          * which is not fatal, so we do not need to hold pm_lock.
1127          */
1128         if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1129                 return -EIO;
1130
1131         tre_ring = &mhi_chan->tre_ring;
1132         if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1133                 return -ENOMEM;
1134
1135         buf_info.v_addr = buf;
1136         buf_info.cb_buf = buf;
1137         buf_info.len = len;
1138
1139         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
1140         if (unlikely(ret))
1141                 return ret;
1142
1143         read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1144
1145         /* we're in M3 or transitioning to M3 */
1146         if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
1147                 mhi_trigger_resume(mhi_cntrl);
1148
1149         /* Toggle wake to exit out of M2 */
1150         mhi_cntrl->wake_toggle(mhi_cntrl);
1151
1152         if (mhi_chan->dir == DMA_TO_DEVICE)
1153                 atomic_inc(&mhi_cntrl->pending_pkts);
1154
1155         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1156                 unsigned long flags;
1157
1158                 read_lock_irqsave(&mhi_chan->lock, flags);
1159                 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1160                 read_unlock_irqrestore(&mhi_chan->lock, flags);
1161         }
1162
1163         read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1164
1165         return 0;
1166 }
1167 EXPORT_SYMBOL_GPL(mhi_queue_buf);
1168
1169 bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir)
1170 {
1171         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1172         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
1173                                         mhi_dev->ul_chan : mhi_dev->dl_chan;
1174         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1175
1176         return mhi_is_ring_full(mhi_cntrl, tre_ring);
1177 }
1178 EXPORT_SYMBOL_GPL(mhi_queue_is_full);
1179
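/*
 * Queue a channel reset/start command on the primary command ring and
 * ring the command doorbell.
 */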
1180 int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1181                  struct mhi_chan *mhi_chan,
1182                  enum mhi_cmd_type cmd)
1183 {
1184         struct mhi_tre *cmd_tre = NULL;
1185         struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1186         struct mhi_ring *ring = &mhi_cmd->ring;
1187         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1188         int chan = 0;
1189
1190         if (mhi_chan)
1191                 chan = mhi_chan->chan;
1192
1193         spin_lock_bh(&mhi_cmd->lock);
1194         if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1195                 spin_unlock_bh(&mhi_cmd->lock);
1196                 return -ENOMEM;
1197         }
1198
1199         /* prepare the cmd tre */
1200         cmd_tre = ring->wp;
1201         switch (cmd) {
1202         case MHI_CMD_RESET_CHAN:
1203                 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
1204                 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
1205                 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
1206                 break;
1207         case MHI_CMD_START_CHAN:
1208                 cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
1209                 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
1210                 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
1211                 break;
1212         default:
1213                 dev_err(dev, "Command not supported\n");
1214                 break;
1215         }
1216
1217         /* queue to hardware */
1218         mhi_add_ring_element(mhi_cntrl, ring);
1219         read_lock_bh(&mhi_cntrl->pm_lock);
1220         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1221                 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1222         read_unlock_bh(&mhi_cntrl->pm_lock);
1223         spin_unlock_bh(&mhi_cmd->lock);
1224
1225         return 0;
1226 }
1227
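/*
 * Move a channel to the disabled state: issue a channel reset command
 * and tear down the channel context.
 */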
1228 static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1229                                     struct mhi_chan *mhi_chan)
1230 {
1231         int ret;
1232         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1233
1234         dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
1235
1236         /* No more event processing for this channel */
1237         mutex_lock(&mhi_chan->mutex);
1238         write_lock_irq(&mhi_chan->lock);
1239         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED &&
1240             mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) {
1241                 write_unlock_irq(&mhi_chan->lock);
1242                 mutex_unlock(&mhi_chan->mutex);
1243                 return;
1244         }
1245
1246         mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1247         write_unlock_irq(&mhi_chan->lock);
1248
1249         reinit_completion(&mhi_chan->completion);
1250         read_lock_bh(&mhi_cntrl->pm_lock);
1251         if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1252                 read_unlock_bh(&mhi_cntrl->pm_lock);
1253                 goto error_invalid_state;
1254         }
1255
1256         mhi_cntrl->wake_toggle(mhi_cntrl);
1257         read_unlock_bh(&mhi_cntrl->pm_lock);
1258
1259         mhi_cntrl->runtime_get(mhi_cntrl);
1260         mhi_cntrl->runtime_put(mhi_cntrl);
1261         ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
1262         if (ret)
1263                 goto error_invalid_state;
1264
1265         /* even if it fails we will still reset */
1266         ret = wait_for_completion_timeout(&mhi_chan->completion,
1267                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1268         if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
1269                 dev_err(dev,
1270                         "Failed to receive cmd completion, still resetting\n");
1271
1272 error_invalid_state:
1273         if (!mhi_chan->offload_ch) {
1274                 mhi_reset_chan(mhi_cntrl, mhi_chan);
1275                 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1276         }
1277         dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
1278         mutex_unlock(&mhi_chan->mutex);
1279 }
1280
1281 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1282                         struct mhi_chan *mhi_chan)
1283 {
1284         int ret = 0;
1285         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1286
1287         dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
1288
1289         if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1290                 dev_err(dev,
1291                         "Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
1292                         TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
1293                         mhi_chan->name);
1294                 return -ENOTCONN;
1295         }
1296
1297         mutex_lock(&mhi_chan->mutex);
1298
1299         /* If the channel is not in the disabled state, do not allow it to start */
1300         if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
1301                 ret = -EIO;
1302                 dev_dbg(dev, "channel: %d is not in disabled state\n",
1303                         mhi_chan->chan);
1304                 goto error_init_chan;
1305         }
1306
1307         /* Check whether the client manages the channel context (offload channels) */
1308         if (!mhi_chan->offload_ch) {
1309                 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1310                 if (ret)
1311                         goto error_init_chan;
1312         }
1313
1314         reinit_completion(&mhi_chan->completion);
1315         read_lock_bh(&mhi_cntrl->pm_lock);
1316         if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1317                 read_unlock_bh(&mhi_cntrl->pm_lock);
1318                 ret = -EIO;
1319                 goto error_pm_state;
1320         }
1321
1322         mhi_cntrl->wake_toggle(mhi_cntrl);
1323         read_unlock_bh(&mhi_cntrl->pm_lock);
1324         mhi_cntrl->runtime_get(mhi_cntrl);
1325         mhi_cntrl->runtime_put(mhi_cntrl);
1326
1327         ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
1328         if (ret)
1329                 goto error_pm_state;
1330
1331         ret = wait_for_completion_timeout(&mhi_chan->completion,
1332                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1333         if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
1334                 ret = -EIO;
1335                 goto error_pm_state;
1336         }
1337
1338         write_lock_irq(&mhi_chan->lock);
1339         mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
1340         write_unlock_irq(&mhi_chan->lock);
1341
1342         /* Pre-allocate buffers for the xfer ring */
1343         if (mhi_chan->pre_alloc) {
1344                 int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1345                                                        &mhi_chan->tre_ring);
1346                 size_t len = mhi_cntrl->buffer_len;
1347
1348                 while (nr_el--) {
1349                         void *buf;
1350                         struct mhi_buf_info info = { };
1351                         buf = kmalloc(len, GFP_KERNEL);
1352                         if (!buf) {
1353                                 ret = -ENOMEM;
1354                                 goto error_pre_alloc;
1355                         }
1356
1357                         /* Prepare transfer descriptors */
1358                         info.v_addr = buf;
1359                         info.cb_buf = buf;
1360                         info.len = len;
1361                         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT);
1362                         if (ret) {
1363                                 kfree(buf);
1364                                 goto error_pre_alloc;
1365                         }
1366                 }
1367
1368                 read_lock_bh(&mhi_cntrl->pm_lock);
1369                 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1370                         read_lock_irq(&mhi_chan->lock);
1371                         mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1372                         read_unlock_irq(&mhi_chan->lock);
1373                 }
1374                 read_unlock_bh(&mhi_cntrl->pm_lock);
1375         }
1376
1377         mutex_unlock(&mhi_chan->mutex);
1378
1379         dev_dbg(dev, "Chan: %d successfully moved to start state\n",
1380                 mhi_chan->chan);
1381
1382         return 0;
1383
1384 error_pm_state:
1385         if (!mhi_chan->offload_ch)
1386                 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1387
1388 error_init_chan:
1389         mutex_unlock(&mhi_chan->mutex);
1390
1391         return ret;
1392
1393 error_pre_alloc:
1394         mutex_unlock(&mhi_chan->mutex);
1395         __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1396
1397         return ret;
1398 }
1399
1400 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1401                                   struct mhi_event *mhi_event,
1402                                   struct mhi_event_ctxt *er_ctxt,
1403                                   int chan)
1404
1405 {
1406         struct mhi_tre *dev_rp, *local_rp;
1407         struct mhi_ring *ev_ring;
1408         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1409         unsigned long flags;
1410
1411         dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
1412
1413         ev_ring = &mhi_event->ring;
1414
1415         /* Mark all pending events related to the channel as STALE events */
1416         spin_lock_irqsave(&mhi_event->lock, flags);
1417         dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
1418
1419         local_rp = ev_ring->rp;
1420         while (dev_rp != local_rp) {
1421                 if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
1422                     chan == MHI_TRE_GET_EV_CHID(local_rp))
1423                         local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
1424                                         MHI_PKT_TYPE_STALE_EVENT);
1425                 local_rp++;
1426                 if (local_rp == (ev_ring->base + ev_ring->len))
1427                         local_rp = ev_ring->base;
1428         }
1429
1430         dev_dbg(dev, "Finished marking events as stale events\n");
1431         spin_unlock_irqrestore(&mhi_event->lock, flags);
1432 }
1433
1434 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1435                                 struct mhi_chan *mhi_chan)
1436 {
1437         struct mhi_ring *buf_ring, *tre_ring;
1438         struct mhi_result result;
1439
1440         /* Reset any pending buffers */
1441         buf_ring = &mhi_chan->buf_ring;
1442         tre_ring = &mhi_chan->tre_ring;
1443         result.transaction_status = -ENOTCONN;
1444         result.bytes_xferd = 0;
1445         while (tre_ring->rp != tre_ring->wp) {
1446                 struct mhi_buf_info *buf_info = buf_ring->rp;
1447
1448                 if (mhi_chan->dir == DMA_TO_DEVICE)
1449                         atomic_dec(&mhi_cntrl->pending_pkts);
1450
1451                 if (!buf_info->pre_mapped)
1452                         mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1453
1454                 mhi_del_ring_element(mhi_cntrl, buf_ring);
1455                 mhi_del_ring_element(mhi_cntrl, tre_ring);
1456
1457                 if (mhi_chan->pre_alloc) {
1458                         kfree(buf_info->cb_buf);
1459                 } else {
1460                         result.buf_addr = buf_info->cb_buf;
1461                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1462                 }
1463         }
1464 }
1465
1466 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1467 {
1468         struct mhi_event *mhi_event;
1469         struct mhi_event_ctxt *er_ctxt;
1470         int chan = mhi_chan->chan;
1471
1472         /* Nothing to reset, client doesn't queue buffers */
1473         if (mhi_chan->offload_ch)
1474                 return;
1475
1476         read_lock_bh(&mhi_cntrl->pm_lock);
1477         mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1478         er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1479
1480         mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1481
1482         mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1483
1484         read_unlock_bh(&mhi_cntrl->pm_lock);
1485 }
1486
1487 /* Move channel to start state */
1488 int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
1489 {
1490         int ret, dir;
1491         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1492         struct mhi_chan *mhi_chan;
1493
1494         for (dir = 0; dir < 2; dir++) {
1495                 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1496                 if (!mhi_chan)
1497                         continue;
1498
1499                 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
1500                 if (ret)
1501                         goto error_open_chan;
1502         }
1503
1504         return 0;
1505
1506 error_open_chan:
1507         for (--dir; dir >= 0; dir--) {
1508                 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1509                 if (!mhi_chan)
1510                         continue;
1511
1512                 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1513         }
1514
1515         return ret;
1516 }
1517 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
1518
1519 void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
1520 {
1521         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1522         struct mhi_chan *mhi_chan;
1523         int dir;
1524
1525         for (dir = 0; dir < 2; dir++) {
1526                 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1527                 if (!mhi_chan)
1528                         continue;
1529
1530                 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1531         }
1532 }
1533 EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
1534
1535 int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
1536 {
1537         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1538         struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
1539         struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1540         int ret;
1541
1542         spin_lock_bh(&mhi_event->lock);
1543         ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
1544         spin_unlock_bh(&mhi_event->lock);
1545
1546         return ret;
1547 }
1548 EXPORT_SYMBOL_GPL(mhi_poll);