drivers/bus/mhi/core/main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
4  *
5  */
6
7 #include <linux/device.h>
8 #include <linux/dma-direction.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/interrupt.h>
11 #include <linux/list.h>
12 #include <linux/mhi.h>
13 #include <linux/module.h>
14 #include <linux/skbuff.h>
15 #include <linux/slab.h>
16 #include "internal.h"
17
18 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
19                               void __iomem *base, u32 offset, u32 *out)
20 {
21         u32 tmp = readl(base + offset);
22
23         /* If the value is invalid, the link is down */
24         if (PCI_INVALID_READ(tmp))
25                 return -EIO;
26
27         *out = tmp;
28
29         return 0;
30 }
31
32 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
33                                     void __iomem *base, u32 offset,
34                                     u32 mask, u32 shift, u32 *out)
35 {
36         u32 tmp;
37         int ret;
38
39         ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
40         if (ret)
41                 return ret;
42
43         *out = (tmp & mask) >> shift;
44
45         return 0;
46 }
47
48 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
49                    u32 offset, u32 val)
50 {
51         writel(val, base + offset);
52 }
53
54 void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
55                          u32 offset, u32 mask, u32 shift, u32 val)
56 {
57         int ret;
58         u32 tmp;
59
60         ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
61         if (ret)
62                 return;
63
64         tmp &= ~mask;
65         tmp |= (val << shift);
66         mhi_write_reg(mhi_cntrl, base, offset, tmp);
67 }
68
69 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
70                   dma_addr_t db_val)
71 {
72         mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
73         mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
74 }
75
76 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
77                      struct db_cfg *db_cfg,
78                      void __iomem *db_addr,
79                      dma_addr_t db_val)
80 {
81         if (db_cfg->db_mode) {
82                 db_cfg->db_val = db_val;
83                 mhi_write_db(mhi_cntrl, db_addr, db_val);
84                 db_cfg->db_mode = 0;
85         }
86 }
87
88 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
89                              struct db_cfg *db_cfg,
90                              void __iomem *db_addr,
91                              dma_addr_t db_val)
92 {
93         db_cfg->db_val = db_val;
94         mhi_write_db(mhi_cntrl, db_addr, db_val);
95 }
96
97 void mhi_ring_er_db(struct mhi_event *mhi_event)
98 {
99         struct mhi_ring *ring = &mhi_event->ring;
100
101         mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
102                                      ring->db_addr, *ring->ctxt_wp);
103 }
104
105 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
106 {
107         dma_addr_t db;
108         struct mhi_ring *ring = &mhi_cmd->ring;
109
110         db = ring->iommu_base + (ring->wp - ring->base);
111         *ring->ctxt_wp = db;
112         mhi_write_db(mhi_cntrl, ring->db_addr, db);
113 }
114
115 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
116                       struct mhi_chan *mhi_chan)
117 {
118         struct mhi_ring *ring = &mhi_chan->tre_ring;
119         dma_addr_t db;
120
121         db = ring->iommu_base + (ring->wp - ring->base);
122         *ring->ctxt_wp = db;
123         mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
124                                     ring->db_addr, db);
125 }
126
127 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
128 {
129         u32 exec;
130         int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);
131
132         return (ret) ? MHI_EE_MAX : exec;
133 }
134
135 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
136 {
137         u32 state;
138         int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
139                                      MHISTATUS_MHISTATE_MASK,
140                                      MHISTATUS_MHISTATE_SHIFT, &state);
141         return ret ? MHI_STATE_MAX : state;
142 }
143
144 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
145                          struct mhi_buf_info *buf_info)
146 {
147         buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev,
148                                           buf_info->v_addr, buf_info->len,
149                                           buf_info->dir);
150         if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr))
151                 return -ENOMEM;
152
153         return 0;
154 }
155
156 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl,
157                           struct mhi_buf_info *buf_info)
158 {
159         void *buf = mhi_alloc_coherent(mhi_cntrl, buf_info->len,
160                                        &buf_info->p_addr, GFP_ATOMIC);
161
162         if (!buf)
163                 return -ENOMEM;
164
165         if (buf_info->dir == DMA_TO_DEVICE)
166                 memcpy(buf, buf_info->v_addr, buf_info->len);
167
168         buf_info->bb_addr = buf;
169
170         return 0;
171 }
172
173 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl,
174                             struct mhi_buf_info *buf_info)
175 {
176         dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len,
177                          buf_info->dir);
178 }
179
180 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
181                              struct mhi_buf_info *buf_info)
182 {
183         if (buf_info->dir == DMA_FROM_DEVICE)
184                 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len);
185
186         mhi_free_coherent(mhi_cntrl, buf_info->len, buf_info->bb_addr,
187                           buf_info->p_addr);
188 }
189
190 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
191                                       struct mhi_ring *ring)
192 {
193         int nr_el;
194
195         if (ring->wp < ring->rp) {
196                 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1;
197         } else {
198                 nr_el = (ring->rp - ring->base) / ring->el_size;
199                 nr_el += ((ring->base + ring->len - ring->wp) /
200                           ring->el_size) - 1;
201         }
202
203         return nr_el;
204 }
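
/*
 * Illustrative example (not part of the driver): the math above on a
 * hypothetical ring of four elements (len == 4 * el_size), where one slot
 * is always left unused so that wp == rp can only mean "empty":
 *
 *   rp at element 1, wp at element 3 (no wrap pending):
 *       nr_el  = (rp - base) / el_size              = 1
 *       nr_el += ((base + len - wp) / el_size) - 1  = 0   -> 1 free slot
 *
 *   rp at element 3, wp at element 1 (wp < rp):
 *       nr_el  = ((rp - wp) / el_size) - 1          = 1   -> 1 free slot
 *
 * In both cases two elements are in flight, and of the two remaining slots
 * one is held back as the full/empty separator.
 */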
205
206 static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
207 {
208         return (addr - ring->iommu_base) + ring->base;
209 }
210
211 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl,
212                                  struct mhi_ring *ring)
213 {
214         ring->wp += ring->el_size;
215         if (ring->wp >= (ring->base + ring->len))
216                 ring->wp = ring->base;
217         /* Ensure the WP update is visible to all CPUs */
218         smp_wmb();
219 }
220
221 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl,
222                                  struct mhi_ring *ring)
223 {
224         ring->rp += ring->el_size;
225         if (ring->rp >= (ring->base + ring->len))
226                 ring->rp = ring->base;
227         /* Ensure the RP update is visible to all CPUs */
228         smp_wmb();
229 }
230
231 int mhi_destroy_device(struct device *dev, void *data)
232 {
233         struct mhi_device *mhi_dev;
234         struct mhi_controller *mhi_cntrl;
235
236         if (dev->bus != &mhi_bus_type)
237                 return 0;
238
239         mhi_dev = to_mhi_device(dev);
240         mhi_cntrl = mhi_dev->mhi_cntrl;
241
242         /* Only destroy virtual devices that are attached to the bus */
243         if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
244                 return 0;
245
246         /*
247          * For the suspend and resume case, this function will get called
248          * without mhi_unregister_controller(). Hence, we need to drop the
249          * references to mhi_dev created for ul and dl channels. We can
250          * be sure that there will be no instances of mhi_dev left after
251          * this.
252          */
253         if (mhi_dev->ul_chan)
254                 put_device(&mhi_dev->ul_chan->mhi_dev->dev);
255
256         if (mhi_dev->dl_chan)
257                 put_device(&mhi_dev->dl_chan->mhi_dev->dev);
258
259         dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
260                  mhi_dev->chan_name);
261
262         /* Notify the client and remove the device from MHI bus */
263         device_del(dev);
264         put_device(dev);
265
266         return 0;
267 }
268
269 static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
270 {
271         struct mhi_driver *mhi_drv;
272
273         if (!mhi_dev->dev.driver)
274                 return;
275
276         mhi_drv = to_mhi_driver(mhi_dev->dev.driver);
277
278         if (mhi_drv->status_cb)
279                 mhi_drv->status_cb(mhi_dev, cb_reason);
280 }
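
/*
 * Illustrative sketch (not part of the driver): mhi_notify() above lands in
 * the client driver's status_cb. Assuming the callback members declared for
 * struct mhi_driver in include/linux/mhi.h, a hypothetical client could
 * react to MHI_CB_PENDING_DATA like this:
 *
 *   static void my_client_status_cb(struct mhi_device *mhi_dev,
 *                                   enum mhi_callback cb)
 *   {
 *           if (cb == MHI_CB_PENDING_DATA)
 *                   my_client_schedule_poll(mhi_dev);
 *   }
 *
 *   static struct mhi_driver my_client_driver = {
 *           .status_cb = my_client_status_cb,
 *           ...
 *   };
 *
 * my_client_schedule_poll() and my_client_driver are hypothetical names.
 */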
281
282 /* Bind MHI channels to MHI devices */
283 void mhi_create_devices(struct mhi_controller *mhi_cntrl)
284 {
285         struct mhi_chan *mhi_chan;
286         struct mhi_device *mhi_dev;
287         struct device *dev = &mhi_cntrl->mhi_dev->dev;
288         int i, ret;
289
290         mhi_chan = mhi_cntrl->mhi_chan;
291         for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
292                 if (!mhi_chan->configured || mhi_chan->mhi_dev ||
293                     !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
294                         continue;
295                 mhi_dev = mhi_alloc_device(mhi_cntrl);
296                 if (IS_ERR(mhi_dev))
297                         return;
298
299                 mhi_dev->dev_type = MHI_DEVICE_XFER;
300                 switch (mhi_chan->dir) {
301                 case DMA_TO_DEVICE:
302                         mhi_dev->ul_chan = mhi_chan;
303                         mhi_dev->ul_chan_id = mhi_chan->chan;
304                         break;
305                 case DMA_FROM_DEVICE:
306                         /* We use dl_chan as offload channels */
307                         mhi_dev->dl_chan = mhi_chan;
308                         mhi_dev->dl_chan_id = mhi_chan->chan;
309                         break;
310                 default:
311                         dev_err(dev, "Direction not supported\n");
312                         put_device(&mhi_dev->dev);
313                         return;
314                 }
315
316                 get_device(&mhi_dev->dev);
317                 mhi_chan->mhi_dev = mhi_dev;
318
319                 /* Check next channel if it matches */
320                 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
321                         if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
322                                 i++;
323                                 mhi_chan++;
324                                 if (mhi_chan->dir == DMA_TO_DEVICE) {
325                                         mhi_dev->ul_chan = mhi_chan;
326                                         mhi_dev->ul_chan_id = mhi_chan->chan;
327                                 } else {
328                                         mhi_dev->dl_chan = mhi_chan;
329                                         mhi_dev->dl_chan_id = mhi_chan->chan;
330                                 }
331                                 get_device(&mhi_dev->dev);
332                                 mhi_chan->mhi_dev = mhi_dev;
333                         }
334                 }
335
336                 /* Channel name is same for both UL and DL */
337                 mhi_dev->chan_name = mhi_chan->name;
338                 dev_set_name(&mhi_dev->dev, "%04x_%s", mhi_chan->chan,
339                              mhi_dev->chan_name);
340
341                 /* Init wakeup source if available */
342                 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
343                         device_init_wakeup(&mhi_dev->dev, true);
344
345                 ret = device_add(&mhi_dev->dev);
346                 if (ret)
347                         put_device(&mhi_dev->dev);
348         }
349 }
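
/*
 * Illustrative note (not part of the driver): for a hypothetical channel
 * pair named "EXAMPLE" on channels 10 (UL) and 11 (DL), the loop above
 * binds both channels to a single mhi_device. Because mhi_chan points at
 * the second channel of the pair when the name is set, the "%04x_%s"
 * format yields a device named "000b_EXAMPLE".
 */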
350
351 irqreturn_t mhi_irq_handler(int irq_number, void *dev)
352 {
353         struct mhi_event *mhi_event = dev;
354         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
355         struct mhi_event_ctxt *er_ctxt =
356                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
357         struct mhi_ring *ev_ring = &mhi_event->ring;
358         void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
359
360         /* Only proceed if event ring has pending events */
361         if (ev_ring->rp == dev_rp)
362                 return IRQ_HANDLED;
363
364         /* For client managed event ring, notify pending data */
365         if (mhi_event->cl_manage) {
366                 struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
367                 struct mhi_device *mhi_dev = mhi_chan->mhi_dev;
368
369                 if (mhi_dev)
370                         mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
371         } else {
372                 tasklet_schedule(&mhi_event->task);
373         }
374
375         return IRQ_HANDLED;
376 }
377
378 irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
379 {
380         struct mhi_controller *mhi_cntrl = dev;
381         enum mhi_state state = MHI_STATE_MAX;
382         enum mhi_pm_state pm_state = 0;
383         enum mhi_ee_type ee = 0;
384
385         write_lock_irq(&mhi_cntrl->pm_lock);
386         if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
387                 state = mhi_get_mhi_state(mhi_cntrl);
388                 ee = mhi_cntrl->ee;
389                 mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
390         }
391
392         if (state == MHI_STATE_SYS_ERR) {
393                 dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
394                 pm_state = mhi_tryset_pm_state(mhi_cntrl,
395                                                MHI_PM_SYS_ERR_DETECT);
396         }
397         write_unlock_irq(&mhi_cntrl->pm_lock);
398
399         /* If the device is in RDDM, don't bother processing the SYS error */
400         if (mhi_cntrl->ee == MHI_EE_RDDM) {
401                 if (mhi_cntrl->ee != ee) {
402                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
403                         wake_up_all(&mhi_cntrl->state_event);
404                 }
405                 goto exit_intvec;
406         }
407
408         if (pm_state == MHI_PM_SYS_ERR_DETECT) {
409                 wake_up_all(&mhi_cntrl->state_event);
410
411                 /* For fatal errors, we let controller decide next step */
412                 if (MHI_IN_PBL(ee))
413                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
414                 else
415                         schedule_work(&mhi_cntrl->syserr_worker);
416         }
417
418 exit_intvec:
419
420         return IRQ_HANDLED;
421 }
422
423 irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
424 {
425         struct mhi_controller *mhi_cntrl = dev;
426
427         /* Wake up events waiting for state change */
428         wake_up_all(&mhi_cntrl->state_event);
429
430         return IRQ_WAKE_THREAD;
431 }
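
/*
 * Illustrative sketch (not part of this file): mhi_intvec_handler() and
 * mhi_intvec_threaded_handler() are meant to be used as the hard-irq and
 * threaded halves of one interrupt, while mhi_irq_handler() services the
 * per-event-ring vectors with the mhi_event as dev_id. A minimal wiring,
 * with bhi_irq/ev_irq as hypothetical vector numbers, could look like:
 *
 *   ret = request_threaded_irq(bhi_irq, mhi_intvec_handler,
 *                              mhi_intvec_threaded_handler,
 *                              IRQF_SHARED, "bhi", mhi_cntrl);
 *
 *   ret = request_irq(ev_irq, mhi_irq_handler, IRQF_SHARED,
 *                     "mhi", mhi_event);
 *
 * The real registration is done by the MHI core init code, not here.
 */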
432
433 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl,
434                                         struct mhi_ring *ring)
435 {
436         dma_addr_t ctxt_wp;
437
438         /* Update the WP */
439         ring->wp += ring->el_size;
440         ctxt_wp = *ring->ctxt_wp + ring->el_size;
441
442         if (ring->wp >= (ring->base + ring->len)) {
443                 ring->wp = ring->base;
444                 ctxt_wp = ring->iommu_base;
445         }
446
447         *ring->ctxt_wp = ctxt_wp;
448
449         /* Update the RP */
450         ring->rp += ring->el_size;
451         if (ring->rp >= (ring->base + ring->len))
452                 ring->rp = ring->base;
453
454         /* Update to all cores */
455         smp_wmb();
456 }
457
458 static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
459                             struct mhi_tre *event,
460                             struct mhi_chan *mhi_chan)
461 {
462         struct mhi_ring *buf_ring, *tre_ring;
463         struct device *dev = &mhi_cntrl->mhi_dev->dev;
464         struct mhi_result result;
465         unsigned long flags = 0;
466         u32 ev_code;
467
468         ev_code = MHI_TRE_GET_EV_CODE(event);
469         buf_ring = &mhi_chan->buf_ring;
470         tre_ring = &mhi_chan->tre_ring;
471
472         result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
473                 -EOVERFLOW : 0;
474
475         /*
476          * If it's a DB Event, then we need to grab the lock with
477          * preemption disabled and as a writer, because we have to
478          * update the db register and there is a chance that another
479          * thread could be doing the same.
480          */
481         if (ev_code >= MHI_EV_CC_OOB)
482                 write_lock_irqsave(&mhi_chan->lock, flags);
483         else
484                 read_lock_bh(&mhi_chan->lock);
485
486         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
487                 goto end_process_tx_event;
488
489         switch (ev_code) {
490         case MHI_EV_CC_OVERFLOW:
491         case MHI_EV_CC_EOB:
492         case MHI_EV_CC_EOT:
493         {
494                 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event);
495                 struct mhi_tre *local_rp, *ev_tre;
496                 void *dev_rp;
497                 struct mhi_buf_info *buf_info;
498                 u16 xfer_len;
499
500                 /* Get the TRB this event points to */
501                 ev_tre = mhi_to_virtual(tre_ring, ptr);
502
503                 dev_rp = ev_tre + 1;
504                 if (dev_rp >= (tre_ring->base + tre_ring->len))
505                         dev_rp = tre_ring->base;
506
507                 result.dir = mhi_chan->dir;
508
509                 local_rp = tre_ring->rp;
510                 while (local_rp != dev_rp) {
511                         buf_info = buf_ring->rp;
512                         /* If it's the last TRE, get length from the event */
513                         if (local_rp == ev_tre)
514                                 xfer_len = MHI_TRE_GET_EV_LEN(event);
515                         else
516                                 xfer_len = buf_info->len;
517
518                         /* Unmap if it's not pre-mapped by client */
519                         if (likely(!buf_info->pre_mapped))
520                                 mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
521
522                         result.buf_addr = buf_info->cb_buf;
523                         result.bytes_xferd = xfer_len;
524                         mhi_del_ring_element(mhi_cntrl, buf_ring);
525                         mhi_del_ring_element(mhi_cntrl, tre_ring);
526                         local_rp = tre_ring->rp;
527
528                         /* notify client */
529                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
530
531                         if (mhi_chan->dir == DMA_TO_DEVICE)
532                                 atomic_dec(&mhi_cntrl->pending_pkts);
533
534                         /*
535                          * Recycle the buffer if it is pre-allocated. If
536                          * there is an error, there is not much we can do
537                          * apart from dropping the packet.
538                          */
539                         if (mhi_chan->pre_alloc) {
540                                 if (mhi_queue_buf(mhi_chan->mhi_dev,
541                                                   mhi_chan->dir,
542                                                   buf_info->cb_buf,
543                                                   buf_info->len, MHI_EOT)) {
544                                         dev_err(dev,
545                                                 "Error recycling buffer for chan:%d\n",
546                                                 mhi_chan->chan);
547                                         kfree(buf_info->cb_buf);
548                                 }
549                         }
550                 }
551                 break;
552         } /* CC_EOT */
553         case MHI_EV_CC_OOB:
554         case MHI_EV_CC_DB_MODE:
555         {
556                 unsigned long flags;
557
558                 mhi_chan->db_cfg.db_mode = 1;
559                 read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
560                 if (tre_ring->wp != tre_ring->rp &&
561                     MHI_DB_ACCESS_VALID(mhi_cntrl)) {
562                         mhi_ring_chan_db(mhi_cntrl, mhi_chan);
563                 }
564                 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
565                 break;
566         }
567         case MHI_EV_CC_BAD_TRE:
568         default:
569                 dev_err(dev, "Unknown event 0x%x\n", ev_code);
570                 break;
571         } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */
572
573 end_process_tx_event:
574         if (ev_code >= MHI_EV_CC_OOB)
575                 write_unlock_irqrestore(&mhi_chan->lock, flags);
576         else
577                 read_unlock_bh(&mhi_chan->lock);
578
579         return 0;
580 }
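
/*
 * Illustrative sketch (not part of the driver): the xfer_cb invoked above
 * hands the client a struct mhi_result whose fields are filled in by this
 * function (buf_addr is the cb_buf that was queued, bytes_xferd the length
 * from the completion event, transaction_status 0 or -EOVERFLOW). A
 * hypothetical DL callback consuming it:
 *
 *   static void my_client_dl_xfer_cb(struct mhi_device *mhi_dev,
 *                                    struct mhi_result *result)
 *   {
 *           if (result->transaction_status)
 *                   return;
 *
 *           my_client_rx(mhi_dev, result->buf_addr, result->bytes_xferd);
 *   }
 *
 * my_client_rx() is an assumed helper of the hypothetical client.
 */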
581
582 static int parse_rsc_event(struct mhi_controller *mhi_cntrl,
583                            struct mhi_tre *event,
584                            struct mhi_chan *mhi_chan)
585 {
586         struct mhi_ring *buf_ring, *tre_ring;
587         struct mhi_buf_info *buf_info;
588         struct mhi_result result;
589         int ev_code;
590         u32 cookie; /* offset to local descriptor */
591         u16 xfer_len;
592
593         buf_ring = &mhi_chan->buf_ring;
594         tre_ring = &mhi_chan->tre_ring;
595
596         ev_code = MHI_TRE_GET_EV_CODE(event);
597         cookie = MHI_TRE_GET_EV_COOKIE(event);
598         xfer_len = MHI_TRE_GET_EV_LEN(event);
599
600         /* Received an out-of-bounds cookie */
601         WARN_ON(cookie >= buf_ring->len);
602
603         buf_info = buf_ring->base + cookie;
604
605         result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ?
606                 -EOVERFLOW : 0;
607         result.bytes_xferd = xfer_len;
608         result.buf_addr = buf_info->cb_buf;
609         result.dir = mhi_chan->dir;
610
611         read_lock_bh(&mhi_chan->lock);
612
613         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED)
614                 goto end_process_rsc_event;
615
616         WARN_ON(!buf_info->used);
617
618         /* notify the client */
619         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
620
621         /*
622          * Note: We're arbitrarily incrementing RP even though the
623          * completion packet we processed might not be the same one. We
624          * can do this because the device is guaranteed to cache
625          * descriptors in the order it receives them, so even if the
626          * completion event is for a different descriptor, we can re-use
627          * all descriptors in between.
628          * Example:
629          * Transfer Ring has descriptors: A, B, C, D
630          * The last descriptor the host queued is D (WP), the first is
631          * A (RP). The completion event we just serviced is for C.
632          * Then we can safely queue descriptors to replace A, B, and C
633          * even though the host did not receive completions for them.
634          */
635         mhi_del_ring_element(mhi_cntrl, tre_ring);
636         buf_info->used = false;
637
638 end_process_rsc_event:
639         read_unlock_bh(&mhi_chan->lock);
640
641         return 0;
642 }
643
644 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl,
645                                        struct mhi_tre *tre)
646 {
647         dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre);
648         struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
649         struct mhi_ring *mhi_ring = &cmd_ring->ring;
650         struct mhi_tre *cmd_pkt;
651         struct mhi_chan *mhi_chan;
652         u32 chan;
653
654         cmd_pkt = mhi_to_virtual(mhi_ring, ptr);
655
656         chan = MHI_TRE_GET_CMD_CHID(cmd_pkt);
657         mhi_chan = &mhi_cntrl->mhi_chan[chan];
658         write_lock_bh(&mhi_chan->lock);
659         mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre);
660         complete(&mhi_chan->completion);
661         write_unlock_bh(&mhi_chan->lock);
662
663         mhi_del_ring_element(mhi_cntrl, mhi_ring);
664 }
665
666 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
667                              struct mhi_event *mhi_event,
668                              u32 event_quota)
669 {
670         struct mhi_tre *dev_rp, *local_rp;
671         struct mhi_ring *ev_ring = &mhi_event->ring;
672         struct mhi_event_ctxt *er_ctxt =
673                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
674         struct mhi_chan *mhi_chan;
675         struct device *dev = &mhi_cntrl->mhi_dev->dev;
676         u32 chan;
677         int count = 0;
678
679         /*
680          * This is a quick check to avoid unnecessary event processing
681          * in case MHI is already in error state, but it's still possible
682          * to transition to error state while processing events
683          */
684         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
685                 return -EIO;
686
687         dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
688         local_rp = ev_ring->rp;
689
690         while (dev_rp != local_rp) {
691                 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
692
693                 switch (type) {
694                 case MHI_PKT_TYPE_BW_REQ_EVENT:
695                 {
696                         struct mhi_link_info *link_info;
697
698                         link_info = &mhi_cntrl->mhi_link_info;
699                         write_lock_irq(&mhi_cntrl->pm_lock);
700                         link_info->target_link_speed =
701                                 MHI_TRE_GET_EV_LINKSPEED(local_rp);
702                         link_info->target_link_width =
703                                 MHI_TRE_GET_EV_LINKWIDTH(local_rp);
704                         write_unlock_irq(&mhi_cntrl->pm_lock);
705                         dev_dbg(dev, "Received BW_REQ event\n");
706                         mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ);
707                         break;
708                 }
709                 case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
710                 {
711                         enum mhi_state new_state;
712
713                         new_state = MHI_TRE_GET_EV_STATE(local_rp);
714
715                         dev_dbg(dev, "State change event to state: %s\n",
716                                 TO_MHI_STATE_STR(new_state));
717
718                         switch (new_state) {
719                         case MHI_STATE_M0:
720                                 mhi_pm_m0_transition(mhi_cntrl);
721                                 break;
722                         case MHI_STATE_M1:
723                                 mhi_pm_m1_transition(mhi_cntrl);
724                                 break;
725                         case MHI_STATE_M3:
726                                 mhi_pm_m3_transition(mhi_cntrl);
727                                 break;
728                         case MHI_STATE_SYS_ERR:
729                         {
730                                 enum mhi_pm_state new_state;
731
732                                 dev_dbg(dev, "System error detected\n");
733                                 write_lock_irq(&mhi_cntrl->pm_lock);
734                                 new_state = mhi_tryset_pm_state(mhi_cntrl,
735                                                         MHI_PM_SYS_ERR_DETECT);
736                                 write_unlock_irq(&mhi_cntrl->pm_lock);
737                                 if (new_state == MHI_PM_SYS_ERR_DETECT)
738                                         schedule_work(&mhi_cntrl->syserr_worker);
739                                 break;
740                         }
741                         default:
742                                 dev_err(dev, "Invalid state: %s\n",
743                                         TO_MHI_STATE_STR(new_state));
744                         }
745
746                         break;
747                 }
748                 case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
749                         mhi_process_cmd_completion(mhi_cntrl, local_rp);
750                         break;
751                 case MHI_PKT_TYPE_EE_EVENT:
752                 {
753                         enum dev_st_transition st = DEV_ST_TRANSITION_MAX;
754                         enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
755
756                         dev_dbg(dev, "Received EE event: %s\n",
757                                 TO_MHI_EXEC_STR(event));
758                         switch (event) {
759                         case MHI_EE_SBL:
760                                 st = DEV_ST_TRANSITION_SBL;
761                                 break;
762                         case MHI_EE_WFW:
763                         case MHI_EE_AMSS:
764                                 st = DEV_ST_TRANSITION_MISSION_MODE;
765                                 break;
766                         case MHI_EE_RDDM:
767                                 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
768                                 write_lock_irq(&mhi_cntrl->pm_lock);
769                                 mhi_cntrl->ee = event;
770                                 write_unlock_irq(&mhi_cntrl->pm_lock);
771                                 wake_up_all(&mhi_cntrl->state_event);
772                                 break;
773                         default:
774                                 dev_err(dev,
775                                         "Unhandled EE event: 0x%x\n", type);
776                         }
777                         if (st != DEV_ST_TRANSITION_MAX)
778                                 mhi_queue_state_transition(mhi_cntrl, st);
779
780                         break;
781                 }
782                 case MHI_PKT_TYPE_TX_EVENT:
783                         chan = MHI_TRE_GET_EV_CHID(local_rp);
784                         mhi_chan = &mhi_cntrl->mhi_chan[chan];
785                         parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
786                         event_quota--;
787                         break;
788                 default:
789                         dev_err(dev, "Unhandled event type: %d\n", type);
790                         break;
791                 }
792
793                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
794                 local_rp = ev_ring->rp;
795                 dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
796                 count++;
797         }
798
799         read_lock_bh(&mhi_cntrl->pm_lock);
800         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
801                 mhi_ring_er_db(mhi_event);
802         read_unlock_bh(&mhi_cntrl->pm_lock);
803
804         return count;
805 }
806
807 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
808                                 struct mhi_event *mhi_event,
809                                 u32 event_quota)
810 {
811         struct mhi_tre *dev_rp, *local_rp;
812         struct mhi_ring *ev_ring = &mhi_event->ring;
813         struct mhi_event_ctxt *er_ctxt =
814                 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
815         int count = 0;
816         u32 chan;
817         struct mhi_chan *mhi_chan;
818
819         if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state)))
820                 return -EIO;
821
822         dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
823         local_rp = ev_ring->rp;
824
825         while (dev_rp != local_rp && event_quota > 0) {
826                 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
827
828                 chan = MHI_TRE_GET_EV_CHID(local_rp);
829                 mhi_chan = &mhi_cntrl->mhi_chan[chan];
830
831                 if (likely(type == MHI_PKT_TYPE_TX_EVENT)) {
832                         parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
833                         event_quota--;
834                 } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) {
835                         parse_rsc_event(mhi_cntrl, local_rp, mhi_chan);
836                         event_quota--;
837                 }
838
839                 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
840                 local_rp = ev_ring->rp;
841                 dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
842                 count++;
843         }
844         read_lock_bh(&mhi_cntrl->pm_lock);
845         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
846                 mhi_ring_er_db(mhi_event);
847         read_unlock_bh(&mhi_cntrl->pm_lock);
848
849         return count;
850 }
851
852 void mhi_ev_task(unsigned long data)
853 {
854         struct mhi_event *mhi_event = (struct mhi_event *)data;
855         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
856
857         /* process all pending events */
858         spin_lock_bh(&mhi_event->lock);
859         mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
860         spin_unlock_bh(&mhi_event->lock);
861 }
862
863 void mhi_ctrl_ev_task(unsigned long data)
864 {
865         struct mhi_event *mhi_event = (struct mhi_event *)data;
866         struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
867         struct device *dev = &mhi_cntrl->mhi_dev->dev;
868         enum mhi_state state;
869         enum mhi_pm_state pm_state = 0;
870         int ret;
871
872         /*
873          * We can check the PM state w/o a lock here because there is no
874          * way the PM state can change from reg access valid to no access
875          * while this thread is being executed.
876          */
877         if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
878                 /*
879                  * We may have a pending event, but we are not allowed to
880                  * process it since we are probably in a suspended state,
881                  * so trigger a resume.
882                  */
883                 mhi_cntrl->runtime_get(mhi_cntrl);
884                 mhi_cntrl->runtime_put(mhi_cntrl);
885
886                 return;
887         }
888
889         /* Process ctrl events */
890         ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
891
892         /*
893          * We received an IRQ but no events to process, maybe device went to
894          * SYS_ERR state? Check the state to confirm.
895          */
896         if (!ret) {
897                 write_lock_irq(&mhi_cntrl->pm_lock);
898                 state = mhi_get_mhi_state(mhi_cntrl);
899                 if (state == MHI_STATE_SYS_ERR) {
900                         dev_dbg(dev, "System error detected\n");
901                         pm_state = mhi_tryset_pm_state(mhi_cntrl,
902                                                        MHI_PM_SYS_ERR_DETECT);
903                 }
904                 write_unlock_irq(&mhi_cntrl->pm_lock);
905                 if (pm_state == MHI_PM_SYS_ERR_DETECT)
906                         schedule_work(&mhi_cntrl->syserr_worker);
907         }
908 }
909
910 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
911                              struct mhi_ring *ring)
912 {
913         void *tmp = ring->wp + ring->el_size;
914
915         if (tmp >= (ring->base + ring->len))
916                 tmp = ring->base;
917
918         return (tmp == ring->rp);
919 }
920
921 int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
922                   struct sk_buff *skb, size_t len, enum mhi_flags mflags)
923 {
924         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
925         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
926                                                              mhi_dev->dl_chan;
927         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
928         struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
929         struct mhi_buf_info *buf_info;
930         struct mhi_tre *mhi_tre;
931         int ret;
932
933         /* If MHI host pre-allocates buffers then client drivers cannot queue */
934         if (mhi_chan->pre_alloc)
935                 return -EINVAL;
936
937         if (mhi_is_ring_full(mhi_cntrl, tre_ring))
938                 return -ENOMEM;
939
940         read_lock_bh(&mhi_cntrl->pm_lock);
941         if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
942                 read_unlock_bh(&mhi_cntrl->pm_lock);
943                 return -EIO;
944         }
945
946         /* we're in M3 or transitioning to M3 */
947         if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
948                 mhi_cntrl->runtime_get(mhi_cntrl);
949                 mhi_cntrl->runtime_put(mhi_cntrl);
950         }
951
952         /* Toggle wake to exit out of M2 */
953         mhi_cntrl->wake_toggle(mhi_cntrl);
954
955         /* Generate the TRE */
956         buf_info = buf_ring->wp;
957
958         buf_info->v_addr = skb->data;
959         buf_info->cb_buf = skb;
960         buf_info->wp = tre_ring->wp;
961         buf_info->dir = mhi_chan->dir;
962         buf_info->len = len;
963         ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
964         if (ret)
965                 goto map_error;
966
967         mhi_tre = tre_ring->wp;
968
969         mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
970         mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
971         mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
972
973         /* increment WP */
974         mhi_add_ring_element(mhi_cntrl, tre_ring);
975         mhi_add_ring_element(mhi_cntrl, buf_ring);
976
977         if (mhi_chan->dir == DMA_TO_DEVICE)
978                 atomic_inc(&mhi_cntrl->pending_pkts);
979
980         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
981                 read_lock_bh(&mhi_chan->lock);
982                 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
983                 read_unlock_bh(&mhi_chan->lock);
984         }
985
986         read_unlock_bh(&mhi_cntrl->pm_lock);
987
988         return 0;
989
990 map_error:
991         read_unlock_bh(&mhi_cntrl->pm_lock);
992
993         return ret;
994 }
995 EXPORT_SYMBOL_GPL(mhi_queue_skb);
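
/*
 * Illustrative sketch (not part of the driver): a hypothetical network
 * client transmitting an skb on its UL channel:
 *
 *   ret = mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
 *   if (ret == -ENOMEM)
 *           netif_stop_queue(ndev);
 *
 * -ENOMEM here means the transfer ring is full, so the caller applies
 * back-pressure; netif_stop_queue() and ndev are assumptions about that
 * caller, not something this file provides.
 */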
996
997 int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
998                   struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
999 {
1000         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1001         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1002                                                              mhi_dev->dl_chan;
1003         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1004         struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
1005         struct mhi_ring *buf_ring = &mhi_chan->buf_ring;
1006         struct mhi_buf_info *buf_info;
1007         struct mhi_tre *mhi_tre;
1008
1009         /* If MHI host pre-allocates buffers then client drivers cannot queue */
1010         if (mhi_chan->pre_alloc)
1011                 return -EINVAL;
1012
1013         if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1014                 return -ENOMEM;
1015
1016         read_lock_bh(&mhi_cntrl->pm_lock);
1017         if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
1018                 dev_err(dev, "MHI is not in active state, PM state: %s\n",
1019                         to_mhi_pm_state_str(mhi_cntrl->pm_state));
1020                 read_unlock_bh(&mhi_cntrl->pm_lock);
1021
1022                 return -EIO;
1023         }
1024
1025         /* we're in M3 or transitioning to M3 */
1026         if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
1027                 mhi_cntrl->runtime_get(mhi_cntrl);
1028                 mhi_cntrl->runtime_put(mhi_cntrl);
1029         }
1030
1031         /* Toggle wake to exit out of M2 */
1032         mhi_cntrl->wake_toggle(mhi_cntrl);
1033
1034         /* Generate the TRE */
1035         buf_info = buf_ring->wp;
1036         WARN_ON(buf_info->used);
1037         buf_info->p_addr = mhi_buf->dma_addr;
1038         buf_info->pre_mapped = true;
1039         buf_info->cb_buf = mhi_buf;
1040         buf_info->wp = tre_ring->wp;
1041         buf_info->dir = mhi_chan->dir;
1042         buf_info->len = len;
1043
1044         mhi_tre = tre_ring->wp;
1045
1046         mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1047         mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_info->len);
1048         mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(1, 1, 0, 0);
1049
1050         /* increment WP */
1051         mhi_add_ring_element(mhi_cntrl, tre_ring);
1052         mhi_add_ring_element(mhi_cntrl, buf_ring);
1053
1054         if (mhi_chan->dir == DMA_TO_DEVICE)
1055                 atomic_inc(&mhi_cntrl->pending_pkts);
1056
1057         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1058                 read_lock_bh(&mhi_chan->lock);
1059                 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1060                 read_unlock_bh(&mhi_chan->lock);
1061         }
1062
1063         read_unlock_bh(&mhi_cntrl->pm_lock);
1064
1065         return 0;
1066 }
1067 EXPORT_SYMBOL_GPL(mhi_queue_dma);
1068
1069 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
1070                 void *buf, void *cb, size_t buf_len, enum mhi_flags flags)
1071 {
1072         struct mhi_ring *buf_ring, *tre_ring;
1073         struct mhi_tre *mhi_tre;
1074         struct mhi_buf_info *buf_info;
1075         int eot, eob, chain, bei;
1076         int ret;
1077
1078         buf_ring = &mhi_chan->buf_ring;
1079         tre_ring = &mhi_chan->tre_ring;
1080
1081         buf_info = buf_ring->wp;
1082         buf_info->v_addr = buf;
1083         buf_info->cb_buf = cb;
1084         buf_info->wp = tre_ring->wp;
1085         buf_info->dir = mhi_chan->dir;
1086         buf_info->len = buf_len;
1087
1088         ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
1089         if (ret)
1090                 return ret;
1091
1092         eob = !!(flags & MHI_EOB);
1093         eot = !!(flags & MHI_EOT);
1094         chain = !!(flags & MHI_CHAIN);
1095         bei = !!(mhi_chan->intmod);
1096
1097         mhi_tre = tre_ring->wp;
1098         mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr);
1099         mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(buf_len);
1100         mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
1101
1102         /* increment WP */
1103         mhi_add_ring_element(mhi_cntrl, tre_ring);
1104         mhi_add_ring_element(mhi_cntrl, buf_ring);
1105
1106         return 0;
1107 }
1108
1109 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
1110                   void *buf, size_t len, enum mhi_flags mflags)
1111 {
1112         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1113         struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
1114                                                              mhi_dev->dl_chan;
1115         struct mhi_ring *tre_ring;
1116         unsigned long flags;
1117         int ret;
1118
1119         /*
1120          * This check is only a guard; it is always possible that MHI
1121          * enters an error state while executing the rest of this
1122          * function, which is not fatal, so we do not need to hold pm_lock.
1123          */
1124         if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
1125                 return -EIO;
1126
1127         tre_ring = &mhi_chan->tre_ring;
1128         if (mhi_is_ring_full(mhi_cntrl, tre_ring))
1129                 return -ENOMEM;
1130
1131         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf, len, mflags);
1132         if (unlikely(ret))
1133                 return ret;
1134
1135         read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
1136
1137         /* we're in M3 or transitioning to M3 */
1138         if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
1139                 mhi_cntrl->runtime_get(mhi_cntrl);
1140                 mhi_cntrl->runtime_put(mhi_cntrl);
1141         }
1142
1143         /* Toggle wake to exit out of M2 */
1144         mhi_cntrl->wake_toggle(mhi_cntrl);
1145
1146         if (mhi_chan->dir == DMA_TO_DEVICE)
1147                 atomic_inc(&mhi_cntrl->pending_pkts);
1148
1149         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
1150                 unsigned long flags;
1151
1152                 read_lock_irqsave(&mhi_chan->lock, flags);
1153                 mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1154                 read_unlock_irqrestore(&mhi_chan->lock, flags);
1155         }
1156
1157         read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
1158
1159         return 0;
1160 }
1161 EXPORT_SYMBOL_GPL(mhi_queue_buf);
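
/*
 * Illustrative sketch (not part of the driver): a hypothetical client
 * pre-posting a receive buffer on its DL channel. mhi_queue_buf() passes
 * the same pointer as both buf and cb, so the client gets it back as
 * result->buf_addr in its dl_xfer_cb.
 *
 *   void *buf = kmalloc(MY_CLIENT_MTU, GFP_KERNEL);
 *
 *   if (buf)
 *           ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf,
 *                               MY_CLIENT_MTU, MHI_EOT);
 *
 * MY_CLIENT_MTU is a hypothetical buffer size chosen by the client.
 */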
1162
1163 int mhi_send_cmd(struct mhi_controller *mhi_cntrl,
1164                  struct mhi_chan *mhi_chan,
1165                  enum mhi_cmd_type cmd)
1166 {
1167         struct mhi_tre *cmd_tre = NULL;
1168         struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING];
1169         struct mhi_ring *ring = &mhi_cmd->ring;
1170         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1171         int chan = 0;
1172
1173         if (mhi_chan)
1174                 chan = mhi_chan->chan;
1175
1176         spin_lock_bh(&mhi_cmd->lock);
1177         if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) {
1178                 spin_unlock_bh(&mhi_cmd->lock);
1179                 return -ENOMEM;
1180         }
1181
1182         /* prepare the cmd tre */
1183         cmd_tre = ring->wp;
1184         switch (cmd) {
1185         case MHI_CMD_RESET_CHAN:
1186                 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR;
1187                 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0;
1188                 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan);
1189                 break;
1190         case MHI_CMD_START_CHAN:
1191                 cmd_tre->ptr = MHI_TRE_CMD_START_PTR;
1192                 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0;
1193                 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan);
1194                 break;
1195         default:
1196                 dev_err(dev, "Command not supported\n");
1197                 break;
1198         }
1199
1200         /* queue to hardware */
1201         mhi_add_ring_element(mhi_cntrl, ring);
1202         read_lock_bh(&mhi_cntrl->pm_lock);
1203         if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
1204                 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd);
1205         read_unlock_bh(&mhi_cntrl->pm_lock);
1206         spin_unlock_bh(&mhi_cmd->lock);
1207
1208         return 0;
1209 }
1210
1211 static void __mhi_unprepare_channel(struct mhi_controller *mhi_cntrl,
1212                                     struct mhi_chan *mhi_chan)
1213 {
1214         int ret;
1215         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1216
1217         dev_dbg(dev, "Entered: unprepare channel:%d\n", mhi_chan->chan);
1218
1219         /* no more processing events for this channel */
1220         mutex_lock(&mhi_chan->mutex);
1221         write_lock_irq(&mhi_chan->lock);
1222         if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) {
1223                 write_unlock_irq(&mhi_chan->lock);
1224                 mutex_unlock(&mhi_chan->mutex);
1225                 return;
1226         }
1227
1228         mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
1229         write_unlock_irq(&mhi_chan->lock);
1230
1231         reinit_completion(&mhi_chan->completion);
1232         read_lock_bh(&mhi_cntrl->pm_lock);
1233         if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1234                 read_unlock_bh(&mhi_cntrl->pm_lock);
1235                 goto error_invalid_state;
1236         }
1237
1238         mhi_cntrl->wake_toggle(mhi_cntrl);
1239         read_unlock_bh(&mhi_cntrl->pm_lock);
1240
1241         mhi_cntrl->runtime_get(mhi_cntrl);
1242         mhi_cntrl->runtime_put(mhi_cntrl);
1243         ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_RESET_CHAN);
1244         if (ret)
1245                 goto error_invalid_state;
1246
1247         /* even if it fails we will still reset */
1248         ret = wait_for_completion_timeout(&mhi_chan->completion,
1249                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1250         if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS)
1251                 dev_err(dev,
1252                         "Failed to receive cmd completion, still resetting\n");
1253
1254 error_invalid_state:
1255         if (!mhi_chan->offload_ch) {
1256                 mhi_reset_chan(mhi_cntrl, mhi_chan);
1257                 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1258         }
1259         dev_dbg(dev, "chan:%d successfully reset\n", mhi_chan->chan);
1260         mutex_unlock(&mhi_chan->mutex);
1261 }
1262
1263 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
1264                         struct mhi_chan *mhi_chan)
1265 {
1266         int ret = 0;
1267         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1268
1269         dev_dbg(dev, "Preparing channel: %d\n", mhi_chan->chan);
1270
1271         if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
1272                 dev_err(dev,
1273                         "Current EE: %s Required EE Mask: 0x%x for chan: %s\n",
1274                         TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask,
1275                         mhi_chan->name);
1276                 return -ENOTCONN;
1277         }
1278
1279         mutex_lock(&mhi_chan->mutex);
1280
1281         /* If channel is not in disabled state, do not allow it to start */
1282         if (mhi_chan->ch_state != MHI_CH_STATE_DISABLED) {
1283                 ret = -EIO;
1284                 dev_dbg(dev, "channel: %d is not in disabled state\n",
1285                         mhi_chan->chan);
1286                 goto error_init_chan;
1287         }
1288
1289         /* Check if client manages channel context for offload channels */
1290         if (!mhi_chan->offload_ch) {
1291                 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan);
1292                 if (ret)
1293                         goto error_init_chan;
1294         }
1295
1296         reinit_completion(&mhi_chan->completion);
1297         read_lock_bh(&mhi_cntrl->pm_lock);
1298         if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
1299                 read_unlock_bh(&mhi_cntrl->pm_lock);
1300                 ret = -EIO;
1301                 goto error_pm_state;
1302         }
1303
1304         mhi_cntrl->wake_toggle(mhi_cntrl);
1305         read_unlock_bh(&mhi_cntrl->pm_lock);
1306         mhi_cntrl->runtime_get(mhi_cntrl);
1307         mhi_cntrl->runtime_put(mhi_cntrl);
1308
1309         ret = mhi_send_cmd(mhi_cntrl, mhi_chan, MHI_CMD_START_CHAN);
1310         if (ret)
1311                 goto error_pm_state;
1312
1313         ret = wait_for_completion_timeout(&mhi_chan->completion,
1314                                 msecs_to_jiffies(mhi_cntrl->timeout_ms));
1315         if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
1316                 ret = -EIO;
1317                 goto error_pm_state;
1318         }
1319
1320         write_lock_irq(&mhi_chan->lock);
1321         mhi_chan->ch_state = MHI_CH_STATE_ENABLED;
1322         write_unlock_irq(&mhi_chan->lock);
1323
1324         /* Pre-allocate buffer for xfer ring */
1325         if (mhi_chan->pre_alloc) {
1326                 int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
1327                                                        &mhi_chan->tre_ring);
1328                 size_t len = mhi_cntrl->buffer_len;
1329
1330                 while (nr_el--) {
1331                         void *buf;
1332
1333                         buf = kmalloc(len, GFP_KERNEL);
1334                         if (!buf) {
1335                                 ret = -ENOMEM;
1336                                 goto error_pre_alloc;
1337                         }
1338
1339                         /* Prepare transfer descriptors */
1340                         ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf, buf,
1341                                           len, MHI_EOT);
1342                         if (ret) {
1343                                 kfree(buf);
1344                                 goto error_pre_alloc;
1345                         }
1346                 }
1347
1348                 read_lock_bh(&mhi_cntrl->pm_lock);
1349                 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) {
1350                         read_lock_irq(&mhi_chan->lock);
1351                         mhi_ring_chan_db(mhi_cntrl, mhi_chan);
1352                         read_unlock_irq(&mhi_chan->lock);
1353                 }
1354                 read_unlock_bh(&mhi_cntrl->pm_lock);
1355         }
1356
1357         mutex_unlock(&mhi_chan->mutex);
1358
1359         dev_dbg(dev, "Chan: %d successfully moved to start state\n",
1360                 mhi_chan->chan);
1361
1362         return 0;
1363
1364 error_pm_state:
1365         if (!mhi_chan->offload_ch)
1366                 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);
1367
1368 error_init_chan:
1369         mutex_unlock(&mhi_chan->mutex);
1370
1371         return ret;
1372
1373 error_pre_alloc:
1374         mutex_unlock(&mhi_chan->mutex);
1375         __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1376
1377         return ret;
1378 }
1379
1380 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl,
1381                                   struct mhi_event *mhi_event,
1382                                   struct mhi_event_ctxt *er_ctxt,
1383                                   int chan)
1384
1385 {
1386         struct mhi_tre *dev_rp, *local_rp;
1387         struct mhi_ring *ev_ring;
1388         struct device *dev = &mhi_cntrl->mhi_dev->dev;
1389         unsigned long flags;
1390
1391         dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan);
1392
1393         ev_ring = &mhi_event->ring;
1394
1395         /* mark all stale events related to channel as STALE event */
1396         spin_lock_irqsave(&mhi_event->lock, flags);
1397         dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
1398
1399         local_rp = ev_ring->rp;
1400         while (dev_rp != local_rp) {
1401                 if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT &&
1402                     chan == MHI_TRE_GET_EV_CHID(local_rp))
1403                         local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan,
1404                                         MHI_PKT_TYPE_STALE_EVENT);
1405                 local_rp++;
1406                 if (local_rp == (ev_ring->base + ev_ring->len))
1407                         local_rp = ev_ring->base;
1408         }
1409
1410         dev_dbg(dev, "Finished marking events as stale events\n");
1411         spin_unlock_irqrestore(&mhi_event->lock, flags);
1412 }
1413
1414 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl,
1415                                 struct mhi_chan *mhi_chan)
1416 {
1417         struct mhi_ring *buf_ring, *tre_ring;
1418         struct mhi_result result;
1419
1420         /* Reset any pending buffers */
1421         buf_ring = &mhi_chan->buf_ring;
1422         tre_ring = &mhi_chan->tre_ring;
1423         result.transaction_status = -ENOTCONN;
1424         result.bytes_xferd = 0;
1425         while (tre_ring->rp != tre_ring->wp) {
1426                 struct mhi_buf_info *buf_info = buf_ring->rp;
1427
1428                 if (mhi_chan->dir == DMA_TO_DEVICE)
1429                         atomic_dec(&mhi_cntrl->pending_pkts);
1430
1431                 if (!buf_info->pre_mapped)
1432                         mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
1433
1434                 mhi_del_ring_element(mhi_cntrl, buf_ring);
1435                 mhi_del_ring_element(mhi_cntrl, tre_ring);
1436
1437                 if (mhi_chan->pre_alloc) {
1438                         kfree(buf_info->cb_buf);
1439                 } else {
1440                         result.buf_addr = buf_info->cb_buf;
1441                         mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
1442                 }
1443         }
1444 }
1445
1446 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
1447 {
1448         struct mhi_event *mhi_event;
1449         struct mhi_event_ctxt *er_ctxt;
1450         int chan = mhi_chan->chan;
1451
1452         /* Nothing to reset, client doesn't queue buffers */
1453         if (mhi_chan->offload_ch)
1454                 return;
1455
1456         read_lock_bh(&mhi_cntrl->pm_lock);
1457         mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1458         er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index];
1459
1460         mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan);
1461
1462         mhi_reset_data_chan(mhi_cntrl, mhi_chan);
1463
1464         read_unlock_bh(&mhi_cntrl->pm_lock);
1465 }
1466
1467 /* Move channel to start state */
1468 int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
1469 {
1470         int ret, dir;
1471         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1472         struct mhi_chan *mhi_chan;
1473
1474         for (dir = 0; dir < 2; dir++) {
1475                 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1476                 if (!mhi_chan)
1477                         continue;
1478
1479                 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
1480                 if (ret)
1481                         goto error_open_chan;
1482         }
1483
1484         return 0;
1485
1486 error_open_chan:
1487         for (--dir; dir >= 0; dir--) {
1488                 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan;
1489                 if (!mhi_chan)
1490                         continue;
1491
1492                 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1493         }
1494
1495         return ret;
1496 }
1497 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
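
/*
 * Illustrative sketch (not part of the driver): the usual call site for
 * mhi_prepare_for_transfer() is a client driver's probe, with the matching
 * teardown in remove. Everything other than the two MHI calls below is a
 * hypothetical client:
 *
 *   static int my_client_probe(struct mhi_device *mhi_dev,
 *                              const struct mhi_device_id *id)
 *   {
 *           return mhi_prepare_for_transfer(mhi_dev);
 *   }
 *
 *   static void my_client_remove(struct mhi_device *mhi_dev)
 *   {
 *           mhi_unprepare_from_transfer(mhi_dev);
 *   }
 */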
1498
1499 void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
1500 {
1501         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1502         struct mhi_chan *mhi_chan;
1503         int dir;
1504
1505         for (dir = 0; dir < 2; dir++) {
1506                 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
1507                 if (!mhi_chan)
1508                         continue;
1509
1510                 __mhi_unprepare_channel(mhi_cntrl, mhi_chan);
1511         }
1512 }
1513 EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer);
1514
1515 int mhi_poll(struct mhi_device *mhi_dev, u32 budget)
1516 {
1517         struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
1518         struct mhi_chan *mhi_chan = mhi_dev->dl_chan;
1519         struct mhi_event *mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
1520         int ret;
1521
1522         spin_lock_bh(&mhi_event->lock);
1523         ret = mhi_event->process_event(mhi_cntrl, mhi_event, budget);
1524         spin_unlock_bh(&mhi_event->lock);
1525
1526         return ret;
1527 }
1528 EXPORT_SYMBOL_GPL(mhi_poll);
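
/*
 * Illustrative sketch (not part of the driver): mhi_poll() lets a client
 * drain up to "budget" pending events on its DL channel's event ring from
 * its own context, for example a NAPI-style poll routine. A non-negative
 * return is the number of events processed; a negative value is an error.
 *
 *   int done = mhi_poll(mhi_dev, budget);
 *
 * A return smaller than budget typically means the event ring has been
 * drained, which is when a hypothetical NAPI caller would complete the
 * poll and re-enable interrupts.
 */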