drivers/usb/dwc2/hcd_ddma.c
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3  * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
4  *
5  * Copyright (C) 2004-2013 Synopsys, Inc.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The names of the above-listed copyright holders may not be used
17  *    to endorse or promote products derived from this software without
18  *    specific prior written permission.
19  *
20  * ALTERNATIVELY, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") as published by the Free Software
22  * Foundation; either version 2 of the License, or (at your option) any
23  * later version.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37
38 /*
39  * This file contains the Descriptor DMA implementation for Host mode
40  */
41 #include <linux/kernel.h>
42 #include <linux/module.h>
43 #include <linux/spinlock.h>
44 #include <linux/interrupt.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/io.h>
47 #include <linux/slab.h>
48 #include <linux/usb.h>
49
50 #include <linux/usb/hcd.h>
51 #include <linux/usb/ch11.h>
52
53 #include "core.h"
54 #include "hcd.h"
55
56 static u16 dwc2_frame_list_idx(u16 frame)
57 {
58         return frame & (FRLISTEN_64_SIZE - 1);
59 }
60
61 static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
62 {
63         return (idx + inc) &
64                 ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
65                   MAX_DMA_DESC_NUM_GENERIC) - 1);
66 }
67
68 static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
69 {
70         return (idx - inc) &
71                 ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
72                   MAX_DMA_DESC_NUM_GENERIC) - 1);
73 }
74
75 static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
76 {
77         return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
78                 qh->dev_speed == USB_SPEED_HIGH) ?
79                 MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
80 }
81
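/*
 * Per-QH increment, in frame-list entries, between frames in which the
 * endpoint must be serviced. For HS, host_interval is in microframes and is
 * rounded up to whole frames (e.g. host_interval = 16 gives 2); other speeds
 * use host_interval directly.
 */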
82 static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
83 {
84         return qh->dev_speed == USB_SPEED_HIGH ?
85                (qh->host_interval + 8 - 1) / 8 : qh->host_interval;
86 }
87
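/*
 * Allocate the descriptor list for a QH from the appropriate kmem cache.
 * With an 8-byte struct dwc2_dma_desc this is MAX_DMA_DESC_NUM_HS_ISOC
 * descriptors (typically 256, i.e. 2 KiB) for HS isochronous endpoints and
 * MAX_DMA_DESC_NUM_GENERIC (typically 64, i.e. 512 bytes) otherwise.
 */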
88 static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
89                                 gfp_t flags)
90 {
91         struct kmem_cache *desc_cache;
92
93         if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
94             qh->dev_speed == USB_SPEED_HIGH)
95                 desc_cache = hsotg->desc_hsisoc_cache;
96         else
97                 desc_cache = hsotg->desc_gen_cache;
98
99         qh->desc_list_sz = sizeof(struct dwc2_dma_desc) *
100                                                 dwc2_max_desc_num(qh);
101
102         qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
103         if (!qh->desc_list)
104                 return -ENOMEM;
105
106         qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
107                                            qh->desc_list_sz,
108                                            DMA_TO_DEVICE);
109
110         qh->n_bytes = kcalloc(dwc2_max_desc_num(qh), sizeof(u32), flags);
111         if (!qh->n_bytes) {
112                 dma_unmap_single(hsotg->dev, qh->desc_list_dma,
113                                  qh->desc_list_sz,
114                                  DMA_FROM_DEVICE);
115                 kmem_cache_free(desc_cache, qh->desc_list);
116                 qh->desc_list = NULL;
117                 return -ENOMEM;
118         }
119
120         return 0;
121 }
122
123 static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
124 {
125         struct kmem_cache *desc_cache;
126
127         if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
128             qh->dev_speed == USB_SPEED_HIGH)
129                 desc_cache = hsotg->desc_hsisoc_cache;
130         else
131                 desc_cache = hsotg->desc_gen_cache;
132
133         if (qh->desc_list) {
134                 dma_unmap_single(hsotg->dev, qh->desc_list_dma,
135                                  qh->desc_list_sz, DMA_FROM_DEVICE);
136                 kmem_cache_free(desc_cache, qh->desc_list);
137                 qh->desc_list = NULL;
138         }
139
140         kfree(qh->n_bytes);
141         qh->n_bytes = NULL;
142 }
143
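/*
 * Allocate the periodic frame list: FRLISTEN_64_SIZE (64) 32-bit entries,
 * i.e. 256 bytes, where each entry is a bitmap of the host channels to be
 * serviced in the corresponding frame.
 */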
144 static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
145 {
146         if (hsotg->frame_list)
147                 return 0;
148
149         hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
150         hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
151         if (!hsotg->frame_list)
152                 return -ENOMEM;
153
154         hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
155                                                hsotg->frame_list_sz,
156                                                DMA_TO_DEVICE);
157
158         return 0;
159 }
160
161 static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
162 {
163         unsigned long flags;
164
165         spin_lock_irqsave(&hsotg->lock, flags);
166
167         if (!hsotg->frame_list) {
168                 spin_unlock_irqrestore(&hsotg->lock, flags);
169                 return;
170         }
171
172         dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
173                          hsotg->frame_list_sz, DMA_FROM_DEVICE);
174
175         kfree(hsotg->frame_list);
176         hsotg->frame_list = NULL;
177
178         spin_unlock_irqrestore(&hsotg->lock, flags);
179 }
180
181 static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
182 {
183         u32 hcfg;
184         unsigned long flags;
185
186         spin_lock_irqsave(&hsotg->lock, flags);
187
188         hcfg = dwc2_readl(hsotg, HCFG);
189         if (hcfg & HCFG_PERSCHEDENA) {
190                 /* already enabled */
191                 spin_unlock_irqrestore(&hsotg->lock, flags);
192                 return;
193         }
194
195         dwc2_writel(hsotg, hsotg->frame_list_dma, HFLBADDR);
196
197         hcfg &= ~HCFG_FRLISTEN_MASK;
198         hcfg |= fr_list_en | HCFG_PERSCHEDENA;
199         dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
200         dwc2_writel(hsotg, hcfg, HCFG);
201
202         spin_unlock_irqrestore(&hsotg->lock, flags);
203 }
204
205 static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
206 {
207         u32 hcfg;
208         unsigned long flags;
209
210         spin_lock_irqsave(&hsotg->lock, flags);
211
212         hcfg = dwc2_readl(hsotg, HCFG);
213         if (!(hcfg & HCFG_PERSCHEDENA)) {
214                 /* already disabled */
215                 spin_unlock_irqrestore(&hsotg->lock, flags);
216                 return;
217         }
218
219         hcfg &= ~HCFG_PERSCHEDENA;
220         dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
221         dwc2_writel(hsotg, hcfg, HCFG);
222
223         spin_unlock_irqrestore(&hsotg->lock, flags);
224 }
225
226 /*
227  * Activates/Deactivates FrameList entries for the channel based on endpoint
228  * servicing period
229  */
230 static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
231                                    int enable)
232 {
233         struct dwc2_host_chan *chan;
234         u16 i, j, inc;
235
236         if (!hsotg) {
237                 pr_err("hsotg = %p\n", hsotg);
238                 return;
239         }
240
241         if (!qh->channel) {
242                 dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
243                 return;
244         }
245
246         if (!hsotg->frame_list) {
247                 dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
248                         hsotg->frame_list);
249                 return;
250         }
251
252         chan = qh->channel;
253         inc = dwc2_frame_incr_val(qh);
254         if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
255                 i = dwc2_frame_list_idx(qh->next_active_frame);
256         else
257                 i = 0;
258
259         j = i;
260         do {
261                 if (enable)
262                         hsotg->frame_list[j] |= 1 << chan->hc_num;
263                 else
264                         hsotg->frame_list[j] &= ~(1 << chan->hc_num);
265                 j = (j + inc) & (FRLISTEN_64_SIZE - 1);
266         } while (j != i);
267
268         /*
269          * Sync frame list since controller will access it if periodic
270          * channel is currently enabled.
271          */
272         dma_sync_single_for_device(hsotg->dev,
273                                    hsotg->frame_list_dma,
274                                    hsotg->frame_list_sz,
275                                    DMA_TO_DEVICE);
276
277         if (!enable)
278                 return;
279
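        /*
         * schinfo is a bitmap describing the servicing pattern across the 8
         * microframes of a full frame: e.g. a HS endpoint with
         * host_interval = 4 gets schinfo = 0x11, one with host_interval = 2
         * gets 0x55. Non-HS channels simply use 0xff.
         */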
280         chan->schinfo = 0;
281         if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
282                 j = 1;
283                 /* TODO - check this */
284                 inc = (8 + qh->host_interval - 1) / qh->host_interval;
285                 for (i = 0; i < inc; i++) {
286                         chan->schinfo |= j;
287                         j = j << qh->host_interval;
288                 }
289         } else {
290                 chan->schinfo = 0xff;
291         }
292 }
293
294 static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
295                                       struct dwc2_qh *qh)
296 {
297         struct dwc2_host_chan *chan = qh->channel;
298
299         if (dwc2_qh_is_non_per(qh)) {
300                 if (hsotg->params.uframe_sched)
301                         hsotg->available_host_channels++;
302                 else
303                         hsotg->non_periodic_channels--;
304         } else {
305                 dwc2_update_frame_list(hsotg, qh, 0);
306                 hsotg->available_host_channels++;
307         }
308
309         /*
310          * The check prevents a double cleanup attempt in case of device
311          * disconnect. See the channel cleanup in dwc2_hcd_disconnect().
312          */
313         if (chan->qh) {
314                 if (!list_empty(&chan->hc_list_entry))
315                         list_del(&chan->hc_list_entry);
316                 dwc2_hc_cleanup(hsotg, chan);
317                 list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
318                 chan->qh = NULL;
319         }
320
321         qh->channel = NULL;
322         qh->ntd = 0;
323
324         if (qh->desc_list)
325                 memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
326                        dwc2_max_desc_num(qh));
327 }
328
329 /**
330  * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
331  * related members
332  *
333  * @hsotg: The HCD state structure for the DWC OTG controller
334  * @qh:    The QH to init
335  * @mem_flags: Indicates the type of memory allocation
336  *
337  * Return: 0 if successful, negative error code otherwise
338  *
339  * Allocates memory for the descriptor list. For the first periodic QH,
340  * allocates memory for the FrameList and enables periodic scheduling.
341  */
342 int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
343                           gfp_t mem_flags)
344 {
345         int retval;
346
347         if (qh->do_split) {
348                 dev_err(hsotg->dev,
349                         "SPLIT Transfers are not supported in Descriptor DMA mode.\n");
350                 retval = -EINVAL;
351                 goto err0;
352         }
353
354         retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
355         if (retval)
356                 goto err0;
357
358         if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
359             qh->ep_type == USB_ENDPOINT_XFER_INT) {
360                 if (!hsotg->frame_list) {
361                         retval = dwc2_frame_list_alloc(hsotg, mem_flags);
362                         if (retval)
363                                 goto err1;
364                         /* Enable periodic schedule on first periodic QH */
365                         dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
366                 }
367         }
368
369         qh->ntd = 0;
370         return 0;
371
372 err1:
373         dwc2_desc_list_free(hsotg, qh);
374 err0:
375         return retval;
376 }
377
378 /**
379  * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
380  * members
381  *
382  * @hsotg: The HCD state structure for the DWC OTG controller
383  * @qh:    The QH to free
384  *
385  * Frees descriptor list memory associated with the QH. If the QH is periodic
386  * and is the last one, frees the FrameList and disables periodic scheduling.
387  */
388 void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
389 {
390         unsigned long flags;
391
392         dwc2_desc_list_free(hsotg, qh);
393
394         /*
395          * The channel may still be assigned in some cases, e.g. on Isoc
396          * URB dequeue: the channel is halted but no subsequent ChHalted
397          * interrupt arrives to release it, so when this is later called
398          * from the endpoint disable routine the channel is still
399          * assigned.
400          */
401         spin_lock_irqsave(&hsotg->lock, flags);
402         if (qh->channel)
403                 dwc2_release_channel_ddma(hsotg, qh);
404         spin_unlock_irqrestore(&hsotg->lock, flags);
405
406         if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
407              qh->ep_type == USB_ENDPOINT_XFER_INT) &&
408             (hsotg->params.uframe_sched ||
409              !hsotg->periodic_channels) && hsotg->frame_list) {
410                 dwc2_per_sched_disable(hsotg);
411                 dwc2_frame_list_free(hsotg);
412         }
413 }
414
415 static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
416 {
417         if (qh->dev_speed == USB_SPEED_HIGH)
418                 /* Descriptor set (8 descriptors) index which is 8-aligned */
419                 return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
420         else
421                 return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
422 }
423
424 /*
425  * Determine the starting frame for an Isochronous transfer.
426  * A few frames are skipped to prevent a race condition with the HC.
427  */
428 static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
429                                     struct dwc2_qh *qh, u16 *skip_frames)
430 {
431         u16 frame;
432
433         hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
434
435         /*
436          * next_active_frame is always a frame number (not a uFrame), both
437          * in FS and HS!
438          */
439
440         /*
441          * skip_frames is used to limit the number of activated descriptors,
442          * to avoid the situation where the HC services the last activated
443          * descriptor first.
444          * Example for FS:
445          * The current frame is 1, the scheduled frame is 3. Since the HC
446          * always fetches the descriptor corresponding to curr_frame+1, the
447          * descriptor corresponding to frame 2 will be fetched. If the number
448          * of descriptors is max=64 (or greater), the list will be fully
449          * programmed with Active descriptors and it is possible (though rare)
450          * that the latest descriptor (considering rollback) corresponding to
451          * frame 2 will be serviced first. The HS case is more probable
452          * because, in fact, up to 11 uframes (16 in the code) may be skipped.
453          */
454         if (qh->dev_speed == USB_SPEED_HIGH) {
455                 /*
456                  * Also consider the uframe counter, to start the transfer
457                  * asap. If half of the frame has elapsed, skip 2 frames,
458                  * otherwise just 1 frame. The starting descriptor index must
459                  * be 8-aligned, so if the current frame is nearly complete
460                  * the next one is skipped as well.
461                  */
462                 if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
463                         *skip_frames = 2 * 8;
464                         frame = dwc2_frame_num_inc(hsotg->frame_number,
465                                                    *skip_frames);
466                 } else {
467                         *skip_frames = 1 * 8;
468                         frame = dwc2_frame_num_inc(hsotg->frame_number,
469                                                    *skip_frames);
470                 }
471
472                 frame = dwc2_full_frame_num(frame);
473         } else {
474                 /*
475                  * Two frames are skipped for FS - the current and the next.
476                  * But for descriptor programming, 1 frame (descriptor) is
477                  * enough, see example above.
478                  */
479                 *skip_frames = 1;
480                 frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
481         }
482
483         return frame;
484 }
485
486 /*
487  * Calculate initial descriptor index for isochronous transfer based on
488  * scheduled frame
489  */
490 static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
491                                         struct dwc2_qh *qh)
492 {
493         u16 frame, fr_idx, fr_idx_tmp, skip_frames;
494
495         /*
496          * With the current ISOC processing algorithm the channel is released
497          * when there are no more QTDs in the list (qh->ntd == 0). Thus this
498          * function is called only when qh->ntd == 0 and qh->channel == NULL.
499          *
500          * So the qh->channel != NULL branch is not used; it is kept in the
501          * source for another possible approach: do not disable and release
502          * the channel when the ISOC session completes, just move the QH to the
503          * inactive schedule until a new QTD arrives. On a new QTD, the QH is
504          * moved back to the 'ready' schedule, and the starting frame (and
505          * therefore the starting desc_index) is recalculated. In that case the
506          * channel is released only on ep_disable.
507          */
508
509         /*
510          * Calculate the starting descriptor index. For an INTERRUPT
511          * endpoint it is always 0.
512          */
513         if (qh->channel) {
514                 frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
515                 /*
516                  * Calculate initial descriptor index based on FrameList current
517                  * bitmap and servicing period
518                  */
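                /*
                 * For example, with a FS isoc QH serviced every 4 frames
                 * whose next_active_frame maps to list index 10: if the
                 * newly calculated starting frame maps to index 13, then
                 * (64 + 10 - 13) % 4 = 1 and fr_idx = (1 + 13) % 64 = 14,
                 * the first index at or after 13 that keeps the QH's phase.
                 */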
519                 fr_idx_tmp = dwc2_frame_list_idx(frame);
520                 fr_idx = (FRLISTEN_64_SIZE +
521                           dwc2_frame_list_idx(qh->next_active_frame) -
522                           fr_idx_tmp) % dwc2_frame_incr_val(qh);
523                 fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
524         } else {
525                 qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
526                                                            &skip_frames);
527                 fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
528         }
529
530         qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);
531
532         return skip_frames;
533 }
534
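/*
 * When ISOC_URB_GIVEBACK_ASAP is defined, IOC is set on the descriptor for
 * the last frame of each URB so completed URBs are given back as soon as
 * possible; otherwise a single IOC per activated descriptor chunk is used
 * (see dwc2_init_isoc_dma_desc()).
 */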
535 #define ISOC_URB_GIVEBACK_ASAP
536
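/*
 * Largest isochronous payload per (micro)frame: 1023 bytes at full speed,
 * 3072 bytes (high-bandwidth, three transactions per microframe) at high
 * speed.
 */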
537 #define MAX_ISOC_XFER_SIZE_FS   1023
538 #define MAX_ISOC_XFER_SIZE_HS   3072
539 #define DESCNUM_THRESHOLD       4
540
541 static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
542                                          struct dwc2_qtd *qtd,
543                                          struct dwc2_qh *qh, u32 max_xfer_size,
544                                          u16 idx)
545 {
546         struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx];
547         struct dwc2_hcd_iso_packet_desc *frame_desc;
548
549         memset(dma_desc, 0, sizeof(*dma_desc));
550         frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
551
552         if (frame_desc->length > max_xfer_size)
553                 qh->n_bytes[idx] = max_xfer_size;
554         else
555                 qh->n_bytes[idx] = frame_desc->length;
556
557         dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
558         dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
559                            HOST_DMA_ISOC_NBYTES_MASK;
560
561         /* Set active bit */
562         dma_desc->status |= HOST_DMA_A;
563
564         qh->ntd++;
565         qtd->isoc_frame_index_last++;
566
567 #ifdef ISOC_URB_GIVEBACK_ASAP
568         /* Set IOC for each descriptor corresponding to last frame of URB */
569         if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
570                 dma_desc->status |= HOST_DMA_IOC;
571 #endif
572
573         dma_sync_single_for_device(hsotg->dev,
574                                    qh->desc_list_dma +
575                         (idx * sizeof(struct dwc2_dma_desc)),
576                         sizeof(struct dwc2_dma_desc),
577                         DMA_TO_DEVICE);
578 }
579
580 static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
581                                     struct dwc2_qh *qh, u16 skip_frames)
582 {
583         struct dwc2_qtd *qtd;
584         u32 max_xfer_size;
585         u16 idx, inc, n_desc = 0, ntd_max = 0;
586         u16 cur_idx;
587         u16 next_idx;
588
589         idx = qh->td_last;
590         inc = qh->host_interval;
591         hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
592         cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
593         next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
594
595         /*
596          * Ensure the current frame number has not overstepped the last
597          * scheduled descriptor. If it has, the only way to recover is to
598          * move qh->td_last to the current frame number + 1, so that the
599          * next isoc descriptor is scheduled on frame number + 1 and not on
600          * a past frame.
601          */
602         if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
603                 if (inc < 32) {
604                         dev_vdbg(hsotg->dev,
605                                  "current frame number overstep last descriptor\n");
606                         qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
607                                                             qh->dev_speed);
608                         idx = qh->td_last;
609                 }
610         }
611
612         if (qh->host_interval) {
613                 ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) /
614                                 qh->host_interval;
615                 if (skip_frames && !qh->channel)
616                         ntd_max -= skip_frames / qh->host_interval;
617         }
618
619         max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
620                         MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;
621
622         list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
623                 if (qtd->in_process &&
624                     qtd->isoc_frame_index_last ==
625                     qtd->urb->packet_count)
626                         continue;
627
628                 qtd->isoc_td_first = idx;
629                 while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
630                                                 qtd->urb->packet_count) {
631                         dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
632                                                      max_xfer_size, idx);
633                         idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
634                         n_desc++;
635                 }
636                 qtd->isoc_td_last = idx;
637                 qtd->in_process = 1;
638         }
639
640         qh->td_last = idx;
641
642 #ifdef ISOC_URB_GIVEBACK_ASAP
643         /* Set IOC for last descriptor if descriptor list is full */
644         if (qh->ntd == ntd_max) {
645                 idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
646                 qh->desc_list[idx].status |= HOST_DMA_IOC;
647                 dma_sync_single_for_device(hsotg->dev,
648                                            qh->desc_list_dma + (idx *
649                                            sizeof(struct dwc2_dma_desc)),
650                                            sizeof(struct dwc2_dma_desc),
651                                            DMA_TO_DEVICE);
652         }
653 #else
654         /*
655          * Set the IOC bit for only one descriptor. Always try to stay ahead
656          * of HW processing, i.e. on IOC generation the driver activates the
657          * next descriptor, while the core continues to process descriptors
658          * following the one with IOC set.
659          */
660
661         if (n_desc > DESCNUM_THRESHOLD)
662                 /*
663                  * Move the IOC "up". This is required even if there is only
664                  * one QTD in the list, because QTDs might continue to be
665                  * queued although only one was queued at activation time.
666                  * Actually more than one QTD might already be in the list if
667                  * this function was called from XferCompletion - QTDs were
668                  * queued during HW processing of the previous descriptor chunk.
669                  */
670                 idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
671                                             qh->dev_speed);
672         else
673                 /*
674                  * Set the IOC on the latest descriptor if either the number
675                  * of descriptors is not greater than the threshold or no more
676                  * new descriptors were activated.
677                  */
678                 idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
679
680         qh->desc_list[idx].status |= HOST_DMA_IOC;
681         dma_sync_single_for_device(hsotg->dev,
682                                    qh->desc_list_dma +
683                                    (idx * sizeof(struct dwc2_dma_desc)),
684                                    sizeof(struct dwc2_dma_desc),
685                                    DMA_TO_DEVICE);
686 #endif
687 }
688
689 static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
690                                     struct dwc2_host_chan *chan,
691                                     struct dwc2_qtd *qtd, struct dwc2_qh *qh,
692                                     int n_desc)
693 {
694         struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc];
695         int len = chan->xfer_len;
696
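        /*
         * Clamp so the descriptor's NBYTES field cannot overflow, even after
         * the IN-direction round-up to a whole number of packets below.
         */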
697         if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1))
698                 len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1);
699
700         if (chan->ep_is_in) {
701                 int num_packets;
702
703                 if (len > 0 && chan->max_packet)
704                         num_packets = (len + chan->max_packet - 1)
705                                         / chan->max_packet;
706                 else
707                         /* Need 1 packet for transfer length of 0 */
708                         num_packets = 1;
709
710                 /* Always program an integral # of packets for IN transfers */
711                 len = num_packets * chan->max_packet;
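                /*
                 * e.g. max_packet = 512 and len = 700 gives num_packets = 2
                 * and len = 1024; the over-allocation is reconciled against
                 * chan->xfer_len at the end of this function.
                 */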
712         }
713
714         dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
715         qh->n_bytes[n_desc] = len;
716
717         if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
718             qtd->control_phase == DWC2_CONTROL_SETUP)
719                 dma_desc->status |= HOST_DMA_SUP;
720
721         dma_desc->buf = (u32)chan->xfer_dma;
722
723         dma_sync_single_for_device(hsotg->dev,
724                                    qh->desc_list_dma +
725                                    (n_desc * sizeof(struct dwc2_dma_desc)),
726                                    sizeof(struct dwc2_dma_desc),
727                                    DMA_TO_DEVICE);
728
729         /*
730          * Last (or only) descriptor of IN transfer with actual size less
731          * than MaxPacket
732          */
733         if (len > chan->xfer_len) {
734                 chan->xfer_len = 0;
735         } else {
736                 chan->xfer_dma += len;
737                 chan->xfer_len -= len;
738         }
739 }
740
741 static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
742                                         struct dwc2_qh *qh)
743 {
744         struct dwc2_qtd *qtd;
745         struct dwc2_host_chan *chan = qh->channel;
746         int n_desc = 0;
747
748         dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
749                  (unsigned long)chan->xfer_dma, chan->xfer_len);
750
751         /*
752          * Start with chan->xfer_dma as initialized in assign_and_init_hc().
753          * If an SG transfer consists of multiple URBs, this pointer is then
754          * re-assigned to the buffer of the currently processed QTD. For a
755          * non-SG request there is always exactly one QTD active.
756          */
757
758         list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
759                 dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);
760
761                 if (n_desc) {
762                         /* SG request - more than 1 QTD */
763                         chan->xfer_dma = qtd->urb->dma +
764                                         qtd->urb->actual_length;
765                         chan->xfer_len = qtd->urb->length -
766                                         qtd->urb->actual_length;
767                         dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
768                                  (unsigned long)chan->xfer_dma, chan->xfer_len);
769                 }
770
771                 qtd->n_desc = 0;
772                 do {
773                         if (n_desc > 1) {
774                                 qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
775                                 dev_vdbg(hsotg->dev,
776                                          "set A bit in desc %d (%p)\n",
777                                          n_desc - 1,
778                                          &qh->desc_list[n_desc - 1]);
779                                 dma_sync_single_for_device(hsotg->dev,
780                                                            qh->desc_list_dma +
781                                         ((n_desc - 1) *
782                                         sizeof(struct dwc2_dma_desc)),
783                                         sizeof(struct dwc2_dma_desc),
784                                         DMA_TO_DEVICE);
785                         }
786                         dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
787                         dev_vdbg(hsotg->dev,
788                                  "desc %d (%p) buf=%08x status=%08x\n",
789                                  n_desc, &qh->desc_list[n_desc],
790                                  qh->desc_list[n_desc].buf,
791                                  qh->desc_list[n_desc].status);
792                         qtd->n_desc++;
793                         n_desc++;
794                 } while (chan->xfer_len > 0 &&
795                          n_desc != MAX_DMA_DESC_NUM_GENERIC);
796
797                 dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
798                 qtd->in_process = 1;
799                 if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
800                         break;
801                 if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
802                         break;
803         }
804
805         if (n_desc) {
806                 qh->desc_list[n_desc - 1].status |=
807                                 HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
808                 dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
809                          n_desc - 1, &qh->desc_list[n_desc - 1]);
810                 dma_sync_single_for_device(hsotg->dev,
811                                            qh->desc_list_dma + (n_desc - 1) *
812                                            sizeof(struct dwc2_dma_desc),
813                                            sizeof(struct dwc2_dma_desc),
814                                            DMA_TO_DEVICE);
815                 if (n_desc > 1) {
816                         qh->desc_list[0].status |= HOST_DMA_A;
817                         dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
818                                  &qh->desc_list[0]);
819                         dma_sync_single_for_device(hsotg->dev,
820                                                    qh->desc_list_dma,
821                                         sizeof(struct dwc2_dma_desc),
822                                         DMA_TO_DEVICE);
823                 }
824                 chan->ntd = n_desc;
825         }
826 }
827
828 /**
829  * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
830  *
831  * @hsotg: The HCD state structure for the DWC OTG controller
832  * @qh:    The QH to init
833  *
834  * Return: 0 if successful, negative error code otherwise
835  *
836  * For Control and Bulk endpoints, initializes descriptor list and starts the
837  * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
838  * list then updates FrameList, marking appropriate entries as active.
839  *
840  * For Isochronous endpoints the starting descriptor index is calculated based
841  * on the scheduled frame, but only on the first transfer descriptor within a
842  * session. Then the transfer is started via enabling the channel.
843  *
844  * For Isochronous endpoints the channel is not halted on XferComplete, so it
845  * remains assigned to the endpoint (QH) until the session is done.
846  */
847 void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
848 {
849         /* Channel is already assigned */
850         struct dwc2_host_chan *chan = qh->channel;
851         u16 skip_frames = 0;
852
853         switch (chan->ep_type) {
854         case USB_ENDPOINT_XFER_CONTROL:
855         case USB_ENDPOINT_XFER_BULK:
856                 dwc2_init_non_isoc_dma_desc(hsotg, qh);
857                 dwc2_hc_start_transfer_ddma(hsotg, chan);
858                 break;
859         case USB_ENDPOINT_XFER_INT:
860                 dwc2_init_non_isoc_dma_desc(hsotg, qh);
861                 dwc2_update_frame_list(hsotg, qh, 1);
862                 dwc2_hc_start_transfer_ddma(hsotg, chan);
863                 break;
864         case USB_ENDPOINT_XFER_ISOC:
865                 if (!qh->ntd)
866                         skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
867                 dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);
868
869                 if (!chan->xfer_started) {
870                         dwc2_update_frame_list(hsotg, qh, 1);
871
872                         /*
873                          * Always set ntd to the maximum instead of the
874                          * actual size; otherwise ntd would change while the
875                          * channel is being enabled, which is not recommended.
876                          */
877                         chan->ntd = dwc2_max_desc_num(qh);
878
879                         /* Enable channel only once for ISOC */
880                         dwc2_hc_start_transfer_ddma(hsotg, chan);
881                 }
882
883                 break;
884         default:
885                 break;
886         }
887 }
888
889 #define DWC2_CMPL_DONE          1
890 #define DWC2_CMPL_STOP          2
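/*
 * Return values of dwc2_cmpl_host_isoc_dma_desc(): DWC2_CMPL_DONE means the
 * whole QTD completed and was freed, DWC2_CMPL_STOP means scanning should
 * stop at a descriptor with IOC set; a negative value aborts processing.
 */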
891
892 static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
893                                         struct dwc2_host_chan *chan,
894                                         struct dwc2_qtd *qtd,
895                                         struct dwc2_qh *qh, u16 idx)
896 {
897         struct dwc2_dma_desc *dma_desc;
898         struct dwc2_hcd_iso_packet_desc *frame_desc;
899         u16 remain = 0;
900         int rc = 0;
901
902         if (!qtd->urb)
903                 return -EINVAL;
904
905         dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
906                                 sizeof(struct dwc2_dma_desc)),
907                                 sizeof(struct dwc2_dma_desc),
908                                 DMA_FROM_DEVICE);
909
910         dma_desc = &qh->desc_list[idx];
911
912         frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
913         dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
914         if (chan->ep_is_in)
915                 remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
916                          HOST_DMA_ISOC_NBYTES_SHIFT;
917
918         if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
919                 /*
920                  * XactError, or unable to complete all the transactions
921                  * in the scheduled micro-frame/frame, both indicated by
922                  * HOST_DMA_STS_PKTERR
923                  */
924                 qtd->urb->error_count++;
925                 frame_desc->actual_length = qh->n_bytes[idx] - remain;
926                 frame_desc->status = -EPROTO;
927         } else {
928                 /* Success */
929                 frame_desc->actual_length = qh->n_bytes[idx] - remain;
930                 frame_desc->status = 0;
931         }
932
933         if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
934                 /*
935                  * urb->status is not used for isoc transfers here. The
936                  * individual frame_desc statuses are used instead.
937                  */
938                 dwc2_host_complete(hsotg, qtd, 0);
939                 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
940
941                 /*
942                  * This check is necessary because urb_dequeue can be called
943                  * from urb complete callback (sound driver for example). All
944                  * pending URBs are dequeued there, so no need for further
945                  * processing.
946                  */
947                 if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
948                         return -1;
949                 rc = DWC2_CMPL_DONE;
950         }
951
952         qh->ntd--;
953
954         /* Stop if IOC requested descriptor reached */
955         if (dma_desc->status & HOST_DMA_IOC)
956                 rc = DWC2_CMPL_STOP;
957
958         return rc;
959 }
960
961 static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
962                                          struct dwc2_host_chan *chan,
963                                          enum dwc2_halt_status halt_status)
964 {
965         struct dwc2_hcd_iso_packet_desc *frame_desc;
966         struct dwc2_qtd *qtd, *qtd_tmp;
967         struct dwc2_qh *qh;
968         u16 idx;
969         int rc;
970
971         qh = chan->qh;
972         idx = qh->td_first;
973
974         if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
975                 list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
976                         qtd->in_process = 0;
977                 return;
978         }
979
980         if (halt_status == DWC2_HC_XFER_AHB_ERR ||
981             halt_status == DWC2_HC_XFER_BABBLE_ERR) {
982                 /*
983                  * The channel is halted in these error cases, which are
984                  * considered serious issues.
985                  * Complete all URBs marking all frames as failed, irrespective
986                  * of whether some of the descriptors (frames) succeeded or not.
987                  * Pass the error code to the completion routine as well, to
988                  * update urb->status; some class drivers might use it to stop
989                  * queueing transfer requests.
990                  */
991                 int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
992                           -EIO : -EOVERFLOW;
993
994                 list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
995                                          qtd_list_entry) {
996                         if (qtd->urb) {
997                                 for (idx = 0; idx < qtd->urb->packet_count;
998                                      idx++) {
999                                         frame_desc = &qtd->urb->iso_descs[idx];
1000                                         frame_desc->status = err;
1001                                 }
1002
1003                                 dwc2_host_complete(hsotg, qtd, err);
1004                         }
1005
1006                         dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1007                 }
1008
1009                 return;
1010         }
1011
1012         list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
1013                 if (!qtd->in_process)
1014                         break;
1015
1016                 /*
1017                  * Ensure idx corresponds to the descriptor where the first
1018                  * URB of this QTD was added. During isoc desc init, dwc2 may
1019                  * skip an index if the current frame number is already past it.
1020                  */
1021                 if (idx != qtd->isoc_td_first) {
1022                         dev_vdbg(hsotg->dev,
1023                                  "try to complete %d instead of %d\n",
1024                                  idx, qtd->isoc_td_first);
1025                         idx = qtd->isoc_td_first;
1026                 }
1027
1028                 do {
1029                         struct dwc2_qtd *qtd_next;
1030                         u16 cur_idx;
1031
1032                         rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
1033                                                           idx);
1034                         if (rc < 0)
1035                                 return;
1036                         idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
1037                                                     chan->speed);
1038                         if (!rc)
1039                                 continue;
1040
1041                         if (rc == DWC2_CMPL_DONE)
1042                                 break;
1043
1044                         /* rc == DWC2_CMPL_STOP */
1045
1046                         if (qh->host_interval >= 32)
1047                                 goto stop_scan;
1048
1049                         qh->td_first = idx;
1050                         cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
1051                         qtd_next = list_first_entry(&qh->qtd_list,
1052                                                     struct dwc2_qtd,
1053                                                     qtd_list_entry);
1054                         if (dwc2_frame_idx_num_gt(cur_idx,
1055                                                   qtd_next->isoc_td_last))
1056                                 break;
1057
1058                         goto stop_scan;
1059
1060                 } while (idx != qh->td_first);
1061         }
1062
1063 stop_scan:
1064         qh->td_first = idx;
1065 }
1066
1067 static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
1068                                                struct dwc2_host_chan *chan,
1069                                         struct dwc2_qtd *qtd,
1070                                         struct dwc2_dma_desc *dma_desc,
1071                                         enum dwc2_halt_status halt_status,
1072                                         u32 n_bytes, int *xfer_done)
1073 {
1074         struct dwc2_hcd_urb *urb = qtd->urb;
1075         u16 remain = 0;
1076
1077         if (chan->ep_is_in)
1078                 remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
1079                          HOST_DMA_NBYTES_SHIFT;
1080
1081         dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);
1082
1083         if (halt_status == DWC2_HC_XFER_AHB_ERR) {
1084                 dev_err(hsotg->dev, "EIO\n");
1085                 urb->status = -EIO;
1086                 return 1;
1087         }
1088
1089         if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
1090                 switch (halt_status) {
1091                 case DWC2_HC_XFER_STALL:
1092                         dev_vdbg(hsotg->dev, "Stall\n");
1093                         urb->status = -EPIPE;
1094                         break;
1095                 case DWC2_HC_XFER_BABBLE_ERR:
1096                         dev_err(hsotg->dev, "Babble\n");
1097                         urb->status = -EOVERFLOW;
1098                         break;
1099                 case DWC2_HC_XFER_XACT_ERR:
1100                         dev_err(hsotg->dev, "XactErr\n");
1101                         urb->status = -EPROTO;
1102                         break;
1103                 default:
1104                         dev_err(hsotg->dev,
1105                                 "%s: Unhandled descriptor error status (%d)\n",
1106                                 __func__, halt_status);
1107                         break;
1108                 }
1109                 return 1;
1110         }
1111
1112         if (dma_desc->status & HOST_DMA_A) {
1113                 dev_vdbg(hsotg->dev,
1114                          "Active descriptor encountered on channel %d\n",
1115                          chan->hc_num);
1116                 return 0;
1117         }
1118
1119         if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
1120                 if (qtd->control_phase == DWC2_CONTROL_DATA) {
1121                         urb->actual_length += n_bytes - remain;
1122                         if (remain || urb->actual_length >= urb->length) {
1123                                 /*
1124                                  * For Control Data stage do not set urb->status
1125                                  * to 0, to prevent URB callback. Set it when
1126                                  * Status phase is done. See below.
1127                                  */
1128                                 *xfer_done = 1;
1129                         }
1130                 } else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
1131                         urb->status = 0;
1132                         *xfer_done = 1;
1133                 }
1134                 /* No handling for SETUP stage */
1135         } else {
1136                 /* BULK and INTR */
1137                 urb->actual_length += n_bytes - remain;
1138                 dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
1139                          urb->actual_length);
1140                 if (remain || urb->actual_length >= urb->length) {
1141                         urb->status = 0;
1142                         *xfer_done = 1;
1143                 }
1144         }
1145
1146         return 0;
1147 }
1148
1149 static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
1150                                       struct dwc2_host_chan *chan,
1151                                       int chnum, struct dwc2_qtd *qtd,
1152                                       int desc_num,
1153                                       enum dwc2_halt_status halt_status,
1154                                       int *xfer_done)
1155 {
1156         struct dwc2_qh *qh = chan->qh;
1157         struct dwc2_hcd_urb *urb = qtd->urb;
1158         struct dwc2_dma_desc *dma_desc;
1159         u32 n_bytes;
1160         int failed;
1161
1162         dev_vdbg(hsotg->dev, "%s()\n", __func__);
1163
1164         if (!urb)
1165                 return -EINVAL;
1166
1167         dma_sync_single_for_cpu(hsotg->dev,
1168                                 qh->desc_list_dma + (desc_num *
1169                                 sizeof(struct dwc2_dma_desc)),
1170                                 sizeof(struct dwc2_dma_desc),
1171                                 DMA_FROM_DEVICE);
1172
1173         dma_desc = &qh->desc_list[desc_num];
1174         n_bytes = qh->n_bytes[desc_num];
1175         dev_vdbg(hsotg->dev,
1176                  "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
1177                  qtd, urb, desc_num, dma_desc, n_bytes);
1178         failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
1179                                                      halt_status, n_bytes,
1180                                                      xfer_done);
1181         if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
1182                 dwc2_host_complete(hsotg, qtd, urb->status);
1183                 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1184                 dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
1185                          failed, *xfer_done);
1186                 return failed;
1187         }
1188
1189         if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
1190                 switch (qtd->control_phase) {
1191                 case DWC2_CONTROL_SETUP:
1192                         if (urb->length > 0)
1193                                 qtd->control_phase = DWC2_CONTROL_DATA;
1194                         else
1195                                 qtd->control_phase = DWC2_CONTROL_STATUS;
1196                         dev_vdbg(hsotg->dev,
1197                                  "  Control setup transaction done\n");
1198                         break;
1199                 case DWC2_CONTROL_DATA:
1200                         if (*xfer_done) {
1201                                 qtd->control_phase = DWC2_CONTROL_STATUS;
1202                                 dev_vdbg(hsotg->dev,
1203                                          "  Control data transfer done\n");
1204                         } else if (desc_num + 1 == qtd->n_desc) {
1205                                 /*
1206                                  * Last descriptor for Control data stage which
1207                                  * is not completed yet
1208                                  */
1209                                 dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1210                                                           qtd);
1211                         }
1212                         break;
1213                 default:
1214                         break;
1215                 }
1216         }
1217
1218         return 0;
1219 }
1220
1221 static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1222                                              struct dwc2_host_chan *chan,
1223                                              int chnum,
1224                                              enum dwc2_halt_status halt_status)
1225 {
1226         struct list_head *qtd_item, *qtd_tmp;
1227         struct dwc2_qh *qh = chan->qh;
1228         struct dwc2_qtd *qtd = NULL;
1229         int xfer_done;
1230         int desc_num = 0;
1231
1232         if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
1233                 list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
1234                         qtd->in_process = 0;
1235                 return;
1236         }
1237
1238         list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
1239                 int i;
1240                 int qtd_desc_count;
1241
1242                 qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
1243                 xfer_done = 0;
1244                 qtd_desc_count = qtd->n_desc;
1245
1246                 for (i = 0; i < qtd_desc_count; i++) {
1247                         if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
1248                                                        desc_num, halt_status,
1249                                                        &xfer_done)) {
1250                                 qtd = NULL;
1251                                 goto stop_scan;
1252                         }
1253
1254                         desc_num++;
1255                 }
1256         }
1257
1258 stop_scan:
1259         if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
1260                 /*
1261                  * Resetting the data toggle for bulk and interrupt endpoints
1262                  * in case of stall. See handle_hc_stall_intr().
1263                  */
1264                 if (halt_status == DWC2_HC_XFER_STALL)
1265                         qh->data_toggle = DWC2_HC_PID_DATA0;
1266                 else
1267                         dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
1268         }
1269
1270         if (halt_status == DWC2_HC_XFER_COMPLETE) {
1271                 if (chan->hcint & HCINTMSK_NYET) {
1272                         /*
1273                          * Got a NYET on the last transaction of the transfer.
1274                          * It means that the endpoint should be in the PING
1275                          * state at the beginning of the next transfer.
1276                          */
1277                         qh->ping_state = 1;
1278                 }
1279         }
1280 }
1281
1282 /**
1283  * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
1284  * status and calls completion routine for the URB if it's done. Called from
1285  * interrupt handlers.
1286  *
1287  * @hsotg:       The HCD state structure for the DWC OTG controller
1288  * @chan:        Host channel the transfer is completed on
1289  * @chnum:       Index of Host channel registers
1290  * @halt_status: Reason the channel is being halted or just XferComplete
1291  *               for isochronous transfers
1292  *
1293  * Releases the channel to be used by other transfers.
1294  * In the case of an Isochronous endpoint the channel is not halted until the
1295  * end of the session, i.e. until the QTD list is empty.
1296  * If a periodic channel is released, the FrameList is updated accordingly.
1297  * Calls transaction selection routines to activate pending transfers.
1298  */
1299 void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
1300                                  struct dwc2_host_chan *chan, int chnum,
1301                                  enum dwc2_halt_status halt_status)
1302 {
1303         struct dwc2_qh *qh = chan->qh;
1304         int continue_isoc_xfer = 0;
1305         enum dwc2_transaction_type tr_type;
1306
1307         if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1308                 dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);
1309
1310                 /* Release the channel if halted or session completed */
1311                 if (halt_status != DWC2_HC_XFER_COMPLETE ||
1312                     list_empty(&qh->qtd_list)) {
1313                         struct dwc2_qtd *qtd, *qtd_tmp;
1314
1315                         /*
1316                          * Kill all remaining QTDs since the channel has
1317                          * been halted.
1318                          */
1319                         list_for_each_entry_safe(qtd, qtd_tmp,
1320                                                  &qh->qtd_list,
1321                                                  qtd_list_entry) {
1322                                 dwc2_host_complete(hsotg, qtd,
1323                                                    -ECONNRESET);
1324                                 dwc2_hcd_qtd_unlink_and_free(hsotg,
1325                                                              qtd, qh);
1326                         }
1327
1328                         /* Halt the channel if session completed */
1329                         if (halt_status == DWC2_HC_XFER_COMPLETE)
1330                                 dwc2_hc_halt(hsotg, chan, halt_status);
1331                         dwc2_release_channel_ddma(hsotg, qh);
1332                         dwc2_hcd_qh_unlink(hsotg, qh);
1333                 } else {
1334                         /* Keep in assigned schedule to continue transfer */
1335                         list_move_tail(&qh->qh_list_entry,
1336                                        &hsotg->periodic_sched_assigned);
1337                         /*
1338                          * If the channel has been halted during giveback of
1339                          * an urb, prevent any new scheduling.
1340                          */
1341                         if (!chan->halt_status)
1342                                 continue_isoc_xfer = 1;
1343                 }
1344                 /*
1345                  * Todo: Consider the case when period exceeds FrameList size.
1346                  * Frame Rollover interrupt should be used.
1347                  */
1348         } else {
1349                 /*
1350                  * Scan descriptor list to complete the URB(s), then release
1351                  * the channel
1352                  */
1353                 dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
1354                                                  halt_status);
1355                 dwc2_release_channel_ddma(hsotg, qh);
1356                 dwc2_hcd_qh_unlink(hsotg, qh);
1357
1358                 if (!list_empty(&qh->qtd_list)) {
1359                         /*
1360                          * Add back to inactive non-periodic schedule on normal
1361                          * completion
1362                          */
1363                         dwc2_hcd_qh_add(hsotg, qh);
1364                 }
1365         }
1366
1367         tr_type = dwc2_hcd_select_transactions(hsotg);
1368         if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
1369                 if (continue_isoc_xfer) {
1370                         if (tr_type == DWC2_TRANSACTION_NONE)
1371                                 tr_type = DWC2_TRANSACTION_PERIODIC;
1372                         else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
1373                                 tr_type = DWC2_TRANSACTION_ALL;
1374                 }
1375                 dwc2_hcd_queue_transactions(hsotg, tr_type);
1376         }
1377 }