drivers/usb/dwc3/gadget.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
4  *
5  * Copyright (C) 2010-2011 Texas Instruments Incorporated - https://www.ti.com
6  *
7  * Authors: Felipe Balbi <balbi@ti.com>,
8  *          Sebastian Andrzej Siewior <bigeasy@linutronix.de>
9  */
10
11 #include <linux/kernel.h>
12 #include <linux/delay.h>
13 #include <linux/slab.h>
14 #include <linux/spinlock.h>
15 #include <linux/platform_device.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/list.h>
20 #include <linux/dma-mapping.h>
21
22 #include <linux/usb/ch9.h>
23 #include <linux/usb/gadget.h>
24
25 #include "debug.h"
26 #include "core.h"
27 #include "gadget.h"
28 #include "io.h"
29
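/*
 * DWC3_ALIGN_FRAME rounds (frame_number + n * interval) down to a multiple of
 * the endpoint's interval; this relies on the interval being a power of two.
 */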
30 #define DWC3_ALIGN_FRAME(d, n)  (((d)->frame_number + ((d)->interval * (n))) \
31                                         & ~((d)->interval - 1))
32
33 /**
34  * dwc3_gadget_set_test_mode - enables usb2 test modes
35  * @dwc: pointer to our context structure
36  * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
37  *
38  * Caller should take care of locking. This function will return 0 on
39  * success or -EINVAL if a wrong Test Selector is passed.
40  */
41 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
42 {
43         u32             reg;
44
45         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
46         reg &= ~DWC3_DCTL_TSTCTRL_MASK;
47
48         switch (mode) {
49         case USB_TEST_J:
50         case USB_TEST_K:
51         case USB_TEST_SE0_NAK:
52         case USB_TEST_PACKET:
53         case USB_TEST_FORCE_ENABLE:
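                /* The test selector field in DCTL starts at bit 1, hence the shift */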
54                 reg |= mode << 1;
55                 break;
56         default:
57                 return -EINVAL;
58         }
59
60         dwc3_gadget_dctl_write_safe(dwc, reg);
61
62         return 0;
63 }
64
65 /**
66  * dwc3_gadget_get_link_state - gets current state of usb link
67  * @dwc: pointer to our context structure
68  *
69  * Caller should take care of locking. This function will
70  * return the link state on success (>= 0) or -ETIMEDOUT.
71  */
72 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
73 {
74         u32             reg;
75
76         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
77
78         return DWC3_DSTS_USBLNKST(reg);
79 }
80
81 /**
82  * dwc3_gadget_set_link_state - sets usb link to a particular state
83  * @dwc: pointer to our context structure
84  * @state: the state to put link into
85  *
86  * Caller should take care of locking. This function will
87  * return 0 on success or -ETIMEDOUT.
88  */
89 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
90 {
91         int             retries = 10000;
92         u32             reg;
93
94         /*
95          * Wait until device controller is ready. Only applies to 1.94a and
96          * later RTL.
97          */
98         if (!DWC3_VER_IS_PRIOR(DWC3, 194A)) {
99                 while (--retries) {
100                         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
101                         if (reg & DWC3_DSTS_DCNRD)
102                                 udelay(5);
103                         else
104                                 break;
105                 }
106
107                 if (retries <= 0)
108                         return -ETIMEDOUT;
109         }
110
111         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
112         reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
113
114         /* set no action before sending new link state change */
115         dwc3_writel(dwc->regs, DWC3_DCTL, reg);
116
117         /* set requested state */
118         reg |= DWC3_DCTL_ULSTCHNGREQ(state);
119         dwc3_writel(dwc->regs, DWC3_DCTL, reg);
120
121         /*
122          * The following code is racy when called from dwc3_gadget_wakeup,
123          * and is not needed, at least on newer versions
124          */
125         if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
126                 return 0;
127
128         /* wait for a change in DSTS */
129         retries = 10000;
130         while (--retries) {
131                 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
132
133                 if (DWC3_DSTS_USBLNKST(reg) == state)
134                         return 0;
135
136                 udelay(5);
137         }
138
139         return -ETIMEDOUT;
140 }
141
142 /**
143  * dwc3_ep_inc_trb - increment a trb index.
144  * @index: Pointer to the TRB index to increment.
145  *
146  * The index should never point to the link TRB. After incrementing,
147  * if it points to the link TRB, wrap around to the beginning. The
148  * link TRB is always at the last TRB entry.
149  */
150 static void dwc3_ep_inc_trb(u8 *index)
151 {
152         (*index)++;
153         if (*index == (DWC3_TRB_NUM - 1))
154                 *index = 0;
155 }
156
157 /**
158  * dwc3_ep_inc_enq - increment endpoint's enqueue pointer
159  * @dep: The endpoint whose enqueue pointer we're incrementing
160  */
161 static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
162 {
163         dwc3_ep_inc_trb(&dep->trb_enqueue);
164 }
165
166 /**
167  * dwc3_ep_inc_deq - increment endpoint's dequeue pointer
168  * @dep: The endpoint whose dequeue pointer we're incrementing
169  */
170 static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
171 {
172         dwc3_ep_inc_trb(&dep->trb_dequeue);
173 }
174
175 static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
176                 struct dwc3_request *req, int status)
177 {
178         struct dwc3                     *dwc = dep->dwc;
179
180         list_del(&req->list);
181         req->remaining = 0;
182         req->needs_extra_trb = false;
183
184         if (req->request.status == -EINPROGRESS)
185                 req->request.status = status;
186
187         if (req->trb)
188                 usb_gadget_unmap_request_by_dev(dwc->sysdev,
189                                 &req->request, req->direction);
190
191         req->trb = NULL;
192         trace_dwc3_gadget_giveback(req);
193
194         if (dep->number > 1)
195                 pm_runtime_put(dwc->dev);
196 }
197
198 /**
199  * dwc3_gadget_giveback - call struct usb_request's ->complete callback
200  * @dep: The endpoint to whom the request belongs to
201  * @req: The request we're giving back
202  * @status: completion code for the request
203  *
204  * Must be called with controller's lock held and interrupts disabled. This
205  * function will unmap @req and call its ->complete() callback to notify upper
206  * layers that it has completed.
207  */
208 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
209                 int status)
210 {
211         struct dwc3                     *dwc = dep->dwc;
212
213         dwc3_gadget_del_and_unmap_request(dep, req, status);
214         req->status = DWC3_REQUEST_STATUS_COMPLETED;
215
216         spin_unlock(&dwc->lock);
217         usb_gadget_giveback_request(&dep->endpoint, &req->request);
218         spin_lock(&dwc->lock);
219 }
220
221 /**
222  * dwc3_send_gadget_generic_command - issue a generic command for the controller
223  * @dwc: pointer to the controller context
224  * @cmd: the command to be issued
225  * @param: command parameter
226  *
227  * Caller should take care of locking. Issue @cmd with a given @param to @dwc
228  * and wait for its completion.
229  */
230 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd,
231                 u32 param)
232 {
233         u32             timeout = 500;
234         int             status = 0;
235         int             ret = 0;
236         u32             reg;
237
238         dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
239         dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
240
241         do {
242                 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
243                 if (!(reg & DWC3_DGCMD_CMDACT)) {
244                         status = DWC3_DGCMD_STATUS(reg);
245                         if (status)
246                                 ret = -EINVAL;
247                         break;
248                 }
249         } while (--timeout);
250
251         if (!timeout) {
252                 ret = -ETIMEDOUT;
253                 status = -ETIMEDOUT;
254         }
255
256         trace_dwc3_gadget_generic_cmd(cmd, param, status);
257
258         return ret;
259 }
260
261 static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
262
263 /**
264  * dwc3_send_gadget_ep_cmd - issue an endpoint command
265  * @dep: the endpoint to which the command is going to be issued
266  * @cmd: the command to be issued
267  * @params: parameters to the command
268  *
269  * Caller should handle locking. This function will issue @cmd with given
270  * @params to @dep and wait for its completion.
271  */
272 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
273                 struct dwc3_gadget_ep_cmd_params *params)
274 {
275         const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
276         struct dwc3             *dwc = dep->dwc;
277         u32                     timeout = 5000;
278         u32                     saved_config = 0;
279         u32                     reg;
280
281         int                     cmd_status = 0;
282         int                     ret = -EINVAL;
283
284         /*
285          * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
286          * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
287          * endpoint command.
288          *
289          * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
290          * settings. Restore them after the command is completed.
291          *
292          * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
293          */
294         if (dwc->gadget->speed <= USB_SPEED_HIGH) {
295                 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
296                 if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
297                         saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
298                         reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
299                 }
300
301                 if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
302                         saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
303                         reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
304                 }
305
306                 if (saved_config)
307                         dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
308         }
309
310         if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
311                 int link_state;
312
313                 /*
314                  * Initiate remote wakeup if the link state is in U3 when
315                  * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
316                  * link state is in U1/U2, no remote wakeup is needed. The Start
317                  * Transfer command will initiate the link recovery.
318                  */
319                 link_state = dwc3_gadget_get_link_state(dwc);
320                 switch (link_state) {
321                 case DWC3_LINK_STATE_U2:
322                         if (dwc->gadget->speed >= USB_SPEED_SUPER)
323                                 break;
324
325                         fallthrough;
326                 case DWC3_LINK_STATE_U3:
327                         ret = __dwc3_gadget_wakeup(dwc);
328                         dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
329                                         ret);
330                         break;
331                 }
332         }
333
334         dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
335         dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
336         dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
337
338         /*
339          * Synopsys Databook 2.60a states in section 6.3.2.5.6 that if we're
340          * not relying on XferNotReady, we can make use of a special "No
341          * Response Update Transfer" command where we should clear both CmdAct
342          * and CmdIOC bits.
343          *
344          * With this, we don't need to wait for command completion and can
345          * straight away issue further commands to the endpoint.
346          *
347          * NOTICE: We're making an assumption that control endpoints will never
348          * make use of Update Transfer command. This is a safe assumption
349          * because we can never have more than one request at a time with
350          * Control Endpoints. If anybody changes that assumption, this chunk
351          * needs to be updated accordingly.
352          */
353         if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_UPDATETRANSFER &&
354                         !usb_endpoint_xfer_isoc(desc))
355                 cmd &= ~(DWC3_DEPCMD_CMDIOC | DWC3_DEPCMD_CMDACT);
356         else
357                 cmd |= DWC3_DEPCMD_CMDACT;
358
359         dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
360         do {
361                 reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
362                 if (!(reg & DWC3_DEPCMD_CMDACT)) {
363                         cmd_status = DWC3_DEPCMD_STATUS(reg);
364
365                         switch (cmd_status) {
366                         case 0:
367                                 ret = 0;
368                                 break;
369                         case DEPEVT_TRANSFER_NO_RESOURCE:
370                                 dev_WARN(dwc->dev, "No resource for %s\n",
371                                          dep->name);
372                                 ret = -EINVAL;
373                                 break;
374                         case DEPEVT_TRANSFER_BUS_EXPIRY:
375                                 /*
376                                  * SW issues START TRANSFER command to
377                                  * isochronous ep with future frame interval. If
378                                  * future interval time has already passed when
379                                  * core receives the command, it will respond
380                                  * with an error status of 'Bus Expiry'.
381                                  *
382                                  * Instead of always returning -EINVAL, let's
383                                  * give a hint to the gadget driver that this is
384                                  * the case by returning -EAGAIN.
385                                  */
386                                 ret = -EAGAIN;
387                                 break;
388                         default:
389                                 dev_WARN(dwc->dev, "UNKNOWN cmd status\n");
390                         }
391
392                         break;
393                 }
394         } while (--timeout);
395
396         if (timeout == 0) {
397                 ret = -ETIMEDOUT;
398                 cmd_status = -ETIMEDOUT;
399         }
400
401         trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
402
403         if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
404                 if (ret == 0)
405                         dep->flags |= DWC3_EP_TRANSFER_STARTED;
406
407                 if (ret != -ETIMEDOUT)
408                         dwc3_gadget_ep_get_transfer_index(dep);
409         }
410
411         if (saved_config) {
412                 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
413                 reg |= saved_config;
414                 dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
415         }
416
417         return ret;
418 }
419
420 static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
421 {
422         struct dwc3 *dwc = dep->dwc;
423         struct dwc3_gadget_ep_cmd_params params;
424         u32 cmd = DWC3_DEPCMD_CLEARSTALL;
425
426         /*
427          * As of core revision 2.60a the recommended programming model
428          * is to set the ClearPendIN bit when issuing a Clear Stall EP
429          * command for IN endpoints. This is to prevent an issue where
430          * some (non-compliant) hosts may not send ACK TPs for pending
431          * IN transfers due to a mishandled error condition. Synopsys
432          * STAR 9000614252.
433          */
434         if (dep->direction &&
435             !DWC3_VER_IS_PRIOR(DWC3, 260A) &&
436             (dwc->gadget->speed >= USB_SPEED_SUPER))
437                 cmd |= DWC3_DEPCMD_CLEARPENDIN;
438
439         memset(&params, 0, sizeof(params));
440
441         return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
442 }
443
444 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
445                 struct dwc3_trb *trb)
446 {
447         u32             offset = (char *) trb - (char *) dep->trb_pool;
448
449         return dep->trb_pool_dma + offset;
450 }
451
452 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
453 {
454         struct dwc3             *dwc = dep->dwc;
455
456         if (dep->trb_pool)
457                 return 0;
458
459         dep->trb_pool = dma_alloc_coherent(dwc->sysdev,
460                         sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
461                         &dep->trb_pool_dma, GFP_KERNEL);
462         if (!dep->trb_pool) {
463                 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
464                                 dep->name);
465                 return -ENOMEM;
466         }
467
468         return 0;
469 }
470
471 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
472 {
473         struct dwc3             *dwc = dep->dwc;
474
475         dma_free_coherent(dwc->sysdev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
476                         dep->trb_pool, dep->trb_pool_dma);
477
478         dep->trb_pool = NULL;
479         dep->trb_pool_dma = 0;
480 }
481
482 static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
483 {
484         struct dwc3_gadget_ep_cmd_params params;
485
486         memset(&params, 0x00, sizeof(params));
487
488         params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
489
490         return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
491                         &params);
492 }
493
494 /**
495  * dwc3_gadget_start_config - configure ep resources
496  * @dep: endpoint that is being enabled
497  *
498  * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
499  * completion, it will set Transfer Resource for all available endpoints.
500  *
501  * The assignment of transfer resources cannot perfectly follow the data book
502  * due to the fact that the controller driver does not have all knowledge of the
503  * configuration in advance. It is given this information piecemeal by the
504  * composite gadget framework after every SET_CONFIGURATION and
505  * SET_INTERFACE. Trying to follow the databook programming model in this
506  * scenario can cause errors. For two reasons:
507  *
508  * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
509  * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
510  * incorrect in the scenario of multiple interfaces.
511  *
512  * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
513  * endpoint on alt setting (8.1.6).
514  *
515  * The following simplified method is used instead:
516  *
517  * All hardware endpoints can be assigned a transfer resource and this setting
518  * will stay persistent until either a core reset or hibernation. So whenever we
519  * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
520  * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
521  * guaranteed that there are as many transfer resources as endpoints.
522  *
523  * This function is called for each endpoint when it is being enabled but is
524  * triggered only when called for EP0-out, which always happens first, and which
525  * should only happen in one of the above conditions.
526  */
527 static int dwc3_gadget_start_config(struct dwc3_ep *dep)
528 {
529         struct dwc3_gadget_ep_cmd_params params;
530         struct dwc3             *dwc;
531         u32                     cmd;
532         int                     i;
533         int                     ret;
534
535         if (dep->number)
536                 return 0;
537
538         memset(&params, 0x00, sizeof(params));
539         cmd = DWC3_DEPCMD_DEPSTARTCFG;
540         dwc = dep->dwc;
541
542         ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
543         if (ret)
544                 return ret;
545
546         for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
547                 struct dwc3_ep *dep = dwc->eps[i];
548
549                 if (!dep)
550                         continue;
551
552                 ret = dwc3_gadget_set_xfer_resource(dep);
553                 if (ret)
554                         return ret;
555         }
556
557         return 0;
558 }
559
560 static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
561 {
562         const struct usb_ss_ep_comp_descriptor *comp_desc;
563         const struct usb_endpoint_descriptor *desc;
564         struct dwc3_gadget_ep_cmd_params params;
565         struct dwc3 *dwc = dep->dwc;
566
567         comp_desc = dep->endpoint.comp_desc;
568         desc = dep->endpoint.desc;
569
570         memset(&params, 0x00, sizeof(params));
571
572         params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
573                 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
574
575         /* Burst size is only needed in SuperSpeed mode */
576         if (dwc->gadget->speed >= USB_SPEED_SUPER) {
577                 u32 burst = dep->endpoint.maxburst;
578
579                 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
580         }
581
582         params.param0 |= action;
583         if (action == DWC3_DEPCFG_ACTION_RESTORE)
584                 params.param2 |= dep->saved_state;
585
586         if (usb_endpoint_xfer_control(desc))
587                 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
588
589         if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
590                 params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
591
592         if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
593                 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
594                         | DWC3_DEPCFG_XFER_COMPLETE_EN
595                         | DWC3_DEPCFG_STREAM_EVENT_EN;
596                 dep->stream_capable = true;
597         }
598
599         if (!usb_endpoint_xfer_control(desc))
600                 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
601
602         /*
603          * We are doing 1:1 mapping for endpoints, meaning
604          * Physical Endpoint 2 maps to Logical Endpoint 2 and
605          * so on. We consider the direction bit as part of the physical
606          * endpoint number. So USB endpoint 0x81 is 0x03.
607          */
608         params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
609
610         /*
611          * We must use the lower 16 TX FIFOs even though
612          * HW might have more
613          */
614         if (dep->direction)
615                 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
616
617         if (desc->bInterval) {
618                 u8 bInterval_m1;
619
620                 /*
621                  * Valid range for DEPCFG.bInterval_m1 is from 0 to 13.
622                  *
623                  * NOTE: The programming guide incorrectly stated bInterval_m1
624                  * must be set to 0 when operating in fullspeed. Internally the
625                  * controller does not have this limitation. See DWC_usb3x
626                  * programming guide section 3.2.2.1.
627                  */
628                 bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
629
630                 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
631                     dwc->gadget->speed == USB_SPEED_FULL)
632                         dep->interval = desc->bInterval;
633                 else
634                         dep->interval = 1 << (desc->bInterval - 1);
635
636                 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
637         }
638
639         return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
640 }
641
642 static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
643                 bool interrupt);
644
645 /**
646  * dwc3_gadget_calc_tx_fifo_size - calculates the txfifo size value
647  * @dwc: pointer to the DWC3 context
648  * @mult: multiplier to be used when calculating the fifo_size
649  *
650  * Calculates the size value based on the equation below:
651  *
652  * DWC3 revision 280A and prior:
653  * fifo_size = mult * (max_packet / mdwidth) + 1;
654  *
655  * DWC3 revision 290A and onwards:
656  * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
657  *
658  * The max packet size is set to 1024, as the txfifo requirements mainly apply
659  * to super speed USB use cases.  However, it is safe to overestimate the fifo
660  * allocations for other scenarios, i.e. high speed USB.
661  */
662 static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
663 {
664         int max_packet = 1024;
665         int fifo_size;
666         int mdwidth;
667
668         mdwidth = dwc3_mdwidth(dwc);
669
670         /* MDWIDTH is represented in bits, we need it in bytes */
671         mdwidth >>= 3;
672
673         if (DWC3_VER_IS_PRIOR(DWC3, 290A))
674                 fifo_size = mult * (max_packet / mdwidth) + 1;
675         else
676                 fifo_size = mult * ((max_packet + mdwidth) / mdwidth) + 1;
677         return fifo_size;
678 }
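
/*
 * A worked example with hypothetical values: on a >= 290A core with a 64-bit
 * master bus (mdwidth = 8 bytes), max_packet = 1024 and mult = 3 gives
 * fifo_size = 3 * ((1024 + 8) / 8 + 1) + 1 = 3 * 130 + 1 = 391.
 */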
679
680 /**
681  * dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
682  * @dwc: pointer to the DWC3 context
683  *
684  * Iterates through all the endpoint registers and clears the previous txfifo
685  * allocations.
686  */
687 void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
688 {
689         struct dwc3_ep *dep;
690         int fifo_depth;
691         int size;
692         int num;
693
694         if (!dwc->do_fifo_resize)
695                 return;
696
697         /* Read ep0IN related TXFIFO size */
698         dep = dwc->eps[1];
699         size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
700         if (DWC3_IP_IS(DWC3))
701                 fifo_depth = DWC3_GTXFIFOSIZ_TXFDEP(size);
702         else
703                 fifo_depth = DWC31_GTXFIFOSIZ_TXFDEP(size);
704
705         dwc->last_fifo_depth = fifo_depth;
706         /* Clear existing TXFIFO for all IN eps except ep0 */
707         for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM);
708              num += 2) {
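                /* IN endpoints occupy the odd indices; num >> 1 selects the TXFIFO */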
709                 dep = dwc->eps[num];
710                 /* Don't change TXFRAMNUM on usb31 version */
711                 size = DWC3_IP_IS(DWC3) ? 0 :
712                         dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) &
713                                    DWC31_GTXFIFOSIZ_TXFRAMNUM;
714
715                 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1), size);
716                 dep->flags &= ~DWC3_EP_TXFIFO_RESIZED;
717         }
718         dwc->num_ep_resized = 0;
719 }
720
721 /*
722  * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
723  * @dep: the endpoint whose TxFIFO is being resized
724  *
725  * This function will make a best-effort FIFO allocation in order
726  * to improve FIFO usage and throughput, while still allowing
727  * us to enable as many endpoints as possible.
728  *
729  * Keep in mind that this operation will be highly dependent
730  * on the configured size for RAM1 (which contains the TxFIFOs),
731  * the number of endpoints enabled in the coreConsultant tool, and
732  * the width of the master bus.
733  *
734  * In general, FIFO depths are represented with the following equation:
735  *
736  * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1
737  *
738  * In conjunction with dwc3_gadget_check_config(), this resizing logic will
739  * ensure that all endpoints will have enough internal memory for one max
740  * packet per endpoint.
741  */
742 static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
743 {
744         struct dwc3 *dwc = dep->dwc;
745         int fifo_0_start;
746         int ram1_depth;
747         int fifo_size;
748         int min_depth;
749         int num_in_ep;
750         int remaining;
751         int num_fifos = 1;
752         int fifo;
753         int tmp;
754
755         if (!dwc->do_fifo_resize)
756                 return 0;
757
758         /* resize IN endpoints except ep0 */
759         if (!usb_endpoint_dir_in(dep->endpoint.desc) || dep->number <= 1)
760                 return 0;
761
762         /* bail if already resized */
763         if (dep->flags & DWC3_EP_TXFIFO_RESIZED)
764                 return 0;
765
766         ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
767
768         if ((dep->endpoint.maxburst > 1 &&
769              usb_endpoint_xfer_bulk(dep->endpoint.desc)) ||
770             usb_endpoint_xfer_isoc(dep->endpoint.desc))
771                 num_fifos = 3;
772
773         if (dep->endpoint.maxburst > 6 &&
774             usb_endpoint_xfer_bulk(dep->endpoint.desc) && DWC3_IP_IS(DWC31))
775                 num_fifos = dwc->tx_fifo_resize_max_num;
776
777         /* FIFO size for a single buffer */
778         fifo = dwc3_gadget_calc_tx_fifo_size(dwc, 1);
779
780         /* Calculate the number of remaining EPs w/o any FIFO */
781         num_in_ep = dwc->max_cfg_eps;
782         num_in_ep -= dwc->num_ep_resized;
783
784         /* Reserve at least one FIFO for the number of IN EPs */
785         min_depth = num_in_ep * (fifo + 1);
786         remaining = ram1_depth - min_depth - dwc->last_fifo_depth;
787         remaining = max_t(int, 0, remaining);
788         /*
789          * We've already reserved 1 FIFO per EP, so check what we can fit in
790          * addition to it.  If there is not enough remaining space, allocate
791          * all the remaining space to the EP.
792          */
793         fifo_size = (num_fifos - 1) * fifo;
794         if (remaining < fifo_size)
795                 fifo_size = remaining;
796
797         fifo_size += fifo;
798         /* Last increment according to the TX FIFO size equation */
799         fifo_size++;
800
801         /* Check if TXFIFOs start at non-zero addr */
802         tmp = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
803         fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(tmp);
804
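        /*
         * GTXFIFOSIZ packs the FIFO start address into its upper 16 bits and
         * the depth into the lower bits; start this FIFO right after the
         * space already allocated (fifo_0_start + dwc->last_fifo_depth).
         */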
805         fifo_size |= (fifo_0_start + (dwc->last_fifo_depth << 16));
806         if (DWC3_IP_IS(DWC3))
807                 dwc->last_fifo_depth += DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
808         else
809                 dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
810
811         /* Check fifo size allocation doesn't exceed available RAM size. */
812         if (dwc->last_fifo_depth >= ram1_depth) {
813                 dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
814                         dwc->last_fifo_depth, ram1_depth,
815                         dep->endpoint.name, fifo_size);
816                 if (DWC3_IP_IS(DWC3))
817                         fifo_size = DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
818                 else
819                         fifo_size = DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
820
821                 dwc->last_fifo_depth -= fifo_size;
822                 return -ENOMEM;
823         }
824
825         dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1), fifo_size);
826         dep->flags |= DWC3_EP_TXFIFO_RESIZED;
827         dwc->num_ep_resized++;
828
829         return 0;
830 }
831
832 /**
833  * __dwc3_gadget_ep_enable - initializes a hw endpoint
834  * @dep: endpoint to be initialized
835  * @action: one of INIT, MODIFY or RESTORE
836  *
837  * Caller should take care of locking. Execute all necessary commands to
838  * initialize a HW endpoint so it can be used by a gadget driver.
839  */
840 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
841 {
842         const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
843         struct dwc3             *dwc = dep->dwc;
844
845         u32                     reg;
846         int                     ret;
847
848         if (!(dep->flags & DWC3_EP_ENABLED)) {
849                 ret = dwc3_gadget_resize_tx_fifos(dep);
850                 if (ret)
851                         return ret;
852
853                 ret = dwc3_gadget_start_config(dep);
854                 if (ret)
855                         return ret;
856         }
857
858         ret = dwc3_gadget_set_ep_config(dep, action);
859         if (ret)
860                 return ret;
861
862         if (!(dep->flags & DWC3_EP_ENABLED)) {
863                 struct dwc3_trb *trb_st_hw;
864                 struct dwc3_trb *trb_link;
865
866                 dep->type = usb_endpoint_type(desc);
867                 dep->flags |= DWC3_EP_ENABLED;
868
869                 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
870                 reg |= DWC3_DALEPENA_EP(dep->number);
871                 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
872
873                 if (usb_endpoint_xfer_control(desc))
874                         goto out;
875
876                 /* Initialize the TRB ring */
877                 dep->trb_dequeue = 0;
878                 dep->trb_enqueue = 0;
879                 memset(dep->trb_pool, 0,
880                        sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
881
882                 /* Link TRB. The HWO bit is never reset */
883                 trb_st_hw = &dep->trb_pool[0];
884
885                 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
886                 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
887                 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
888                 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
889                 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
890         }
891
892         /*
893          * Issue StartTransfer here with no-op TRB so we can always rely on No
894          * Response Update Transfer command.
895          */
896         if (usb_endpoint_xfer_bulk(desc) ||
897                         usb_endpoint_xfer_int(desc)) {
898                 struct dwc3_gadget_ep_cmd_params params;
899                 struct dwc3_trb *trb;
900                 dma_addr_t trb_dma;
901                 u32 cmd;
902
903                 memset(&params, 0, sizeof(params));
904                 trb = &dep->trb_pool[0];
905                 trb_dma = dwc3_trb_dma_offset(dep, trb);
906
907                 params.param0 = upper_32_bits(trb_dma);
908                 params.param1 = lower_32_bits(trb_dma);
909
910                 cmd = DWC3_DEPCMD_STARTTRANSFER;
911
912                 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
913                 if (ret < 0)
914                         return ret;
915
916                 if (dep->stream_capable) {
917                         /*
918                          * For streams, at start, there may be a race where the
919                          * host primes the endpoint before the function driver
920                          * queues a request to initiate a stream. In that case,
921                          * the controller will not see the prime to generate the
922                          * ERDY and start stream. To workaround this, issue a
923                          * no-op TRB as normal, but end it immediately. As a
924                          * result, when the function driver queues the request,
925                          * the next START_TRANSFER command will cause the
926                          * controller to generate an ERDY to initiate the
927                          * stream.
928                          */
929                         dwc3_stop_active_transfer(dep, true, true);
930
931                         /*
932                          * All stream eps will reinitiate stream on NoStream
933                          * rejection until we can determine that the host can
934                          * prime after the first transfer.
935                          *
936                          * However, if the controller is capable of
937                          * TXF_FLUSH_BYPASS, then IN direction endpoints will
938                          * automatically restart the stream without the driver
939                          * initiation.
940                          */
941                         if (!dep->direction ||
942                             !(dwc->hwparams.hwparams9 &
943                               DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS))
944                                 dep->flags |= DWC3_EP_FORCE_RESTART_STREAM;
945                 }
946         }
947
948 out:
949         trace_dwc3_gadget_ep_enable(dep);
950
951         return 0;
952 }
953
954 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
955 {
956         struct dwc3_request             *req;
957
958         dwc3_stop_active_transfer(dep, true, false);
959
960         /* giveback all requests to the gadget driver */
961         while (!list_empty(&dep->started_list)) {
962                 req = next_request(&dep->started_list);
963
964                 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
965         }
966
967         while (!list_empty(&dep->pending_list)) {
968                 req = next_request(&dep->pending_list);
969
970                 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
971         }
972
973         while (!list_empty(&dep->cancelled_list)) {
974                 req = next_request(&dep->cancelled_list);
975
976                 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
977         }
978 }
979
980 /**
981  * __dwc3_gadget_ep_disable - disables a hw endpoint
982  * @dep: the endpoint to disable
983  *
984  * This function undoes what __dwc3_gadget_ep_enable did and also removes
985  * requests which are currently being processed by the hardware and those which
986  * are not yet scheduled.
987  *
988  * Caller should take care of locking.
989  */
990 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
991 {
992         struct dwc3             *dwc = dep->dwc;
993         u32                     reg;
994
995         trace_dwc3_gadget_ep_disable(dep);
996
997         /* make sure HW endpoint isn't stalled */
998         if (dep->flags & DWC3_EP_STALL)
999                 __dwc3_gadget_ep_set_halt(dep, 0, false);
1000
1001         reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1002         reg &= ~DWC3_DALEPENA_EP(dep->number);
1003         dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1004
1005         /* Clear out the ep descriptors for non-ep0 */
1006         if (dep->number > 1) {
1007                 dep->endpoint.comp_desc = NULL;
1008                 dep->endpoint.desc = NULL;
1009         }
1010
1011         dwc3_remove_requests(dwc, dep);
1012
1013         dep->stream_capable = false;
1014         dep->type = 0;
1015         dep->flags &= DWC3_EP_TXFIFO_RESIZED;
1016
1017         return 0;
1018 }
1019
1020 /* -------------------------------------------------------------------------- */
1021
1022 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
1023                 const struct usb_endpoint_descriptor *desc)
1024 {
1025         return -EINVAL;
1026 }
1027
1028 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
1029 {
1030         return -EINVAL;
1031 }
1032
1033 /* -------------------------------------------------------------------------- */
1034
1035 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
1036                 const struct usb_endpoint_descriptor *desc)
1037 {
1038         struct dwc3_ep                  *dep;
1039         struct dwc3                     *dwc;
1040         unsigned long                   flags;
1041         int                             ret;
1042
1043         if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
1044                 pr_debug("dwc3: invalid parameters\n");
1045                 return -EINVAL;
1046         }
1047
1048         if (!desc->wMaxPacketSize) {
1049                 pr_debug("dwc3: missing wMaxPacketSize\n");
1050                 return -EINVAL;
1051         }
1052
1053         dep = to_dwc3_ep(ep);
1054         dwc = dep->dwc;
1055
1056         if (dev_WARN_ONCE(dwc->dev, dep->flags & DWC3_EP_ENABLED,
1057                                         "%s is already enabled\n",
1058                                         dep->name))
1059                 return 0;
1060
1061         spin_lock_irqsave(&dwc->lock, flags);
1062         ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
1063         spin_unlock_irqrestore(&dwc->lock, flags);
1064
1065         return ret;
1066 }
1067
1068 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
1069 {
1070         struct dwc3_ep                  *dep;
1071         struct dwc3                     *dwc;
1072         unsigned long                   flags;
1073         int                             ret;
1074
1075         if (!ep) {
1076                 pr_debug("dwc3: invalid parameters\n");
1077                 return -EINVAL;
1078         }
1079
1080         dep = to_dwc3_ep(ep);
1081         dwc = dep->dwc;
1082
1083         if (dev_WARN_ONCE(dwc->dev, !(dep->flags & DWC3_EP_ENABLED),
1084                                         "%s is already disabled\n",
1085                                         dep->name))
1086                 return 0;
1087
1088         spin_lock_irqsave(&dwc->lock, flags);
1089         ret = __dwc3_gadget_ep_disable(dep);
1090         spin_unlock_irqrestore(&dwc->lock, flags);
1091
1092         return ret;
1093 }
1094
1095 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
1096                 gfp_t gfp_flags)
1097 {
1098         struct dwc3_request             *req;
1099         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
1100
1101         req = kzalloc(sizeof(*req), gfp_flags);
1102         if (!req)
1103                 return NULL;
1104
1105         req->direction  = dep->direction;
1106         req->epnum      = dep->number;
1107         req->dep        = dep;
1108         req->status     = DWC3_REQUEST_STATUS_UNKNOWN;
1109
1110         trace_dwc3_alloc_request(req);
1111
1112         return &req->request;
1113 }
1114
1115 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
1116                 struct usb_request *request)
1117 {
1118         struct dwc3_request             *req = to_dwc3_request(request);
1119
1120         trace_dwc3_free_request(req);
1121         kfree(req);
1122 }
1123
1124 /**
1125  * dwc3_ep_prev_trb - returns the previous TRB in the ring
1126  * @dep: The endpoint with the TRB ring
1127  * @index: The index of the current TRB in the ring
1128  *
1129  * Returns the TRB prior to the one pointed to by the index. If the
1130  * index is 0, we will wrap backwards, skip the link TRB, and return
1131  * the one just before that.
1132  */
1133 static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
1134 {
1135         u8 tmp = index;
1136
1137         if (!tmp)
1138                 tmp = DWC3_TRB_NUM - 1;
1139
1140         return &dep->trb_pool[tmp - 1];
1141 }
1142
1143 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
1144 {
1145         u8                      trbs_left;
1146
1147         /*
1148          * If the enqueue & dequeue are equal then the TRB ring is either full
1149          * or empty. It's considered full when there are DWC3_TRB_NUM-1 of TRBs
1150          * pending to be processed by the driver.
1151          */
1152         if (dep->trb_enqueue == dep->trb_dequeue) {
1153                 /*
1154                  * If any request remains in the started_list at this
1155                  * point, that means there is no TRB available.
1156                  */
1157                 if (!list_empty(&dep->started_list))
1158                         return 0;
1159
1160                 return DWC3_TRB_NUM - 1;
1161         }
1162
1163         trbs_left = dep->trb_dequeue - dep->trb_enqueue;
1164         trbs_left &= (DWC3_TRB_NUM - 1);
1165
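        /*
         * When the free region wraps past the end of the ring it includes the
         * link TRB slot, which cannot hold data, so discount one TRB.
         */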
1166         if (dep->trb_dequeue < dep->trb_enqueue)
1167                 trbs_left--;
1168
1169         return trbs_left;
1170 }
1171
1172 static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
1173                 dma_addr_t dma, unsigned int length, unsigned int chain,
1174                 unsigned int node, unsigned int stream_id,
1175                 unsigned int short_not_ok, unsigned int no_interrupt,
1176                 unsigned int is_last, bool must_interrupt)
1177 {
1178         struct dwc3             *dwc = dep->dwc;
1179         struct usb_gadget       *gadget = dwc->gadget;
1180         enum usb_device_speed   speed = gadget->speed;
1181
1182         trb->size = DWC3_TRB_SIZE_LENGTH(length);
1183         trb->bpl = lower_32_bits(dma);
1184         trb->bph = upper_32_bits(dma);
1185
1186         switch (usb_endpoint_type(dep->endpoint.desc)) {
1187         case USB_ENDPOINT_XFER_CONTROL:
1188                 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
1189                 break;
1190
1191         case USB_ENDPOINT_XFER_ISOC:
1192                 if (!node) {
1193                         trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
1194
1195                         /*
1196                          * USB Specification 2.0 Section 5.9.2 states that: "If
1197                          * there is only a single transaction in the microframe,
1198                          * only a DATA0 data packet PID is used.  If there are
1199                          * two transactions per microframe, DATA1 is used for
1200                          * the first transaction data packet and DATA0 is used
1201                          * for the second transaction data packet.  If there are
1202                          * three transactions per microframe, DATA2 is used for
1203                          * the first transaction data packet, DATA1 is used for
1204                          * the second, and DATA0 is used for the third."
1205                          *
1206                          * IOW, we should satisfy the following cases:
1207                          *
1208                          * 1) length <= maxpacket
1209                          *      - DATA0
1210                          *
1211                          * 2) maxpacket < length <= (2 * maxpacket)
1212                          *      - DATA1, DATA0
1213                          *
1214                          * 3) (2 * maxpacket) < length <= (3 * maxpacket)
1215                          *      - DATA2, DATA1, DATA0
1216                          */
1217                         if (speed == USB_SPEED_HIGH) {
1218                                 struct usb_ep *ep = &dep->endpoint;
1219                                 unsigned int mult = 2;
1220                                 unsigned int maxp = usb_endpoint_maxp(ep->desc);
1221
1222                                 if (length <= (2 * maxp))
1223                                         mult--;
1224
1225                                 if (length <= maxp)
1226                                         mult--;
1227
1228                                 trb->size |= DWC3_TRB_SIZE_PCM1(mult);
1229                         }
1230                 } else {
1231                         trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
1232                 }
1233
1234                 /* always enable Interrupt on Missed ISOC */
1235                 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
1236                 break;
1237
1238         case USB_ENDPOINT_XFER_BULK:
1239         case USB_ENDPOINT_XFER_INT:
1240                 trb->ctrl = DWC3_TRBCTL_NORMAL;
1241                 break;
1242         default:
1243                 /*
1244                  * This is only possible with faulty memory because we
1245                  * checked it already :)
1246                  */
1247                 dev_WARN(dwc->dev, "Unknown endpoint type %d\n",
1248                                 usb_endpoint_type(dep->endpoint.desc));
1249         }
1250
1251         /*
1252          * Enable Continue on Short Packet
1253          * when the endpoint is not stream capable
1254          */
1255         if (usb_endpoint_dir_out(dep->endpoint.desc)) {
1256                 if (!dep->stream_capable)
1257                         trb->ctrl |= DWC3_TRB_CTRL_CSP;
1258
1259                 if (short_not_ok)
1260                         trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
1261         }
1262
1263         if ((!no_interrupt && !chain) || must_interrupt)
1264                 trb->ctrl |= DWC3_TRB_CTRL_IOC;
1265
1266         if (chain)
1267                 trb->ctrl |= DWC3_TRB_CTRL_CHN;
1268         else if (dep->stream_capable && is_last)
1269                 trb->ctrl |= DWC3_TRB_CTRL_LST;
1270
1271         if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
1272                 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
1273
1274         trb->ctrl |= DWC3_TRB_CTRL_HWO;
1275
1276         dwc3_ep_inc_enq(dep);
1277
1278         trace_dwc3_prepare_trb(dep, trb);
1279 }
1280
1281 /**
1282  * dwc3_prepare_one_trb - setup one TRB from one request
1283  * @dep: endpoint for which this request is prepared
1284  * @req: dwc3_request pointer
1285  * @trb_length: buffer size of the TRB
1286  * @chain: should this TRB be chained to the next?
1287  * @node: only for isochronous endpoints. First TRB needs different type.
1288  * @use_bounce_buffer: set to use bounce buffer
1289  * @must_interrupt: set to interrupt on TRB completion
1290  */
1291 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
1292                 struct dwc3_request *req, unsigned int trb_length,
1293                 unsigned int chain, unsigned int node, bool use_bounce_buffer,
1294                 bool must_interrupt)
1295 {
1296         struct dwc3_trb         *trb;
1297         dma_addr_t              dma;
1298         unsigned int            stream_id = req->request.stream_id;
1299         unsigned int            short_not_ok = req->request.short_not_ok;
1300         unsigned int            no_interrupt = req->request.no_interrupt;
1301         unsigned int            is_last = req->request.is_last;
1302
1303         if (use_bounce_buffer)
1304                 dma = dep->dwc->bounce_addr;
1305         else if (req->request.num_sgs > 0)
1306                 dma = sg_dma_address(req->start_sg);
1307         else
1308                 dma = req->request.dma;
1309
1310         trb = &dep->trb_pool[dep->trb_enqueue];
1311
1312         if (!req->trb) {
1313                 dwc3_gadget_move_started_request(req);
1314                 req->trb = trb;
1315                 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
1316         }
1317
1318         req->num_trbs++;
1319
1320         __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
1321                         stream_id, short_not_ok, no_interrupt, is_last,
1322                         must_interrupt);
1323 }
1324
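/*
 * An extra TRB is needed either to send a ZLP when the request has the zero
 * flag set and its length is an exact multiple of wMaxPacketSize (non-isoc
 * only), or to pad an OUT transfer whose length is not wMaxPacketSize aligned.
 */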
1325 static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req)
1326 {
1327         unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1328         unsigned int rem = req->request.length % maxp;
1329
1330         if ((req->request.length && req->request.zero && !rem &&
1331                         !usb_endpoint_xfer_isoc(dep->endpoint.desc)) ||
1332                         (!req->direction && rem))
1333                 return true;
1334
1335         return false;
1336 }
1337
1338 /**
1339  * dwc3_prepare_last_sg - prepare TRBs for the last SG entry
1340  * @dep: The endpoint that the request belongs to
1341  * @req: The request to prepare
1342  * @entry_length: The last SG entry size
1343  * @node: Indicates whether this is not the first entry (for isoc only)
1344  *
1345  * Return the number of TRBs prepared.
1346  */
1347 static int dwc3_prepare_last_sg(struct dwc3_ep *dep,
1348                 struct dwc3_request *req, unsigned int entry_length,
1349                 unsigned int node)
1350 {
1351         unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
1352         unsigned int rem = req->request.length % maxp;
1353         unsigned int num_trbs = 1;
1354
1355         if (dwc3_needs_extra_trb(dep, req))
1356                 num_trbs++;
1357
1358         if (dwc3_calc_trbs_left(dep) < num_trbs)
1359                 return 0;
1360
1361         req->needs_extra_trb = num_trbs > 1;
1362
1363         /* Prepare a normal TRB */
1364         if (req->direction || req->request.length)
1365                 dwc3_prepare_one_trb(dep, req, entry_length,
1366                                 req->needs_extra_trb, node, false, false);
1367
1368         /* Prepare extra TRBs for ZLP and MPS OUT transfer alignment */
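        /*
         * IN: a zero-length TRB provides the ZLP; OUT: a bounce-buffer TRB of
         * (maxp - rem) bytes keeps the overall transfer size maxp-aligned.
         */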
1369         if ((!req->direction && !req->request.length) || req->needs_extra_trb)
1370                 dwc3_prepare_one_trb(dep, req,
1371                                 req->direction ? 0 : maxp - rem,
1372                                 false, 1, true, false);
1373
1374         return num_trbs;
1375 }
1376
1377 static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep,
1378                 struct dwc3_request *req)
1379 {
1380         struct scatterlist *sg = req->start_sg;
1381         struct scatterlist *s;
1382         int             i;
1383         unsigned int length = req->request.length;
1384         unsigned int remaining = req->request.num_mapped_sgs
1385                 - req->num_queued_sgs;
1386         unsigned int num_trbs = req->num_trbs;
1387         bool needs_extra_trb = dwc3_needs_extra_trb(dep, req);
1388
1389         /*
1390          * If we resume preparing the request, then get the remaining length of
1391          * the request and resume where we left off.
1392          */
1393         for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
1394                 length -= sg_dma_len(s);
1395
1396         for_each_sg(sg, s, remaining, i) {
1397                 unsigned int num_trbs_left = dwc3_calc_trbs_left(dep);
1398                 unsigned int trb_length;
1399                 bool must_interrupt = false;
1400                 bool last_sg = false;
1401
1402                 trb_length = min_t(unsigned int, length, sg_dma_len(s));
1403
1404                 length -= trb_length;
1405
1406                 /*
1407                  * The IOMMU driver may coalesce sgs that share a page
1408                  * boundary into one before handing them to the USB driver,
1409                  * so the number of mapped sgs can differ from the number of
1410                  * sgs passed in. Therefore clear the chain bit if this is
1411                  * the last mapped sg.
1412                  */
1413                 if ((i == remaining - 1) || !length)
1414                         last_sg = true;
1415
1416                 if (!num_trbs_left)
1417                         break;
1418
1419                 if (last_sg) {
1420                         if (!dwc3_prepare_last_sg(dep, req, trb_length, i))
1421                                 break;
1422                 } else {
1423                         /*
1424                          * Look ahead to check if we have enough TRBs for the
1425                          * next SG entry. If not, set interrupt on this TRB to
1426                          * resume preparing the next SG entry when more TRBs are
1427                          * free.
1428                          */
1429                         if (num_trbs_left == 1 || (needs_extra_trb &&
1430                                         num_trbs_left <= 2 &&
1431                                         sg_dma_len(sg_next(s)) >= length))
1432                                 must_interrupt = true;
1433
1434                         dwc3_prepare_one_trb(dep, req, trb_length, 1, i, false,
1435                                         must_interrupt);
1436                 }
1437
1438                 /*
1439                  * There can be a situation where not all sgs in the sglist
1440                  * are queued because we run out of TRBs. To handle this
1441                  * case, update start_sg to the next sg to be queued, so
1442                  * that once TRBs are freed we can continue queuing from
1443                  * where we previously stopped.
1444                  */
1445                 if (!last_sg)
1446                         req->start_sg = sg_next(s);
1447
1448                 req->num_queued_sgs++;
1449                 req->num_pending_sgs--;
1450
1451                 /*
1452                  * The number of pending SG entries may not correspond to the
1453                  * number of mapped SG entries. If all the data are queued, then
1454                  * don't include unused SG entries.
1455                  */
1456                 if (length == 0) {
1457                         req->num_pending_sgs = 0;
1458                         break;
1459                 }
1460
1461                 if (must_interrupt)
1462                         break;
1463         }
1464
1465         return req->num_trbs - num_trbs;
1466 }
1467
1468 static int dwc3_prepare_trbs_linear(struct dwc3_ep *dep,
1469                 struct dwc3_request *req)
1470 {
1471         return dwc3_prepare_last_sg(dep, req, req->request.length, 0);
1472 }
1473
1474 /*
1475  * dwc3_prepare_trbs - setup TRBs from requests
1476  * @dep: endpoint for which requests are being prepared
1477  *
1478  * The function goes through the requests list and sets up TRBs for the
1479  * transfers. The function returns once there are no more TRBs available or
1480  * it runs out of requests.
1481  *
1482  * Returns the number of TRBs prepared or negative errno.
1483  */
1484 static int dwc3_prepare_trbs(struct dwc3_ep *dep)
1485 {
1486         struct dwc3_request     *req, *n;
1487         int                     ret = 0;
1488
1489         BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
1490
1491         /*
1492          * We can get in a situation where there's a request in the started list
1493          * but there weren't enough TRBs to fully kick it in the first time
1494          * around, so it has been waiting for more TRBs to be freed up.
1495          *
1496          * In that case, we should check if we have a request with pending_sgs
1497          * in the started list and prepare TRBs for that request first,
1498          * otherwise we will prepare TRBs completely out of order and that will
1499          * break things.
1500          */
1501         list_for_each_entry(req, &dep->started_list, list) {
1502                 if (req->num_pending_sgs > 0) {
1503                         ret = dwc3_prepare_trbs_sg(dep, req);
1504                         if (!ret || req->num_pending_sgs)
1505                                 return ret;
1506                 }
1507
1508                 if (!dwc3_calc_trbs_left(dep))
1509                         return ret;
1510
1511                 /*
1512                  * Don't prepare beyond a transfer. In DWC_usb32, its transfer
1513                  * burst capability may try to read and use TRBs beyond the
1514                  * active transfer instead of stopping.
1515                  */
1516                 if (dep->stream_capable && req->request.is_last)
1517                         return ret;
1518         }
1519
1520         list_for_each_entry_safe(req, n, &dep->pending_list, list) {
1521                 struct dwc3     *dwc = dep->dwc;
1522
1523                 ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request,
1524                                                     dep->direction);
1525                 if (ret)
1526                         return ret;
1527
1528                 req->sg                 = req->request.sg;
1529                 req->start_sg           = req->sg;
1530                 req->num_queued_sgs     = 0;
1531                 req->num_pending_sgs    = req->request.num_mapped_sgs;
1532
1533                 if (req->num_pending_sgs > 0) {
1534                         ret = dwc3_prepare_trbs_sg(dep, req);
1535                         if (req->num_pending_sgs)
1536                                 return ret;
1537                 } else {
1538                         ret = dwc3_prepare_trbs_linear(dep, req);
1539                 }
1540
1541                 if (!ret || !dwc3_calc_trbs_left(dep))
1542                         return ret;
1543
1544                 /*
1545                  * Don't prepare beyond a transfer. In DWC_usb32, its transfer
1546                  * burst capability may try to read and use TRBs beyond the
1547                  * active transfer instead of stopping.
1548                  */
1549                 if (dep->stream_capable && req->request.is_last)
1550                         return ret;
1551         }
1552
1553         return ret;
1554 }
1555
1556 static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep);
1557
1558 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
1559 {
1560         struct dwc3_gadget_ep_cmd_params params;
1561         struct dwc3_request             *req;
1562         int                             starting;
1563         int                             ret;
1564         u32                             cmd;
1565
1566         /*
1567          * Note that it's normal to have no new TRBs prepared (i.e. ret == 0).
1568          * This happens when we need to stop and restart a transfer such as in
1569          * the case of reinitiating a stream or retrying an isoc transfer.
1570          */
1571         ret = dwc3_prepare_trbs(dep);
1572         if (ret < 0)
1573                 return ret;
1574
1575         starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED);
1576
1577         /*
1578          * If there's no new TRB prepared and we don't need to restart a
1579          * transfer, there's no need to update the transfer.
1580          */
1581         if (!ret && !starting)
1582                 return ret;
1583
1584         req = next_request(&dep->started_list);
1585         if (!req) {
1586                 dep->flags |= DWC3_EP_PENDING_REQUEST;
1587                 return 0;
1588         }
1589
1590         memset(&params, 0, sizeof(params));
1591
1592         if (starting) {
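                     /*
                      * For START TRANSFER, params 0/1 carry the DMA address of
                      * the first TRB of the request (the transfer descriptor
                      * address).
                      */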
1593                 params.param0 = upper_32_bits(req->trb_dma);
1594                 params.param1 = lower_32_bits(req->trb_dma);
1595                 cmd = DWC3_DEPCMD_STARTTRANSFER;
1596
1597                 if (dep->stream_capable)
1598                         cmd |= DWC3_DEPCMD_PARAM(req->request.stream_id);
1599
1600                 if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
1601                         cmd |= DWC3_DEPCMD_PARAM(dep->frame_number);
1602         } else {
1603                 cmd = DWC3_DEPCMD_UPDATETRANSFER |
1604                         DWC3_DEPCMD_PARAM(dep->resource_index);
1605         }
1606
1607         ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1608         if (ret < 0) {
1609                 struct dwc3_request *tmp;
1610
1611                 if (ret == -EAGAIN)
1612                         return ret;
1613
1614                 dwc3_stop_active_transfer(dep, true, true);
1615
1616                 list_for_each_entry_safe(req, tmp, &dep->started_list, list)
1617                         dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_DEQUEUED);
1618
1619                 /* If ep isn't started, then there's no end transfer pending */
1620                 if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING))
1621                         dwc3_gadget_ep_cleanup_cancelled_requests(dep);
1622
1623                 return ret;
1624         }
1625
1626         if (dep->stream_capable && req->request.is_last)
1627                 dep->flags |= DWC3_EP_WAIT_TRANSFER_COMPLETE;
1628
1629         return 0;
1630 }
1631
1632 static int __dwc3_gadget_get_frame(struct dwc3 *dwc)
1633 {
1634         u32                     reg;
1635
1636         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1637         return DWC3_DSTS_SOFFN(reg);
1638 }
1639
1640 /**
1641  * dwc3_gadget_start_isoc_quirk - workaround invalid frame number
1642  * @dep: isoc endpoint
1643  *
1644  * This function tests for the correct combination of BIT[15:14] in the 16-bit
1645  * microframe number reported by the XferNotReady event, which is used as the
1646  * future frame number to start the isoc transfer.
1647  *
1648  * In DWC_usb31 version 1.70a-ea06 and prior, for highspeed and fullspeed
1649  * isochronous IN, BIT[15:14] of the 16-bit microframe number reported by the
1650  * XferNotReady event are invalid. The driver uses this number to schedule the
1651  * isochronous transfer and passes it to the START TRANSFER command. Because
1652  * this number is invalid, the command may fail. If BIT[15:14] matches the
1653  * internal 16-bit microframe, the START TRANSFER command will pass and the
1654  * transfer will start at the scheduled time. If it is off by 1, the command
1655  * will still pass, but the transfer will start 2 seconds in the future. For all
1656  * other conditions, the START TRANSFER command will fail with bus-expiry.
1657  *
1658  * In order to workaround this issue, we can test for the correct combination of
1659  * BIT[15:14] by sending START TRANSFER commands with different values of
1660  * BIT[15:14]: 'b00, 'b01, 'b10, and 'b11. Each combination is 2^14 uframes apart
1661  * (or 2 seconds). 4 seconds into the future will result in a bus-expiry status.
1662  * As a result, within the 4 possible combinations for BIT[15:14], there will
1663  * be 2 successful and 2 failing START TRANSFER command statuses. One of the 2
1664  * successful command statuses will result in a 2-second delayed start. The
1665  * smaller BIT[15:14] value is the correct combination.
1666  *
1667  * Since there are only 4 outcomes and the results are ordered, we can simply
1668  * test 2 START TRANSFER commands with BIT[15:14] combinations 'b00 and 'b01 to
1669  * deduce the smaller successful combination.
1670  *
1671  * Let test0 = test status for combination 'b00 and test1 = test status for 'b01
1672  * of BIT[15:14]. The correct combination is as follows:
1673  *
1674  * if test0 fails and test1 passes, BIT[15:14] is 'b01
1675  * if test0 fails and test1 fails, BIT[15:14] is 'b10
1676  * if test0 passes and test1 fails, BIT[15:14] is 'b11
1677  * if test0 passes and test1 passes, BIT[15:14] is 'b00
1678  *
1679  * Synopsys STAR 9001202023: Wrong microframe number for isochronous IN
1680  * endpoints.
1681  */
1682 static int dwc3_gadget_start_isoc_quirk(struct dwc3_ep *dep)
1683 {
1684         int cmd_status = 0;
1685         bool test0;
1686         bool test1;
1687
1688         while (dep->combo_num < 2) {
1689                 struct dwc3_gadget_ep_cmd_params params;
1690                 u32 test_frame_number;
1691                 u32 cmd;
1692
1693                 /*
1694                  * Check if we can start isoc transfer on the next interval or
1695                  * 4 uframes in the future with BIT[15:14] as dep->combo_num
1696                  */
1697                 test_frame_number = dep->frame_number & DWC3_FRNUMBER_MASK;
1698                 test_frame_number |= dep->combo_num << 14;
1699                 test_frame_number += max_t(u32, 4, dep->interval);
1700
1701                 params.param0 = upper_32_bits(dep->dwc->bounce_addr);
1702                 params.param1 = lower_32_bits(dep->dwc->bounce_addr);
1703
1704                 cmd = DWC3_DEPCMD_STARTTRANSFER;
1705                 cmd |= DWC3_DEPCMD_PARAM(test_frame_number);
1706                 cmd_status = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1707
1708                 /* Redo if some failure other than bus-expiry is received */
1709                 if (cmd_status && cmd_status != -EAGAIN) {
1710                         dep->start_cmd_status = 0;
1711                         dep->combo_num = 0;
1712                         return 0;
1713                 }
1714
1715                 /* Store the first test status */
1716                 if (dep->combo_num == 0)
1717                         dep->start_cmd_status = cmd_status;
1718
1719                 dep->combo_num++;
1720
1721                 /*
1722                  * End the transfer if the START_TRANSFER command is successful
1723                  * to wait for the next XferNotReady to test the command again
1724                  */
1725                 if (cmd_status == 0) {
1726                         dwc3_stop_active_transfer(dep, true, true);
1727                         return 0;
1728                 }
1729         }
1730
1731         /* test0 and test1 are both completed at this point */
1732         test0 = (dep->start_cmd_status == 0);
1733         test1 = (cmd_status == 0);
1734
1735         if (!test0 && test1)
1736                 dep->combo_num = 1;
1737         else if (!test0 && !test1)
1738                 dep->combo_num = 2;
1739         else if (test0 && !test1)
1740                 dep->combo_num = 3;
1741         else if (test0 && test1)
1742                 dep->combo_num = 0;
1743
1744         dep->frame_number &= DWC3_FRNUMBER_MASK;
1745         dep->frame_number |= dep->combo_num << 14;
1746         dep->frame_number += max_t(u32, 4, dep->interval);
1747
1748         /* Reinitialize test variables */
1749         dep->start_cmd_status = 0;
1750         dep->combo_num = 0;
1751
1752         return __dwc3_gadget_kick_transfer(dep);
1753 }
1754
1755 static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep)
1756 {
1757         const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
1758         struct dwc3 *dwc = dep->dwc;
1759         int ret;
1760         int i;
1761
1762         if (list_empty(&dep->pending_list) &&
1763             list_empty(&dep->started_list)) {
1764                 dep->flags |= DWC3_EP_PENDING_REQUEST;
1765                 return -EAGAIN;
1766         }
1767
1768         if (!dwc->dis_start_transfer_quirk &&
1769             (DWC3_VER_IS_PRIOR(DWC31, 170A) ||
1770              DWC3_VER_TYPE_IS_WITHIN(DWC31, 170A, EA01, EA06))) {
1771                 if (dwc->gadget->speed <= USB_SPEED_HIGH && dep->direction)
1772                         return dwc3_gadget_start_isoc_quirk(dep);
1773         }
1774
1775         if (desc->bInterval <= 14 &&
1776             dwc->gadget->speed >= USB_SPEED_HIGH) {
1777                 u32 frame = __dwc3_gadget_get_frame(dwc);
1778                 bool rollover = frame <
1779                                 (dep->frame_number & DWC3_FRNUMBER_MASK);
1780
1781                 /*
1782                  * frame_number is set from XferNotReady and may already be
1783                  * out of date. DSTS only provides the lower 14 bits of the
1784                  * current frame number. So add the upper two bits of
1785                  * frame_number and handle a possible rollover.
1786                  * This will provide the correct frame_number unless more
1787                  * than one rollover has happened since XferNotReady.
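                      * For example, if frame_number's lower 14 bits were 0x3FFE
                      * and DSTS now reports 0x0002, the counter wrapped, so
                      * BIT(14) is added below.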
1788                  */
1789
1790                 dep->frame_number = (dep->frame_number & ~DWC3_FRNUMBER_MASK) |
1791                                      frame;
1792                 if (rollover)
1793                         dep->frame_number += BIT(14);
1794         }
1795
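             /*
              * Align the start frame to the endpoint interval and retry with
              * progressively later frames while START TRANSFER keeps failing
              * with bus-expiry (-EAGAIN).
              */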
1796         for (i = 0; i < DWC3_ISOC_MAX_RETRIES; i++) {
1797                 dep->frame_number = DWC3_ALIGN_FRAME(dep, i + 1);
1798
1799                 ret = __dwc3_gadget_kick_transfer(dep);
1800                 if (ret != -EAGAIN)
1801                         break;
1802         }
1803
1804         /*
1805          * After a number of unsuccessful start attempts due to bus-expiry
1806          * status, issue END_TRANSFER command and retry on the next XferNotReady
1807          * event.
1808          */
1809         if (ret == -EAGAIN) {
1810                 struct dwc3_gadget_ep_cmd_params params;
1811                 u32 cmd;
1812
1813                 cmd = DWC3_DEPCMD_ENDTRANSFER |
1814                         DWC3_DEPCMD_CMDIOC |
1815                         DWC3_DEPCMD_PARAM(dep->resource_index);
1816
1817                 dep->resource_index = 0;
1818                 memset(&params, 0, sizeof(params));
1819
1820                 ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
1821                 if (!ret)
1822                         dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
1823         }
1824
1825         return ret;
1826 }
1827
1828 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1829 {
1830         struct dwc3             *dwc = dep->dwc;
1831
1832         if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
1833                 dev_dbg(dwc->dev, "%s: can't queue to disabled endpoint\n",
1834                                 dep->name);
1835                 return -ESHUTDOWN;
1836         }
1837
1838         if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
1839                                 &req->request, req->dep->name))
1840                 return -EINVAL;
1841
1842         if (WARN(req->status < DWC3_REQUEST_STATUS_COMPLETED,
1843                                 "%s: request %pK already in flight\n",
1844                                 dep->name, &req->request))
1845                 return -EINVAL;
1846
1847         pm_runtime_get(dwc->dev);
1848
1849         req->request.actual     = 0;
1850         req->request.status     = -EINPROGRESS;
1851
1852         trace_dwc3_ep_queue(req);
1853
1854         list_add_tail(&req->list, &dep->pending_list);
1855         req->status = DWC3_REQUEST_STATUS_QUEUED;
1856
1857         if (dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)
1858                 return 0;
1859
1860         /*
1861          * Start the transfer only after the END_TRANSFER is completed
1862          * and endpoint STALL is cleared.
1863          */
1864         if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) ||
1865             (dep->flags & DWC3_EP_WEDGE) ||
1866             (dep->flags & DWC3_EP_STALL)) {
1867                 dep->flags |= DWC3_EP_DELAY_START;
1868                 return 0;
1869         }
1870
1871         /*
1872          * NOTICE: Isochronous endpoints should NEVER be prestarted. We must
1873          * wait for an XferNotReady event so we know the current
1874          * (micro-)frame number.
1875          *
1876          * Without this trick, we are very, very likely gonna get Bus Expiry
1877          * errors which will force us to issue an EndTransfer command.
1878          */
1879         if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1880                 if (!(dep->flags & DWC3_EP_PENDING_REQUEST) &&
1881                                 !(dep->flags & DWC3_EP_TRANSFER_STARTED))
1882                         return 0;
1883
1884                 if ((dep->flags & DWC3_EP_PENDING_REQUEST)) {
1885                         if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
1886                                 return __dwc3_gadget_start_isoc(dep);
1887                 }
1888         }
1889
1890         __dwc3_gadget_kick_transfer(dep);
1891
1892         return 0;
1893 }
1894
1895 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1896         gfp_t gfp_flags)
1897 {
1898         struct dwc3_request             *req = to_dwc3_request(request);
1899         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
1900         struct dwc3                     *dwc = dep->dwc;
1901
1902         unsigned long                   flags;
1903
1904         int                             ret;
1905
1906         spin_lock_irqsave(&dwc->lock, flags);
1907         ret = __dwc3_gadget_ep_queue(dep, req);
1908         spin_unlock_irqrestore(&dwc->lock, flags);
1909
1910         return ret;
1911 }
1912
1913 static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *req)
1914 {
1915         int i;
1916
1917         /* If req->trb is not set, then the request has not started */
1918         if (!req->trb)
1919                 return;
1920
1921         /*
1922          * If the request was already started, this means we had to
1923          * stop the transfer. With that we also need to ignore
1924          * all TRBs used by the request. However, TRBs can only
1925          * be modified after completion of the END_TRANSFER
1926          * command. So what we do here is wait for
1927          * END_TRANSFER completion and only after that, jump
1928          * over the TRBs by clearing HWO and incrementing the
1929          * dequeue pointer.
1930          */
1931         for (i = 0; i < req->num_trbs; i++) {
1932                 struct dwc3_trb *trb;
1933
1934                 trb = &dep->trb_pool[dep->trb_dequeue];
1935                 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
1936                 dwc3_ep_inc_deq(dep);
1937         }
1938
1939         req->num_trbs = 0;
1940 }
1941
1942 static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep)
1943 {
1944         struct dwc3_request             *req;
1945         struct dwc3_request             *tmp;
1946         struct dwc3                     *dwc = dep->dwc;
1947
1948         list_for_each_entry_safe(req, tmp, &dep->cancelled_list, list) {
1949                 dwc3_gadget_ep_skip_trbs(dep, req);
1950                 switch (req->status) {
1951                 case DWC3_REQUEST_STATUS_DISCONNECTED:
1952                         dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
1953                         break;
1954                 case DWC3_REQUEST_STATUS_DEQUEUED:
1955                         dwc3_gadget_giveback(dep, req, -ECONNRESET);
1956                         break;
1957                 case DWC3_REQUEST_STATUS_STALLED:
1958                         dwc3_gadget_giveback(dep, req, -EPIPE);
1959                         break;
1960                 default:
1961                         dev_err(dwc->dev, "request cancelled with wrong reason:%d\n", req->status);
1962                         dwc3_gadget_giveback(dep, req, -ECONNRESET);
1963                         break;
1964                 }
1965         }
1966 }
1967
1968 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1969                 struct usb_request *request)
1970 {
1971         struct dwc3_request             *req = to_dwc3_request(request);
1972         struct dwc3_request             *r = NULL;
1973
1974         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
1975         struct dwc3                     *dwc = dep->dwc;
1976
1977         unsigned long                   flags;
1978         int                             ret = 0;
1979
1980         trace_dwc3_ep_dequeue(req);
1981
1982         spin_lock_irqsave(&dwc->lock, flags);
1983
1984         list_for_each_entry(r, &dep->cancelled_list, list) {
1985                 if (r == req)
1986                         goto out;
1987         }
1988
1989         list_for_each_entry(r, &dep->pending_list, list) {
1990                 if (r == req) {
1991                         dwc3_gadget_giveback(dep, req, -ECONNRESET);
1992                         goto out;
1993                 }
1994         }
1995
1996         list_for_each_entry(r, &dep->started_list, list) {
1997                 if (r == req) {
1998                         struct dwc3_request *t;
1999
2000                         /* wait until it is processed */
2001                         dwc3_stop_active_transfer(dep, true, true);
2002
2003                         /*
2004                          * Remove any started request if the transfer is
2005                          * cancelled.
2006                          */
2007                         list_for_each_entry_safe(r, t, &dep->started_list, list)
2008                                 dwc3_gadget_move_cancelled_request(r,
2009                                                 DWC3_REQUEST_STATUS_DEQUEUED);
2010
2011                         dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
2012
2013                         goto out;
2014                 }
2015         }
2016
2017         dev_err(dwc->dev, "request %pK was not queued to %s\n",
2018                 request, ep->name);
2019         ret = -EINVAL;
2020 out:
2021         spin_unlock_irqrestore(&dwc->lock, flags);
2022
2023         return ret;
2024 }
2025
2026 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
2027 {
2028         struct dwc3_gadget_ep_cmd_params        params;
2029         struct dwc3                             *dwc = dep->dwc;
2030         struct dwc3_request                     *req;
2031         struct dwc3_request                     *tmp;
2032         int                                     ret;
2033
2034         if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2035                 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
2036                 return -EINVAL;
2037         }
2038
2039         memset(&params, 0x00, sizeof(params));
2040
2041         if (value) {
2042                 struct dwc3_trb *trb;
2043
2044                 unsigned int transfer_in_flight;
2045                 unsigned int started;
2046
2047                 if (dep->number > 1)
2048                         trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
2049                 else
2050                         trb = &dwc->ep0_trb[dep->trb_enqueue];
2051
2052                 transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
2053                 started = !list_empty(&dep->started_list);
2054
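                     /*
                      * For a functional (non-protocol) halt, don't stall while
                      * a transfer is still in progress; return -EAGAIN so the
                      * caller can retry once the endpoint is idle.
                      */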
2055                 if (!protocol && ((dep->direction && transfer_in_flight) ||
2056                                 (!dep->direction && started))) {
2057                         return -EAGAIN;
2058                 }
2059
2060                 ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
2061                                 &params);
2062                 if (ret)
2063                         dev_err(dwc->dev, "failed to set STALL on %s\n",
2064                                         dep->name);
2065                 else
2066                         dep->flags |= DWC3_EP_STALL;
2067         } else {
2068                 /*
2069                  * Don't issue CLEAR_STALL command to control endpoints. The
2070                  * controller automatically clears the STALL when it receives
2071                  * the SETUP token.
2072                  */
2073                 if (dep->number <= 1) {
2074                         dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
2075                         return 0;
2076                 }
2077
2078                 dwc3_stop_active_transfer(dep, true, true);
2079
2080                 list_for_each_entry_safe(req, tmp, &dep->started_list, list)
2081                         dwc3_gadget_move_cancelled_request(req, DWC3_REQUEST_STATUS_STALLED);
2082
2083                 if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) {
2084                         dep->flags |= DWC3_EP_PENDING_CLEAR_STALL;
2085                         return 0;
2086                 }
2087
2088                 dwc3_gadget_ep_cleanup_cancelled_requests(dep);
2089
2090                 ret = dwc3_send_clear_stall_ep_cmd(dep);
2091                 if (ret) {
2092                         dev_err(dwc->dev, "failed to clear STALL on %s\n",
2093                                         dep->name);
2094                         return ret;
2095                 }
2096
2097                 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
2098
2099                 if ((dep->flags & DWC3_EP_DELAY_START) &&
2100                     !usb_endpoint_xfer_isoc(dep->endpoint.desc))
2101                         __dwc3_gadget_kick_transfer(dep);
2102
2103                 dep->flags &= ~DWC3_EP_DELAY_START;
2104         }
2105
2106         return ret;
2107 }
2108
2109 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
2110 {
2111         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
2112         struct dwc3                     *dwc = dep->dwc;
2113
2114         unsigned long                   flags;
2115
2116         int                             ret;
2117
2118         spin_lock_irqsave(&dwc->lock, flags);
2119         ret = __dwc3_gadget_ep_set_halt(dep, value, false);
2120         spin_unlock_irqrestore(&dwc->lock, flags);
2121
2122         return ret;
2123 }
2124
2125 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
2126 {
2127         struct dwc3_ep                  *dep = to_dwc3_ep(ep);
2128         struct dwc3                     *dwc = dep->dwc;
2129         unsigned long                   flags;
2130         int                             ret;
2131
2132         spin_lock_irqsave(&dwc->lock, flags);
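             /*
              * A wedged endpoint stays halted even if the host issues
              * ClearFeature(HALT); it is only cleared again from the gadget
              * side (e.g. on a later clear halt or endpoint disable).
              */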
2133         dep->flags |= DWC3_EP_WEDGE;
2134
2135         if (dep->number == 0 || dep->number == 1)
2136                 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
2137         else
2138                 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
2139         spin_unlock_irqrestore(&dwc->lock, flags);
2140
2141         return ret;
2142 }
2143
2144 /* -------------------------------------------------------------------------- */
2145
2146 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
2147         .bLength        = USB_DT_ENDPOINT_SIZE,
2148         .bDescriptorType = USB_DT_ENDPOINT,
2149         .bmAttributes   = USB_ENDPOINT_XFER_CONTROL,
2150 };
2151
2152 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
2153         .enable         = dwc3_gadget_ep0_enable,
2154         .disable        = dwc3_gadget_ep0_disable,
2155         .alloc_request  = dwc3_gadget_ep_alloc_request,
2156         .free_request   = dwc3_gadget_ep_free_request,
2157         .queue          = dwc3_gadget_ep0_queue,
2158         .dequeue        = dwc3_gadget_ep_dequeue,
2159         .set_halt       = dwc3_gadget_ep0_set_halt,
2160         .set_wedge      = dwc3_gadget_ep_set_wedge,
2161 };
2162
2163 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
2164         .enable         = dwc3_gadget_ep_enable,
2165         .disable        = dwc3_gadget_ep_disable,
2166         .alloc_request  = dwc3_gadget_ep_alloc_request,
2167         .free_request   = dwc3_gadget_ep_free_request,
2168         .queue          = dwc3_gadget_ep_queue,
2169         .dequeue        = dwc3_gadget_ep_dequeue,
2170         .set_halt       = dwc3_gadget_ep_set_halt,
2171         .set_wedge      = dwc3_gadget_ep_set_wedge,
2172 };
2173
2174 /* -------------------------------------------------------------------------- */
2175
2176 static int dwc3_gadget_get_frame(struct usb_gadget *g)
2177 {
2178         struct dwc3             *dwc = gadget_to_dwc(g);
2179
2180         return __dwc3_gadget_get_frame(dwc);
2181 }
2182
2183 static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
2184 {
2185         int                     retries;
2186
2187         int                     ret;
2188         u32                     reg;
2189
2190         u8                      link_state;
2191
2192         /*
2193          * According to the Databook, a Remote Wakeup request should
2194          * be issued only when the device is in an early suspend state.
2195          *
2196          * We can check that via the USB Link State bits in the DSTS register.
2197          */
2198         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2199
2200         link_state = DWC3_DSTS_USBLNKST(reg);
2201
2202         switch (link_state) {
2203         case DWC3_LINK_STATE_RESET:
2204         case DWC3_LINK_STATE_RX_DET:    /* in HS, means Early Suspend */
2205         case DWC3_LINK_STATE_U3:        /* in HS, means SUSPEND */
2206         case DWC3_LINK_STATE_U2:        /* in HS, means Sleep (L1) */
2207         case DWC3_LINK_STATE_U1:
2208         case DWC3_LINK_STATE_RESUME:
2209                 break;
2210         default:
2211                 return -EINVAL;
2212         }
2213
2214         ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
2215         if (ret < 0) {
2216                 dev_err(dwc->dev, "failed to put link in Recovery\n");
2217                 return ret;
2218         }
2219
2220         /* Recent versions do this automatically */
2221         if (DWC3_VER_IS_PRIOR(DWC3, 194A)) {
2222                 /* write zeroes to Link Change Request */
2223                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2224                 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
2225                 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2226         }
2227
2228         /* poll until Link State changes to ON */
2229         retries = 20000;
2230
2231         while (retries--) {
2232                 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2233
2234                 /* in HS, means ON */
2235                 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
2236                         break;
2237         }
2238
2239         if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
2240                 dev_err(dwc->dev, "failed to send remote wakeup\n");
2241                 return -EINVAL;
2242         }
2243
2244         return 0;
2245 }
2246
2247 static int dwc3_gadget_wakeup(struct usb_gadget *g)
2248 {
2249         struct dwc3             *dwc = gadget_to_dwc(g);
2250         unsigned long           flags;
2251         int                     ret;
2252
2253         spin_lock_irqsave(&dwc->lock, flags);
2254         ret = __dwc3_gadget_wakeup(dwc);
2255         spin_unlock_irqrestore(&dwc->lock, flags);
2256
2257         return ret;
2258 }
2259
2260 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
2261                 int is_selfpowered)
2262 {
2263         struct dwc3             *dwc = gadget_to_dwc(g);
2264         unsigned long           flags;
2265
2266         spin_lock_irqsave(&dwc->lock, flags);
2267         g->is_selfpowered = !!is_selfpowered;
2268         spin_unlock_irqrestore(&dwc->lock, flags);
2269
2270         return 0;
2271 }
2272
2273 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2274 {
2275         u32 epnum;
2276
2277         for (epnum = 2; epnum < dwc->num_eps; epnum++) {
2278                 struct dwc3_ep *dep;
2279
2280                 dep = dwc->eps[epnum];
2281                 if (!dep)
2282                         continue;
2283
2284                 dwc3_remove_requests(dwc, dep);
2285         }
2286 }
2287
2288 static void __dwc3_gadget_set_ssp_rate(struct dwc3 *dwc)
2289 {
2290         enum usb_ssp_rate       ssp_rate = dwc->gadget_ssp_rate;
2291         u32                     reg;
2292
2293         if (ssp_rate == USB_SSP_GEN_UNKNOWN)
2294                 ssp_rate = dwc->max_ssp_rate;
2295
2296         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2297         reg &= ~DWC3_DCFG_SPEED_MASK;
2298         reg &= ~DWC3_DCFG_NUMLANES(~0);
2299
2300         if (ssp_rate == USB_SSP_GEN_1x2)
2301                 reg |= DWC3_DCFG_SUPERSPEED;
2302         else if (dwc->max_ssp_rate != USB_SSP_GEN_1x2)
2303                 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
2304
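             /* The x2 rates (Gen 1x2, Gen 2x2) transfer over two lanes */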
2305         if (ssp_rate != USB_SSP_GEN_2x1 &&
2306             dwc->max_ssp_rate != USB_SSP_GEN_2x1)
2307                 reg |= DWC3_DCFG_NUMLANES(1);
2308
2309         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2310 }
2311
2312 static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
2313 {
2314         enum usb_device_speed   speed;
2315         u32                     reg;
2316
2317         speed = dwc->gadget_max_speed;
2318         if (speed == USB_SPEED_UNKNOWN || speed > dwc->maximum_speed)
2319                 speed = dwc->maximum_speed;
2320
2321         if (speed == USB_SPEED_SUPER_PLUS &&
2322             DWC3_IP_IS(DWC32)) {
2323                 __dwc3_gadget_set_ssp_rate(dwc);
2324                 return;
2325         }
2326
2327         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2328         reg &= ~(DWC3_DCFG_SPEED_MASK);
2329
2330         /*
2331          * WORKAROUND: DWC3 revisions < 2.20a have an issue
2332          * which would cause a metastability state on the Run/Stop
2333          * bit if we try to force the IP to USB2-only mode.
2334          *
2335          * Because of that, we cannot configure the IP to any
2336          * speed other than SuperSpeed.
2337          *
2338          * Refers to:
2339          *
2340          * STAR#9000525659: Clock Domain Crossing on DCTL in
2341          * USB 2.0 Mode
2342          */
2343         if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
2344             !dwc->dis_metastability_quirk) {
2345                 reg |= DWC3_DCFG_SUPERSPEED;
2346         } else {
2347                 switch (speed) {
2348                 case USB_SPEED_FULL:
2349                         reg |= DWC3_DCFG_FULLSPEED;
2350                         break;
2351                 case USB_SPEED_HIGH:
2352                         reg |= DWC3_DCFG_HIGHSPEED;
2353                         break;
2354                 case USB_SPEED_SUPER:
2355                         reg |= DWC3_DCFG_SUPERSPEED;
2356                         break;
2357                 case USB_SPEED_SUPER_PLUS:
2358                         if (DWC3_IP_IS(DWC3))
2359                                 reg |= DWC3_DCFG_SUPERSPEED;
2360                         else
2361                                 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
2362                         break;
2363                 default:
2364                         dev_err(dwc->dev, "invalid speed (%d)\n", speed);
2365
2366                         if (DWC3_IP_IS(DWC3))
2367                                 reg |= DWC3_DCFG_SUPERSPEED;
2368                         else
2369                                 reg |= DWC3_DCFG_SUPERSPEED_PLUS;
2370                 }
2371         }
2372
2373         if (DWC3_IP_IS(DWC32) &&
2374             speed > USB_SPEED_UNKNOWN &&
2375             speed < USB_SPEED_SUPER_PLUS)
2376                 reg &= ~DWC3_DCFG_NUMLANES(~0);
2377
2378         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2379 }
2380
2381 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
2382 {
2383         u32                     reg;
2384         u32                     timeout = 500;
2385
2386         if (pm_runtime_suspended(dwc->dev))
2387                 return 0;
2388
2389         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2390         if (is_on) {
2391                 if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
2392                         reg &= ~DWC3_DCTL_TRGTULST_MASK;
2393                         reg |= DWC3_DCTL_TRGTULST_RX_DET;
2394                 }
2395
2396                 if (!DWC3_VER_IS_PRIOR(DWC3, 194A))
2397                         reg &= ~DWC3_DCTL_KEEP_CONNECT;
2398                 reg |= DWC3_DCTL_RUN_STOP;
2399
2400                 if (dwc->has_hibernation)
2401                         reg |= DWC3_DCTL_KEEP_CONNECT;
2402
2403                 __dwc3_gadget_set_speed(dwc);
2404                 dwc->pullups_connected = true;
2405         } else {
2406                 reg &= ~DWC3_DCTL_RUN_STOP;
2407
2408                 if (dwc->has_hibernation && !suspend)
2409                         reg &= ~DWC3_DCTL_KEEP_CONNECT;
2410
2411                 dwc->pullups_connected = false;
2412         }
2413
2414         dwc3_gadget_dctl_write_safe(dwc, reg);
2415
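             /*
              * Poll DSTS.DEVCTRLHLT until it reflects the requested state: the
              * halted bit clears once the controller starts running and sets
              * once it has actually stopped.
              */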
2416         do {
2417                 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2418                 reg &= DWC3_DSTS_DEVCTRLHLT;
2419         } while (--timeout && !(!is_on ^ !reg));
2420
2421         if (!timeout)
2422                 return -ETIMEDOUT;
2423
2424         return 0;
2425 }
2426
2427 static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
2428 static void __dwc3_gadget_stop(struct dwc3 *dwc);
2429 static int __dwc3_gadget_start(struct dwc3 *dwc);
2430
2431 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
2432 {
2433         struct dwc3             *dwc = gadget_to_dwc(g);
2434         unsigned long           flags;
2435         int                     ret;
2436
2437         is_on = !!is_on;
2438         dwc->softconnect = is_on;
2439         /*
2440          * Per databook, when we want to stop the gadget, if a control transfer
2441          * is still in process, complete it and get the core into setup phase.
2442          */
2443         if (!is_on && dwc->ep0state != EP0_SETUP_PHASE) {
2444                 reinit_completion(&dwc->ep0_in_setup);
2445
2446                 ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
2447                                 msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
2448                 if (ret == 0)
2449                         dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
2450         }
2451
2452         /*
2453          * Avoid issuing a runtime resume if the device is already in the
2454          * suspended state during gadget disconnect.  DWC3 gadget was already
2455          * halted/stopped during runtime suspend.
2456          */
2457         if (!is_on) {
2458                 pm_runtime_barrier(dwc->dev);
2459                 if (pm_runtime_suspended(dwc->dev))
2460                         return 0;
2461         }
2462
2463         /*
2464          * Check the return value for successful resume, or error.  For a
2465          * successful resume, the DWC3 runtime PM resume routine will handle
2466          * the run stop sequence, so avoid duplicate operations here.
2467          */
2468         ret = pm_runtime_get_sync(dwc->dev);
2469         if (!ret || ret < 0) {
2470                 pm_runtime_put(dwc->dev);
2471                 return 0;
2472         }
2473
2474         /*
2475          * Synchronize and disable any further event handling while controller
2476          * is being enabled/disabled.
2477          */
2478         disable_irq(dwc->irq_gadget);
2479
2480         spin_lock_irqsave(&dwc->lock, flags);
2481
2482         if (!is_on) {
2483                 u32 count;
2484
2485                 dwc->connected = false;
2486                 /*
2487                  * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
2488                  * Section 4.1.8 Table 4-7, it states that for a device-initiated
2489                  * disconnect, the SW needs to ensure that it sends "a DEPENDXFER
2490                  * command for any active transfers" before clearing the RunStop
2491                  * bit.
2492                  */
2493                 dwc3_stop_active_transfers(dwc);
2494                 __dwc3_gadget_stop(dwc);
2495
2496                 /*
2497                  * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
2498                  * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the
2499                  * "software needs to acknowledge the events that are generated
2500                  * (by writing to GEVNTCOUNTn) while it is waiting for this bit
2501                  * to be set to '1'."
2502                  */
2503                 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
2504                 count &= DWC3_GEVNTCOUNT_MASK;
2505                 if (count > 0) {
2506                         dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
2507                         dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
2508                                                 dwc->ev_buf->length;
2509                 }
2510         } else {
2511                 __dwc3_gadget_start(dwc);
2512         }
2513
2514         ret = dwc3_gadget_run_stop(dwc, is_on, false);
2515         spin_unlock_irqrestore(&dwc->lock, flags);
2516         enable_irq(dwc->irq_gadget);
2517
2518         pm_runtime_put(dwc->dev);
2519
2520         return ret;
2521 }
2522
2523 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
2524 {
2525         u32                     reg;
2526
2527         /* Enable all but Start and End of Frame IRQs */
2528         reg = (DWC3_DEVTEN_EVNTOVERFLOWEN |
2529                         DWC3_DEVTEN_CMDCMPLTEN |
2530                         DWC3_DEVTEN_ERRTICERREN |
2531                         DWC3_DEVTEN_WKUPEVTEN |
2532                         DWC3_DEVTEN_CONNECTDONEEN |
2533                         DWC3_DEVTEN_USBRSTEN |
2534                         DWC3_DEVTEN_DISCONNEVTEN);
2535
2536         if (DWC3_VER_IS_PRIOR(DWC3, 250A))
2537                 reg |= DWC3_DEVTEN_ULSTCNGEN;
2538
2539         /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
2540         if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
2541                 reg |= DWC3_DEVTEN_U3L2L1SUSPEN;
2542
2543         dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
2544 }
2545
2546 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
2547 {
2548         /* mask all interrupts */
2549         dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
2550 }
2551
2552 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
2553 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
2554
2555 /**
2556  * dwc3_gadget_setup_nump - calculate and initialize NUMP field of %DWC3_DCFG
2557  * @dwc: pointer to our context structure
2558  *
2559  * The following looks complex but it's actually very simple. In order to
2560  * calculate the number of packets we can burst at once on OUT transfers, we're
2561  * gonna use RxFIFO size.
2562  *
2563  * To calculate RxFIFO size we need two numbers:
2564  * MDWIDTH = size, in bits, of the internal memory bus
2565  * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
2566  *
2567  * Given these two numbers, the formula is simple:
2568  *
2569  * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
2570  *
2571  * 24 bytes is for 3x SETUP packets
2572  * 16 bytes is a clock domain crossing tolerance
2573  *
2574  * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
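      *
      * For example, with a hypothetical MDWIDTH of 64 bits and RAM2_DEPTH of
      * 2048, RxFIFO Size = (2048 * 64 / 8) - 24 - 16 = 16344 bytes, giving
      * NUMP = 15, within the cap of 16 applied below.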
2575  */
2576 static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
2577 {
2578         u32 ram2_depth;
2579         u32 mdwidth;
2580         u32 nump;
2581         u32 reg;
2582
2583         ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
2584         mdwidth = dwc3_mdwidth(dwc);
2585
2586         nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
2587         nump = min_t(u32, nump, 16);
2588
2589         /* update NumP */
2590         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2591         reg &= ~DWC3_DCFG_NUMP_MASK;
2592         reg |= nump << DWC3_DCFG_NUMP_SHIFT;
2593         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2594 }
2595
2596 static int __dwc3_gadget_start(struct dwc3 *dwc)
2597 {
2598         struct dwc3_ep          *dep;
2599         int                     ret = 0;
2600         u32                     reg;
2601
2602         /*
2603          * Use IMOD if enabled via dwc->imod_interval. Otherwise, if
2604          * the core supports IMOD, disable it.
2605          */
2606         if (dwc->imod_interval) {
2607                 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
2608                 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
2609         } else if (dwc3_has_imod(dwc)) {
2610                 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), 0);
2611         }
2612
2613         /*
2614          * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
2615          * field instead of letting dwc3 itself calculate that automatically.
2616          *
2617          * This way, we maximize the chances that we'll be able to get several
2618          * bursts of data without going through any sort of endpoint throttling.
2619          */
2620         reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
2621         if (DWC3_IP_IS(DWC3))
2622                 reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
2623         else
2624                 reg &= ~DWC31_GRXTHRCFG_PKTCNTSEL;
2625
2626         dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
2627
2628         dwc3_gadget_setup_nump(dwc);
2629
2630         /*
2631          * Currently the controller handles a single stream only. So, ignore
2632          * the Packet Pending bit for stream selection and don't search for another
2633          * stream if the host sends Data Packet with PP=0 (for OUT direction) or
2634          * ACK with NumP=0 and PP=0 (for IN direction). This slightly improves
2635          * the stream performance.
2636          */
2637         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2638         reg |= DWC3_DCFG_IGNSTRMPP;
2639         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2640
2641         /* Start with SuperSpeed Default */
2642         dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2643
2644         dep = dwc->eps[0];
2645         ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
2646         if (ret) {
2647                 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2648                 goto err0;
2649         }
2650
2651         dep = dwc->eps[1];
2652         ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
2653         if (ret) {
2654                 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2655                 goto err1;
2656         }
2657
2658         /* begin to receive SETUP packets */
2659         dwc->ep0state = EP0_SETUP_PHASE;
2660         dwc->link_state = DWC3_LINK_STATE_SS_DIS;
2661         dwc->delayed_status = false;
2662         dwc3_ep0_out_start(dwc);
2663
2664         dwc3_gadget_enable_irq(dwc);
2665
2666         return 0;
2667
2668 err1:
2669         __dwc3_gadget_ep_disable(dwc->eps[0]);
2670
2671 err0:
2672         return ret;
2673 }
2674
2675 static int dwc3_gadget_start(struct usb_gadget *g,
2676                 struct usb_gadget_driver *driver)
2677 {
2678         struct dwc3             *dwc = gadget_to_dwc(g);
2679         unsigned long           flags;
2680         int                     ret;
2681         int                     irq;
2682
2683         irq = dwc->irq_gadget;
2684         ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
2685                         IRQF_SHARED, "dwc3", dwc->ev_buf);
2686         if (ret) {
2687                 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2688                                 irq, ret);
2689                 return ret;
2690         }
2691
2692         spin_lock_irqsave(&dwc->lock, flags);
2693         dwc->gadget_driver      = driver;
2694         spin_unlock_irqrestore(&dwc->lock, flags);
2695
2696         return 0;
2697 }
2698
2699 static void __dwc3_gadget_stop(struct dwc3 *dwc)
2700 {
2701         dwc3_gadget_disable_irq(dwc);
2702         __dwc3_gadget_ep_disable(dwc->eps[0]);
2703         __dwc3_gadget_ep_disable(dwc->eps[1]);
2704 }
2705
2706 static int dwc3_gadget_stop(struct usb_gadget *g)
2707 {
2708         struct dwc3             *dwc = gadget_to_dwc(g);
2709         unsigned long           flags;
2710
2711         spin_lock_irqsave(&dwc->lock, flags);
2712         dwc->gadget_driver      = NULL;
2713         dwc->max_cfg_eps = 0;
2714         spin_unlock_irqrestore(&dwc->lock, flags);
2715
2716         free_irq(dwc->irq_gadget, dwc->ev_buf);
2717
2718         return 0;
2719 }
2720
2721 static void dwc3_gadget_config_params(struct usb_gadget *g,
2722                                       struct usb_dcd_config_params *params)
2723 {
2724         struct dwc3             *dwc = gadget_to_dwc(g);
2725
2726         params->besl_baseline = USB_DEFAULT_BESL_UNSPECIFIED;
2727         params->besl_deep = USB_DEFAULT_BESL_UNSPECIFIED;
2728
2729         /* Recommended BESL */
2730         if (!dwc->dis_enblslpm_quirk) {
2731                 /*
2732                  * If the recommended BESL baseline is 0 or if the BESL deep is
2733                  * less than 2, Microsoft's Windows 10 host usb stack will issue
2734                  * a usb reset immediately after it receives the extended BOS
2735                  * descriptor and the enumeration will fail. To maintain
2736                  * compatibility with the Windows' usb stack, let's set the
2737                  * recommended BESL baseline to 1 and clamp the BESL deep to be
2738                  * within 2 to 15.
2739                  */
2740                 params->besl_baseline = 1;
2741                 if (dwc->is_utmi_l1_suspend)
2742                         params->besl_deep =
2743                                 clamp_t(u8, dwc->hird_threshold, 2, 15);
2744         }
2745
2746         /* U1 Device exit Latency */
2747         if (dwc->dis_u1_entry_quirk)
2748                 params->bU1devExitLat = 0;
2749         else
2750                 params->bU1devExitLat = DWC3_DEFAULT_U1_DEV_EXIT_LAT;
2751
2752         /* U2 Device exit Latency */
2753         if (dwc->dis_u2_entry_quirk)
2754                 params->bU2DevExitLat = 0;
2755         else
2756                 params->bU2DevExitLat =
2757                                 cpu_to_le16(DWC3_DEFAULT_U2_DEV_EXIT_LAT);
2758 }
2759
2760 static void dwc3_gadget_set_speed(struct usb_gadget *g,
2761                                   enum usb_device_speed speed)
2762 {
2763         struct dwc3             *dwc = gadget_to_dwc(g);
2764         unsigned long           flags;
2765
2766         spin_lock_irqsave(&dwc->lock, flags);
2767         dwc->gadget_max_speed = speed;
2768         spin_unlock_irqrestore(&dwc->lock, flags);
2769 }
2770
2771 static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
2772                                      enum usb_ssp_rate rate)
2773 {
2774         struct dwc3             *dwc = gadget_to_dwc(g);
2775         unsigned long           flags;
2776
2777         spin_lock_irqsave(&dwc->lock, flags);
2778         dwc->gadget_max_speed = USB_SPEED_SUPER_PLUS;
2779         dwc->gadget_ssp_rate = rate;
2780         spin_unlock_irqrestore(&dwc->lock, flags);
2781 }
2782
2783 static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
2784 {
2785         struct dwc3             *dwc = gadget_to_dwc(g);
2786         union power_supply_propval      val = {0};
2787         int                             ret;
2788
2789         if (dwc->usb2_phy)
2790                 return usb_phy_set_power(dwc->usb2_phy, mA);
2791
2792         if (!dwc->usb_psy)
2793                 return -EOPNOTSUPP;
2794
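             /* The power-supply current limit property is in microamps */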
2795         val.intval = 1000 * mA;
2796         ret = power_supply_set_property(dwc->usb_psy, POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, &val);
2797
2798         return ret;
2799 }
2800
2801 /**
2802  * dwc3_gadget_check_config - ensure dwc3 can support the USB configuration
2803  * @g: pointer to the USB gadget
2804  *
2805  * Used to record the maximum number of endpoints being used in a USB composite
2806  * device (across all configurations). This is to be used in the calculation
2807  * of the TXFIFO sizes when resizing internal memory for individual endpoints.
2808  * It will help ensure that the resizing logic reserves enough space for at
2809  * least one max packet.
2810  */
2811 static int dwc3_gadget_check_config(struct usb_gadget *g)
2812 {
2813         struct dwc3 *dwc = gadget_to_dwc(g);
2814         struct usb_ep *ep;
2815         int fifo_size = 0;
2816         int ram1_depth;
2817         int ep_num = 0;
2818
2819         if (!dwc->do_fifo_resize)
2820                 return 0;
2821
2822         list_for_each_entry(ep, &g->ep_list, ep_list) {
2823                 /* Only interested in the IN endpoints */
2824                 if (ep->claimed && (ep->address & USB_DIR_IN))
2825                         ep_num++;
2826         }
2827
2828         if (ep_num <= dwc->max_cfg_eps)
2829                 return 0;
2830
2831         /* Update the max number of eps in the composition */
2832         dwc->max_cfg_eps = ep_num;
2833
2834         fifo_size = dwc3_gadget_calc_tx_fifo_size(dwc, dwc->max_cfg_eps);
2835         /* Based on the equation, increment by one for every ep */
2836         fifo_size += dwc->max_cfg_eps;
2837
2838         /* Check if we can fit a single fifo per endpoint */
2839         ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
2840         if (fifo_size > ram1_depth)
2841                 return -ENOMEM;
2842
2843         return 0;
2844 }
2845
2846 static void dwc3_gadget_async_callbacks(struct usb_gadget *g, bool enable)
2847 {
2848         struct dwc3             *dwc = gadget_to_dwc(g);
2849         unsigned long           flags;
2850
2851         spin_lock_irqsave(&dwc->lock, flags);
2852         dwc->async_callbacks = enable;
2853         spin_unlock_irqrestore(&dwc->lock, flags);
2854 }
2855
2856 static const struct usb_gadget_ops dwc3_gadget_ops = {
2857         .get_frame              = dwc3_gadget_get_frame,
2858         .wakeup                 = dwc3_gadget_wakeup,
2859         .set_selfpowered        = dwc3_gadget_set_selfpowered,
2860         .pullup                 = dwc3_gadget_pullup,
2861         .udc_start              = dwc3_gadget_start,
2862         .udc_stop               = dwc3_gadget_stop,
2863         .udc_set_speed          = dwc3_gadget_set_speed,
2864         .udc_set_ssp_rate       = dwc3_gadget_set_ssp_rate,
2865         .get_config_params      = dwc3_gadget_config_params,
2866         .vbus_draw              = dwc3_gadget_vbus_draw,
2867         .check_config           = dwc3_gadget_check_config,
2868         .udc_async_callbacks    = dwc3_gadget_async_callbacks,
2869 };
2870
2871 /* -------------------------------------------------------------------------- */
2872
2873 static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep)
2874 {
2875         struct dwc3 *dwc = dep->dwc;
2876
2877         usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
2878         dep->endpoint.maxburst = 1;
2879         dep->endpoint.ops = &dwc3_gadget_ep0_ops;
2880         if (!dep->direction)
2881                 dwc->gadget->ep0 = &dep->endpoint;
2882
2883         dep->endpoint.caps.type_control = true;
2884
2885         return 0;
2886 }
2887
2888 static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep)
2889 {
2890         struct dwc3 *dwc = dep->dwc;
2891         u32 mdwidth;
2892         int size;
2893
2894         mdwidth = dwc3_mdwidth(dwc);
2895
2896         /* MDWIDTH is represented in bits, we need it in bytes */
2897         mdwidth /= 8;
2898
2899         size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1));
2900         if (DWC3_IP_IS(DWC3))
2901                 size = DWC3_GTXFIFOSIZ_TXFDEP(size);
2902         else
2903                 size = DWC31_GTXFIFOSIZ_TXFDEP(size);
2904
2905         /* FIFO depth is in MDWIDTH-sized words; convert to bytes */
2906         size *= mdwidth;
2907
2908         /*
2909          * To meet performance requirement, a minimum TxFIFO size of 3x
2910          * MaxPacketSize is recommended for endpoints that support burst and a
2911          * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't
2912          * support burst. Using those numbers, we can calculate the max packet
2913          * limit as below.
2914          */
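        /*
         * Worked example (hypothetical values, for illustration only): with a
         * 64-bit MDWIDTH (mdwidth = 8 bytes) and TXFDEP = 384, the FIFO holds
         * 384 * 8 = 3072 bytes; at SuperSpeed the limit becomes 3072 / 3 =
         * 1024 bytes, i.e. room for one 1024-byte max packet per burst slot.
         */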
2915         if (dwc->maximum_speed >= USB_SPEED_SUPER)
2916                 size /= 3;
2917         else
2918                 size /= 2;
2919
2920         usb_ep_set_maxpacket_limit(&dep->endpoint, size);
2921
2922         dep->endpoint.max_streams = 16;
2923         dep->endpoint.ops = &dwc3_gadget_ep_ops;
2924         list_add_tail(&dep->endpoint.ep_list,
2925                         &dwc->gadget->ep_list);
2926         dep->endpoint.caps.type_iso = true;
2927         dep->endpoint.caps.type_bulk = true;
2928         dep->endpoint.caps.type_int = true;
2929
2930         return dwc3_alloc_trb_pool(dep);
2931 }
2932
2933 static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep)
2934 {
2935         struct dwc3 *dwc = dep->dwc;
2936         u32 mdwidth;
2937         int size;
2938
2939         mdwidth = dwc3_mdwidth(dwc);
2940
2941         /* MDWIDTH is represented in bits, convert to bytes */
2942         mdwidth /= 8;
2943
2944         /* All OUT endpoints share a single RxFIFO space */
2945         size = dwc3_readl(dwc->regs, DWC3_GRXFIFOSIZ(0));
2946         if (DWC3_IP_IS(DWC3))
2947                 size = DWC3_GRXFIFOSIZ_RXFDEP(size);
2948         else
2949                 size = DWC31_GRXFIFOSIZ_RXFDEP(size);
2950
2951         /* FIFO depth is in MDWIDTH-sized words; convert to bytes */
2952         size *= mdwidth;
2953
2954         /*
2955          * To meet performance requirements, the minimum recommended RxFIFO
2956          * size is defined as follows:
2957          * RxFIFO size >= (3 x MaxPacketSize) +
2958          * (3 x 8 bytes setup packets size) + (16 bytes clock crossing margin)
2959          *
2960          * Then calculate the max packet limit as below.
2961          */
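        /*
         * Worked example (hypothetical values, for illustration only):
         * RXFDEP = 392 with mdwidth = 8 bytes gives size = 3136 bytes;
         * subtracting the (3 * 8) + 16 = 40 bytes of setup/margin leaves
         * 3096, and 3096 / 3 = 1032, enough for a 1024-byte max packet.
         */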
2962         size -= (3 * 8) + 16;
2963         if (size < 0)
2964                 size = 0;
2965         else
2966                 size /= 3;
2967
2968         usb_ep_set_maxpacket_limit(&dep->endpoint, size);
2969         dep->endpoint.max_streams = 16;
2970         dep->endpoint.ops = &dwc3_gadget_ep_ops;
2971         list_add_tail(&dep->endpoint.ep_list,
2972                         &dwc->gadget->ep_list);
2973         dep->endpoint.caps.type_iso = true;
2974         dep->endpoint.caps.type_bulk = true;
2975         dep->endpoint.caps.type_int = true;
2976
2977         return dwc3_alloc_trb_pool(dep);
2978 }
2979
2980 static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
2981 {
2982         struct dwc3_ep                  *dep;
2983         bool                            direction = epnum & 1;
2984         int                             ret;
2985         u8                              num = epnum >> 1;
2986
2987         dep = kzalloc(sizeof(*dep), GFP_KERNEL);
2988         if (!dep)
2989                 return -ENOMEM;
2990
2991         dep->dwc = dwc;
2992         dep->number = epnum;
2993         dep->direction = direction;
2994         dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
2995         dwc->eps[epnum] = dep;
2996         dep->combo_num = 0;
2997         dep->start_cmd_status = 0;
2998
2999         snprintf(dep->name, sizeof(dep->name), "ep%u%s", num,
3000                         direction ? "in" : "out");
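        /*
         * For example, physical endpoint 5 becomes "ep2in":
         * num = 5 >> 1 = 2 and direction = 5 & 1 = 1 (IN).
         */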
3001
3002         dep->endpoint.name = dep->name;
3003
3004         if (dep->number <= 1) {
3005                 dep->endpoint.desc = &dwc3_gadget_ep0_desc;
3006                 dep->endpoint.comp_desc = NULL;
3007         }
3008
3009         if (num == 0)
3010                 ret = dwc3_gadget_init_control_endpoint(dep);
3011         else if (direction)
3012                 ret = dwc3_gadget_init_in_endpoint(dep);
3013         else
3014                 ret = dwc3_gadget_init_out_endpoint(dep);
3015
3016         if (ret)
3017                 return ret;
3018
3019         dep->endpoint.caps.dir_in = direction;
3020         dep->endpoint.caps.dir_out = !direction;
3021
3022         INIT_LIST_HEAD(&dep->pending_list);
3023         INIT_LIST_HEAD(&dep->started_list);
3024         INIT_LIST_HEAD(&dep->cancelled_list);
3025
3026         dwc3_debugfs_create_endpoint_dir(dep);
3027
3028         return 0;
3029 }
3030
3031 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total)
3032 {
3033         u8                              epnum;
3034
3035         INIT_LIST_HEAD(&dwc->gadget->ep_list);
3036
3037         for (epnum = 0; epnum < total; epnum++) {
3038                 int                     ret;
3039
3040                 ret = dwc3_gadget_init_endpoint(dwc, epnum);
3041                 if (ret)
3042                         return ret;
3043         }
3044
3045         return 0;
3046 }
3047
3048 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
3049 {
3050         struct dwc3_ep                  *dep;
3051         u8                              epnum;
3052
3053         for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
3054                 dep = dwc->eps[epnum];
3055                 if (!dep)
3056                         continue;
3057                 /*
3058                  * Physical endpoints 0 and 1 are special; they form the
3059                  * bi-directional USB endpoint 0.
3060                  *
3061                  * For those two physical endpoints, we neither allocate a TRB
3062                  * pool nor add them to the endpoint list. Skip freeing and
3063                  * unlinking for them here, otherwise we would end up with all
3064                  * sorts of bugs when removing dwc3.ko.
3065                  */
3066                 if (epnum != 0 && epnum != 1) {
3067                         dwc3_free_trb_pool(dep);
3068                         list_del(&dep->endpoint.ep_list);
3069                 }
3070
3071                 debugfs_remove_recursive(debugfs_lookup(dep->name,
3072                                 debugfs_lookup(dev_name(dep->dwc->dev),
3073                                                usb_debug_root)));
3074                 kfree(dep);
3075         }
3076 }
3077
3078 /* -------------------------------------------------------------------------- */
3079
3080 static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
3081                 struct dwc3_request *req, struct dwc3_trb *trb,
3082                 const struct dwc3_event_depevt *event, int status, int chain)
3083 {
3084         unsigned int            count;
3085
3086         dwc3_ep_inc_deq(dep);
3087
3088         trace_dwc3_complete_trb(dep, trb);
3089         req->num_trbs--;
3090
3091         /*
3092          * If we're in the middle of a series of chained TRBs and we
3093          * receive a short transfer along the way, DWC3 will skip
3094          * through all TRBs including the last TRB in the chain (the
3095          * one where the CHN bit is zero). DWC3 will also not clear the
3096          * HWO bit, so software has to do it manually.
3097          *
3098          * We're going to do that here to avoid problems of HW trying
3099          * to use bogus TRBs for transfers.
3100          */
3101         if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
3102                 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
3103
3104         /*
3105          * For isochronous transfers, the first TRB in a service interval must
3106          * have the Isoc-First type. Track and report its interval frame number.
3107          */
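        /*
         * For illustration (hypothetical values): with SID/SOFN = 0x1A7 and
         * dep->interval = 8, the reported frame_number is 0x1A7 & ~7 = 0x1A0.
         */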
3108         if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
3109             (trb->ctrl & DWC3_TRBCTL_ISOCHRONOUS_FIRST)) {
3110                 unsigned int frame_number;
3111
3112                 frame_number = DWC3_TRB_CTRL_GET_SID_SOFN(trb->ctrl);
3113                 frame_number &= ~(dep->interval - 1);
3114                 req->request.frame_number = frame_number;
3115         }
3116
3117         /*
3118          * We use the bounce buffer for requests that need an extra TRB or an
3119          * OUT ZLP. If this TRB points to the bounce buffer address, it's an MPS
3120          * alignment TRB; don't include it in the req->remaining calculation.
3121          */
3122         if (trb->bpl == lower_32_bits(dep->dwc->bounce_addr) &&
3123             trb->bph == upper_32_bits(dep->dwc->bounce_addr)) {
3124                 trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
3125                 return 1;
3126         }
3127
3128         count = trb->size & DWC3_TRB_SIZE_MASK;
3129         req->remaining += count;
3130
3131         if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
3132                 return 1;
3133
3134         if (event->status & DEPEVT_STATUS_SHORT && !chain)
3135                 return 1;
3136
3137         if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
3138             (trb->ctrl & DWC3_TRB_CTRL_LST))
3139                 return 1;
3140
3141         return 0;
3142 }
3143
3144 static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
3145                 struct dwc3_request *req, const struct dwc3_event_depevt *event,
3146                 int status)
3147 {
3148         struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
3149         struct scatterlist *sg = req->sg;
3150         struct scatterlist *s;
3151         unsigned int num_queued = req->num_queued_sgs;
3152         unsigned int i;
3153         int ret = 0;
3154
3155         for_each_sg(sg, s, num_queued, i) {
3156                 trb = &dep->trb_pool[dep->trb_dequeue];
3157
3158                 req->sg = sg_next(s);
3159                 req->num_queued_sgs--;
3160
3161                 ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req,
3162                                 trb, event, status, true);
3163                 if (ret)
3164                         break;
3165         }
3166
3167         return ret;
3168 }
3169
3170 static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep,
3171                 struct dwc3_request *req, const struct dwc3_event_depevt *event,
3172                 int status)
3173 {
3174         struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
3175
3176         return dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb,
3177                         event, status, false);
3178 }
3179
3180 static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req)
3181 {
3182         return req->num_pending_sgs == 0 && req->num_queued_sgs == 0;
3183 }
3184
3185 static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
3186                 const struct dwc3_event_depevt *event,
3187                 struct dwc3_request *req, int status)
3188 {
3189         int ret;
3190
3191         if (req->request.num_mapped_sgs)
3192                 ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event,
3193                                 status);
3194         else
3195                 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
3196                                 status);
3197
3198         req->request.actual = req->request.length - req->remaining;
3199
3200         if (!dwc3_gadget_ep_request_completed(req))
3201                 goto out;
3202
3203         if (req->needs_extra_trb) {
3204                 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
3205                                 status);
3206                 req->needs_extra_trb = false;
3207         }
3208
3209         dwc3_gadget_giveback(dep, req, status);
3210
3211 out:
3212         return ret;
3213 }
3214
3215 static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
3216                 const struct dwc3_event_depevt *event, int status)
3217 {
3218         struct dwc3_request     *req;
3219         struct dwc3_request     *tmp;
3220
3221         list_for_each_entry_safe(req, tmp, &dep->started_list, list) {
3222                 int ret;
3223
3224                 ret = dwc3_gadget_ep_cleanup_completed_request(dep, event,
3225                                 req, status);
3226                 if (ret)
3227                         break;
3228         }
3229 }
3230
3231 static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
3232 {
3233         struct dwc3_request     *req;
3234         struct dwc3             *dwc = dep->dwc;
3235
3236         if (!dep->endpoint.desc || !dwc->pullups_connected ||
3237             !dwc->connected)
3238                 return false;
3239
3240         if (!list_empty(&dep->pending_list))
3241                 return true;
3242
3243         /*
3244          * We only need to check the first entry of the started list. We can
3245          * assume the completed requests are removed from the started list.
3246          */
3247         req = next_request(&dep->started_list);
3248         if (!req)
3249                 return false;
3250
3251         return !dwc3_gadget_ep_request_completed(req);
3252 }
3253
3254 static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
3255                 const struct dwc3_event_depevt *event)
3256 {
3257         dep->frame_number = event->parameters;
3258 }
3259
3260 static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
3261                 const struct dwc3_event_depevt *event, int status)
3262 {
3263         struct dwc3             *dwc = dep->dwc;
3264         bool                    no_started_trb = true;
3265
3266         if (!dep->endpoint.desc)
3267                 return no_started_trb;
3268
3269         dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
3270
3271         if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
3272                 goto out;
3273
3274         if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
3275                 list_empty(&dep->started_list) &&
3276                 (list_empty(&dep->pending_list) || status == -EXDEV))
3277                 dwc3_stop_active_transfer(dep, true, true);
3278         else if (dwc3_gadget_ep_should_continue(dep))
3279                 if (__dwc3_gadget_kick_transfer(dep) == 0)
3280                         no_started_trb = false;
3281
3282 out:
3283         /*
3284          * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
3285          * See dwc3_gadget_linksts_change_interrupt() for 1st half.
3286          */
3287         if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
3288                 u32             reg;
3289                 int             i;
3290
3291                 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
3292                         dep = dwc->eps[i];
3293
3294                         if (!(dep->flags & DWC3_EP_ENABLED))
3295                                 continue;
3296
3297                         if (!list_empty(&dep->started_list))
3298                                 return no_started_trb;
3299                 }
3300
3301                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3302                 reg |= dwc->u1u2;
3303                 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3304
3305                 dwc->u1u2 = 0;
3306         }
3307
3308         return no_started_trb;
3309 }
3310
3311 static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
3312                 const struct dwc3_event_depevt *event)
3313 {
3314         int status = 0;
3315
3316         if (!dep->endpoint.desc)
3317                 return;
3318
3319         if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
3320                 dwc3_gadget_endpoint_frame_from_event(dep, event);
3321
3322         if (event->status & DEPEVT_STATUS_BUSERR)
3323                 status = -ECONNRESET;
3324
3325         if (event->status & DEPEVT_STATUS_MISSED_ISOC)
3326                 status = -EXDEV;
3327
3328         dwc3_gadget_endpoint_trbs_complete(dep, event, status);
3329 }
3330
3331 static void dwc3_gadget_endpoint_transfer_complete(struct dwc3_ep *dep,
3332                 const struct dwc3_event_depevt *event)
3333 {
3334         int status = 0;
3335
3336         dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
3337
3338         if (event->status & DEPEVT_STATUS_BUSERR)
3339                 status = -ECONNRESET;
3340
3341         if (dwc3_gadget_endpoint_trbs_complete(dep, event, status))
3342                 dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
3343 }
3344
3345 static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep,
3346                 const struct dwc3_event_depevt *event)
3347 {
3348         dwc3_gadget_endpoint_frame_from_event(dep, event);
3349
3350         /*
3351          * The XferNotReady event is generated only once before the endpoint
3352          * starts. It will be generated again when END_TRANSFER command is
3353          * issued. For some controller versions, the XferNotReady event may be
3354          * generated while the END_TRANSFER command is still in process. Ignore
3355          * it and wait for the next XferNotReady event after the command is
3356          * completed.
3357          */
3358         if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
3359                 return;
3360
3361         (void) __dwc3_gadget_start_isoc(dep);
3362 }
3363
3364 static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
3365                 const struct dwc3_event_depevt *event)
3366 {
3367         u8 cmd = DEPEVT_PARAMETER_CMD(event->parameters);
3368
3369         if (cmd != DWC3_DEPCMD_ENDTRANSFER)
3370                 return;
3371
3372         /*
3373          * The END_TRANSFER command will cause the controller to generate a
3374          * NoStream event, which is not due to the host's DP NoStream rejection.
3375          * Ignore the next NoStream event.
3376          */
3377         if (dep->stream_capable)
3378                 dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
3379
3380         dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
3381         dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
3382         dwc3_gadget_ep_cleanup_cancelled_requests(dep);
3383
3384         if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) {
3385                 struct dwc3 *dwc = dep->dwc;
3386
3387                 dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL;
3388                 if (dwc3_send_clear_stall_ep_cmd(dep)) {
3389                         struct usb_ep *ep0 = &dwc->eps[0]->endpoint;
3390
3391                         dev_err(dwc->dev, "failed to clear STALL on %s\n", dep->name);
3392                         if (dwc->delayed_status)
3393                                 __dwc3_gadget_ep0_set_halt(ep0, 1);
3394                         return;
3395                 }
3396
3397                 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
3398                 if (dwc->delayed_status)
3399                         dwc3_ep0_send_delayed_status(dwc);
3400         }
3401
3402         if ((dep->flags & DWC3_EP_DELAY_START) &&
3403             !usb_endpoint_xfer_isoc(dep->endpoint.desc))
3404                 __dwc3_gadget_kick_transfer(dep);
3405
3406         dep->flags &= ~DWC3_EP_DELAY_START;
3407 }
3408
3409 static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
3410                 const struct dwc3_event_depevt *event)
3411 {
3412         struct dwc3 *dwc = dep->dwc;
3413
3414         if (event->status == DEPEVT_STREAMEVT_FOUND) {
3415                 dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
3416                 goto out;
3417         }
3418
3419         /* Note: NoStream rejection event param value is 0 and not 0xFFFF */
3420         switch (event->parameters) {
3421         case DEPEVT_STREAM_PRIME:
3422                 /*
3423                  * If the host can properly transition the endpoint state from
3424                  * idle to prime after a NoStream rejection, there's no need to
3425                  * force restarting the endpoint to reinitiate the stream. To
3426                  * simplify the check, assume the host follows the USB spec if
3427                  * it primed the endpoint more than once.
3428                  */
3429                 if (dep->flags & DWC3_EP_FORCE_RESTART_STREAM) {
3430                         if (dep->flags & DWC3_EP_FIRST_STREAM_PRIMED)
3431                                 dep->flags &= ~DWC3_EP_FORCE_RESTART_STREAM;
3432                         else
3433                                 dep->flags |= DWC3_EP_FIRST_STREAM_PRIMED;
3434                 }
3435
3436                 break;
3437         case DEPEVT_STREAM_NOSTREAM:
3438                 if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
3439                     !(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
3440                     !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE))
3441                         break;
3442
3443                 /*
3444                  * If the host rejects a stream due to no active stream, by the
3445                  * USB and xHCI spec, the endpoint will be put back to idle
3446                  * state. When the host is ready (buffer added/updated), it will
3447                  * prime the endpoint to inform the usb device controller. This
3448                  * triggers the device controller to issue ERDY to restart the
3449                  * stream. However, some hosts don't follow this and keep the
3450                  * endpoint in the idle state. No prime will come despite host
3451          * endpoint in the idle state. No prime arrives even though the
3452          * host's streams are updated, and the device controller is never
3453          * triggered to generate ERDY to move the next stream data. To
3454          * work around this and maintain compatibility with various
3455          * hosts, force reinitiating the stream until the host is ready
3456                  */
3457                 if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) {
3458                         unsigned int cmd = DWC3_DGCMD_SET_ENDPOINT_PRIME;
3459
3460                         dwc3_send_gadget_generic_command(dwc, cmd, dep->number);
3461                 } else {
3462                         dep->flags |= DWC3_EP_DELAY_START;
3463                         dwc3_stop_active_transfer(dep, true, true);
3464                         return;
3465                 }
3466                 break;
3467         }
3468
3469 out:
3470         dep->flags &= ~DWC3_EP_IGNORE_NEXT_NOSTREAM;
3471 }
3472
3473 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
3474                 const struct dwc3_event_depevt *event)
3475 {
3476         struct dwc3_ep          *dep;
3477         u8                      epnum = event->endpoint_number;
3478
3479         dep = dwc->eps[epnum];
3480
3481         if (!(dep->flags & DWC3_EP_ENABLED)) {
3482                 if (!(dep->flags & DWC3_EP_TRANSFER_STARTED))
3483                         return;
3484
3485                 /* Handle only EPCMDCMPLT when EP disabled */
3486                 if (event->endpoint_event != DWC3_DEPEVT_EPCMDCMPLT)
3487                         return;
3488         }
3489
3490         if (epnum == 0 || epnum == 1) {
3491                 dwc3_ep0_interrupt(dwc, event);
3492                 return;
3493         }
3494
3495         switch (event->endpoint_event) {
3496         case DWC3_DEPEVT_XFERINPROGRESS:
3497                 dwc3_gadget_endpoint_transfer_in_progress(dep, event);
3498                 break;
3499         case DWC3_DEPEVT_XFERNOTREADY:
3500                 dwc3_gadget_endpoint_transfer_not_ready(dep, event);
3501                 break;
3502         case DWC3_DEPEVT_EPCMDCMPLT:
3503                 dwc3_gadget_endpoint_command_complete(dep, event);
3504                 break;
3505         case DWC3_DEPEVT_XFERCOMPLETE:
3506                 dwc3_gadget_endpoint_transfer_complete(dep, event);
3507                 break;
3508         case DWC3_DEPEVT_STREAMEVT:
3509                 dwc3_gadget_endpoint_stream_event(dep, event);
3510                 break;
3511         case DWC3_DEPEVT_RXTXFIFOEVT:
3512                 break;
3513         }
3514 }
3515
3516 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
3517 {
3518         if (dwc->async_callbacks && dwc->gadget_driver->disconnect) {
3519                 spin_unlock(&dwc->lock);
3520                 dwc->gadget_driver->disconnect(dwc->gadget);
3521                 spin_lock(&dwc->lock);
3522         }
3523 }
3524
3525 static void dwc3_suspend_gadget(struct dwc3 *dwc)
3526 {
3527         if (dwc->async_callbacks && dwc->gadget_driver->suspend) {
3528                 spin_unlock(&dwc->lock);
3529                 dwc->gadget_driver->suspend(dwc->gadget);
3530                 spin_lock(&dwc->lock);
3531         }
3532 }
3533
3534 static void dwc3_resume_gadget(struct dwc3 *dwc)
3535 {
3536         if (dwc->async_callbacks && dwc->gadget_driver->resume) {
3537                 spin_unlock(&dwc->lock);
3538                 dwc->gadget_driver->resume(dwc->gadget);
3539                 spin_lock(&dwc->lock);
3540         }
3541 }
3542
3543 static void dwc3_reset_gadget(struct dwc3 *dwc)
3544 {
3545         if (!dwc->gadget_driver)
3546                 return;
3547
3548         if (dwc->async_callbacks && dwc->gadget->speed != USB_SPEED_UNKNOWN) {
3549                 spin_unlock(&dwc->lock);
3550                 usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver);
3551                 spin_lock(&dwc->lock);
3552         }
3553 }
3554
3555 static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
3556         bool interrupt)
3557 {
3558         struct dwc3_gadget_ep_cmd_params params;
3559         u32 cmd;
3560         int ret;
3561
3562         if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
3563             (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
3564                 return;
3565
3566         /*
3567          * NOTICE: We are violating what the Databook says about the
3568          * EndTransfer command. Ideally we would _always_ wait for the
3569          * EndTransfer Command Completion IRQ, but that's causing too
3570          * much trouble synchronizing between us and the gadget driver.
3571          *
3572          * We have discussed this with the IP Provider and it was
3573          * suggested to giveback all requests here.
3574          *
3575          * Note also that a similar handling was tested by Synopsys
3576          * (thanks a lot Paul) and nothing bad has come out of it.
3577          * In short, what we're doing is issuing EndTransfer with
3578          * CMDIOC bit set and delay kicking transfer until the
3579          * EndTransfer command has completed.
3580          *
3581          * As of IP version 3.10a of the DWC_usb3 IP, the controller
3582          * supports a mode to work around the above limitation. The
3583          * software can poll the CMDACT bit in the DEPCMD register
3584          * after issuing an EndTransfer command. This mode is enabled
3585          * by writing GUCTL2[14]. This polling is already done in the
3586          * dwc3_send_gadget_ep_cmd() function so if the mode is
3587          * enabled, the EndTransfer command will have completed upon
3588          * returning from this function.
3589          *
3590          * This mode is NOT available on the DWC_usb31 IP.
3591          */
3592
3593         cmd = DWC3_DEPCMD_ENDTRANSFER;
3594         cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
3595         cmd |= interrupt ? DWC3_DEPCMD_CMDIOC : 0;
3596         cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
3597         memset(&params, 0, sizeof(params));
3598         ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
3599         WARN_ON_ONCE(ret);
3600         dep->resource_index = 0;
3601
3602         if (!interrupt)
3603                 dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
3604         else
3605                 dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
3606 }
3607
3608 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
3609 {
3610         u32 epnum;
3611
3612         for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
3613                 struct dwc3_ep *dep;
3614                 int ret;
3615
3616                 dep = dwc->eps[epnum];
3617                 if (!dep)
3618                         continue;
3619
3620                 if (!(dep->flags & DWC3_EP_STALL))
3621                         continue;
3622
3623                 dep->flags &= ~DWC3_EP_STALL;
3624
3625                 ret = dwc3_send_clear_stall_ep_cmd(dep);
3626                 WARN_ON_ONCE(ret);
3627         }
3628 }
3629
3630 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
3631 {
3632         int                     reg;
3633
3634         dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
3635
3636         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3637         reg &= ~DWC3_DCTL_INITU1ENA;
3638         reg &= ~DWC3_DCTL_INITU2ENA;
3639         dwc3_gadget_dctl_write_safe(dwc, reg);
3640
3641         dwc3_disconnect_gadget(dwc);
3642
3643         dwc->gadget->speed = USB_SPEED_UNKNOWN;
3644         dwc->setup_packet_pending = false;
3645         usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
3646
3647         dwc->connected = false;
3648 }
3649
3650 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
3651 {
3652         u32                     reg;
3653
3654         /*
3655          * Ideally, dwc3_reset_gadget() would trigger the function
3656          * drivers to stop any active transfers through ep disable.
3657          * However, for functions which defer ep disable, such as mass
3658          * storage, we will need to rely on the call to stop active
3659          * transfers here and avoid allowing request queuing.
3660          */
3661         dwc->connected = false;
3662
3663         /*
3664          * WORKAROUND: DWC3 revisions <1.88a have an issue which
3665          * would cause a missing Disconnect Event if there's a
3666          * pending Setup Packet in the FIFO.
3667          *
3668          * There's no suggested workaround on the official Bug
3669          * report, which states that "unless the driver/application
3670          * is doing any special handling of a disconnect event,
3671          * there is no functional issue".
3672          *
3673          * Unfortunately, it turns out that we _do_ some special
3674          * handling of a disconnect event, namely complete all
3675          * pending transfers, notify gadget driver of the
3676          * disconnection, and so on.
3677          *
3678          * Our suggested workaround is to follow the Disconnect
3679          * Event steps here, instead, based on a setup_packet_pending
3680          * flag. Such flag gets set whenever we have a SETUP_PENDING
3681          * status for EP0 TRBs and gets cleared on XferComplete for the
3682          * same endpoint.
3683          *
3684          * Refers to:
3685          *
3686          * STAR#9000466709: RTL: Device : Disconnect event not
3687          * generated if setup packet pending in FIFO
3688          */
3689         if (DWC3_VER_IS_PRIOR(DWC3, 188A)) {
3690                 if (dwc->setup_packet_pending)
3691                         dwc3_gadget_disconnect_interrupt(dwc);
3692         }
3693
3694         dwc3_reset_gadget(dwc);
3695         /*
3696          * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
3697          * Section 4.1.2 Table 4-2, it states that during a USB reset, the SW
3698          * needs to ensure that it sends "a DEPENDXFER command for any active
3699          * transfers."
3700          */
3701         dwc3_stop_active_transfers(dwc);
3702         dwc->connected = true;
3703
3704         reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3705         reg &= ~DWC3_DCTL_TSTCTRL_MASK;
3706         dwc3_gadget_dctl_write_safe(dwc, reg);
3707         dwc->test_mode = false;
3708         dwc3_clear_stall_all_ep(dwc);
3709
3710         /* Reset device address to zero */
3711         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
3712         reg &= ~(DWC3_DCFG_DEVADDR_MASK);
3713         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
3714 }
3715
3716 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
3717 {
3718         struct dwc3_ep          *dep;
3719         int                     ret;
3720         u32                     reg;
3721         u8                      lanes = 1;
3722         u8                      speed;
3723
3724         reg = dwc3_readl(dwc->regs, DWC3_DSTS);
3725         speed = reg & DWC3_DSTS_CONNECTSPD;
3726         dwc->speed = speed;
3727
3728         if (DWC3_IP_IS(DWC32))
3729                 lanes = DWC3_DSTS_CONNLANES(reg) + 1;
3730
3731         dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;
3732
3733         /*
3734          * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
3735          * each time on Connect Done.
3736          *
3737          * Currently we always use the reset value. If any platform
3738          * wants to set this to a different value, we need to add a
3739          * setting and update GCTL.RAMCLKSEL here.
3740          */
3741
3742         switch (speed) {
3743         case DWC3_DSTS_SUPERSPEED_PLUS:
3744                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
3745                 dwc->gadget->ep0->maxpacket = 512;
3746                 dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
3747
3748                 if (lanes > 1)
3749                         dwc->gadget->ssp_rate = USB_SSP_GEN_2x2;
3750                 else
3751                         dwc->gadget->ssp_rate = USB_SSP_GEN_2x1;
3752                 break;
3753         case DWC3_DSTS_SUPERSPEED:
3754                 /*
3755                  * WORKAROUND: DWC3 revisions <1.90a have an issue which
3756                  * would cause a missing USB3 Reset event.
3757                  *
3758                  * In such situations, we should force a USB3 Reset
3759                  * event by calling our dwc3_gadget_reset_interrupt()
3760                  * routine.
3761                  *
3762                  * Refers to:
3763                  *
3764                  * STAR#9000483510: RTL: SS : USB3 reset event may
3765                  * not be generated always when the link enters poll
3766                  */
3767                 if (DWC3_VER_IS_PRIOR(DWC3, 190A))
3768                         dwc3_gadget_reset_interrupt(dwc);
3769
3770                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
3771                 dwc->gadget->ep0->maxpacket = 512;
3772                 dwc->gadget->speed = USB_SPEED_SUPER;
3773
3774                 if (lanes > 1) {
3775                         dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
3776                         dwc->gadget->ssp_rate = USB_SSP_GEN_1x2;
3777                 }
3778                 break;
3779         case DWC3_DSTS_HIGHSPEED:
3780                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
3781                 dwc->gadget->ep0->maxpacket = 64;
3782                 dwc->gadget->speed = USB_SPEED_HIGH;
3783                 break;
3784         case DWC3_DSTS_FULLSPEED:
3785                 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
3786                 dwc->gadget->ep0->maxpacket = 64;
3787                 dwc->gadget->speed = USB_SPEED_FULL;
3788                 break;
3789         }
3790
3791         dwc->eps[1]->endpoint.maxpacket = dwc->gadget->ep0->maxpacket;
3792
3793         /* Enable USB2 LPM Capability */
3794
3795         if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A) &&
3796             !dwc->usb2_gadget_lpm_disable &&
3797             (speed != DWC3_DSTS_SUPERSPEED) &&
3798             (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
3799                 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
3800                 reg |= DWC3_DCFG_LPM_CAP;
3801                 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
3802
3803                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3804                 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
3805
3806                 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold |
3807                                             (dwc->is_utmi_l1_suspend << 4));
3808
3809                 /*
3810                  * On dwc3 revisions >= 2.40a with the LPM Erratum enabled and
3811                  * DCFG.LPMCap set, the core responds with an ACK when the
3812                  * BESL value in the LPM token is less than or equal to the
3813                  * LPM NYET threshold.
3814                  */
3815                 WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && dwc->has_lpm_erratum,
3816                                 "LPM Erratum not available on dwc3 revisions < 2.40a\n");
3817
3818                 if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A))
3819                         reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold);
3820
3821                 dwc3_gadget_dctl_write_safe(dwc, reg);
3822         } else {
3823                 if (dwc->usb2_gadget_lpm_disable) {
3824                         reg = dwc3_readl(dwc->regs, DWC3_DCFG);
3825                         reg &= ~DWC3_DCFG_LPM_CAP;
3826                         dwc3_writel(dwc->regs, DWC3_DCFG, reg);
3827                 }
3828
3829                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3830                 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
3831                 dwc3_gadget_dctl_write_safe(dwc, reg);
3832         }
3833
3834         dep = dwc->eps[0];
3835         ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
3836         if (ret) {
3837                 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
3838                 return;
3839         }
3840
3841         dep = dwc->eps[1];
3842         ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_MODIFY);
3843         if (ret) {
3844                 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
3845                 return;
3846         }
3847
3848         /*
3849          * Configure PHY via GUSB3PIPECTLn if required.
3850          *
3851          * Update GTXFIFOSIZn
3852          *
3853          * In both cases reset values should be sufficient.
3854          */
3855 }
3856
3857 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
3858 {
3859         /*
3860          * TODO take core out of low power mode when that's
3861          * implemented.
3862          */
3863
3864         if (dwc->async_callbacks && dwc->gadget_driver->resume) {
3865                 spin_unlock(&dwc->lock);
3866                 dwc->gadget_driver->resume(dwc->gadget);
3867                 spin_lock(&dwc->lock);
3868         }
3869 }
3870
3871 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
3872                 unsigned int evtinfo)
3873 {
3874         enum dwc3_link_state    next = evtinfo & DWC3_LINK_STATE_MASK;
3875         unsigned int            pwropt;
3876
3877         /*
3878          * WORKAROUND: DWC3 < 2.50a have an issue when configured without
3879          * Hibernation mode enabled which would show up when device detects
3880          * host-initiated U3 exit.
3881          *
3882          * In that case, device will generate a Link State Change Interrupt
3883          * from U3 to RESUME which is only necessary if Hibernation is
3884          * configured in.
3885          *
3886          * There are no functional changes due to such a spurious event, and we
3887          * just need to ignore it.
3888          *
3889          * Refers to:
3890          *
3891          * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
3892          * operational mode
3893          */
3894         pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
3895         if (DWC3_VER_IS_PRIOR(DWC3, 250A) &&
3896                         (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
3897                 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
3898                                 (next == DWC3_LINK_STATE_RESUME)) {
3899                         return;
3900                 }
3901         }
3902
3903         /*
3904          * WORKAROUND: DWC3 revisions <1.83a have an issue where, depending
3905          * on the link partner, the USB session might do multiple entries/exits
3906          * of low power states before a transfer takes place.
3907          *
3908          * Due to this problem, we might experience lower throughput. The
3909          * suggested workaround is to disable DCTL[12:9] bits if we're
3910          * transitioning from U1/U2 to U0 and enable those bits again
3911          * after a transfer completes and there are no pending transfers
3912          * on any of the enabled endpoints.
3913          *
3914          * This is the first half of that workaround.
3915          *
3916          * Refers to:
3917          *
3918          * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
3919          * core send LGO_Ux entering U0
3920          */
3921         if (DWC3_VER_IS_PRIOR(DWC3, 183A)) {
3922                 if (next == DWC3_LINK_STATE_U0) {
3923                         u32     u1u2;
3924                         u32     reg;
3925
3926                         switch (dwc->link_state) {
3927                         case DWC3_LINK_STATE_U1:
3928                         case DWC3_LINK_STATE_U2:
3929                                 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
3930                                 u1u2 = reg & (DWC3_DCTL_INITU2ENA
3931                                                 | DWC3_DCTL_ACCEPTU2ENA
3932                                                 | DWC3_DCTL_INITU1ENA
3933                                                 | DWC3_DCTL_ACCEPTU1ENA);
3934
3935                                 if (!dwc->u1u2)
3936                                         dwc->u1u2 = reg & u1u2;
3937
3938                                 reg &= ~u1u2;
3939
3940                                 dwc3_gadget_dctl_write_safe(dwc, reg);
3941                                 break;
3942                         default:
3943                                 /* do nothing */
3944                                 break;
3945                         }
3946                 }
3947         }
3948
3949         switch (next) {
3950         case DWC3_LINK_STATE_U1:
3951                 if (dwc->speed == USB_SPEED_SUPER)
3952                         dwc3_suspend_gadget(dwc);
3953                 break;
3954         case DWC3_LINK_STATE_U2:
3955         case DWC3_LINK_STATE_U3:
3956                 dwc3_suspend_gadget(dwc);
3957                 break;
3958         case DWC3_LINK_STATE_RESUME:
3959                 dwc3_resume_gadget(dwc);
3960                 break;
3961         default:
3962                 /* do nothing */
3963                 break;
3964         }
3965
3966         dwc->link_state = next;
3967 }
3968
3969 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
3970                                           unsigned int evtinfo)
3971 {
3972         enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
3973
3974         if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
3975                 dwc3_suspend_gadget(dwc);
3976
3977         dwc->link_state = next;
3978 }
3979
3980 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
3981                 unsigned int evtinfo)
3982 {
3983         unsigned int is_ss = evtinfo & BIT(4);
3984
3985         /*
3986          * WORKAROUND: DWC3 revision 2.20a with hibernation support
3987          * has a known issue which can cause USB CV TD.9.23 to fail
3988          * randomly.
3989          *
3990          * Because of this issue, the core could generate bogus hibernation
3991          * events which SW needs to ignore.
3992          *
3993          * Refers to:
3994          *
3995          * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
3996          * Device Fallback from SuperSpeed
3997          */
3998         if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
3999                 return;
4000
4001         /* enter hibernation here */
4002 }
4003
4004 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
4005                 const struct dwc3_event_devt *event)
4006 {
4007         switch (event->type) {
4008         case DWC3_DEVICE_EVENT_DISCONNECT:
4009                 dwc3_gadget_disconnect_interrupt(dwc);
4010                 break;
4011         case DWC3_DEVICE_EVENT_RESET:
4012                 dwc3_gadget_reset_interrupt(dwc);
4013                 break;
4014         case DWC3_DEVICE_EVENT_CONNECT_DONE:
4015                 dwc3_gadget_conndone_interrupt(dwc);
4016                 break;
4017         case DWC3_DEVICE_EVENT_WAKEUP:
4018                 dwc3_gadget_wakeup_interrupt(dwc);
4019                 break;
4020         case DWC3_DEVICE_EVENT_HIBER_REQ:
4021                 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
4022                                         "unexpected hibernation event\n"))
4023                         break;
4024
4025                 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
4026                 break;
4027         case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
4028                 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
4029                 break;
4030         case DWC3_DEVICE_EVENT_SUSPEND:
4031                 /* This became the suspend event for versions 2.30a and above */
4032                 if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
4033                         /*
4034                          * Ignore the suspend event until the gadget enters the
4035                          * USB_STATE_CONFIGURED state.
4036                          */
4037                         if (dwc->gadget->state >= USB_STATE_CONFIGURED)
4038                                 dwc3_gadget_suspend_interrupt(dwc,
4039                                                 event->event_info);
4040                 }
4041                 break;
4042         case DWC3_DEVICE_EVENT_SOF:
4043         case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
4044         case DWC3_DEVICE_EVENT_CMD_CMPL:
4045         case DWC3_DEVICE_EVENT_OVERFLOW:
4046                 break;
4047         default:
4048                 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
4049         }
4050 }
4051
4052 static void dwc3_process_event_entry(struct dwc3 *dwc,
4053                 const union dwc3_event *event)
4054 {
4055         trace_dwc3_event(event->raw, dwc);
4056
4057         if (!event->type.is_devspec)
4058                 dwc3_endpoint_interrupt(dwc, &event->depevt);
4059         else if (event->type.type == DWC3_EVENT_TYPE_DEV)
4060                 dwc3_gadget_interrupt(dwc, &event->devt);
4061         else
4062                 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
4063 }
4064
4065 static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
4066 {
4067         struct dwc3 *dwc = evt->dwc;
4068         irqreturn_t ret = IRQ_NONE;
4069         int left;
4070         u32 reg;
4071
4072         left = evt->count;
4073
4074         if (!(evt->flags & DWC3_EVENT_PENDING))
4075                 return IRQ_NONE;
4076
4077         while (left > 0) {
4078                 union dwc3_event event;
4079
4080                 event.raw = *(u32 *) (evt->cache + evt->lpos);
4081
4082                 dwc3_process_event_entry(dwc, &event);
4083
4084                 /*
4085                  * FIXME we wrap around correctly to the next entry as
4086                  * almost all entries are 4 bytes in size. There is one
4087                  * entry which is 12 bytes: a regular entry followed by
4088                  * 8 bytes of data. ATM it's not clear how things are
4089                  * laid out if such an entry straddles the buffer
4090                  * boundary, so worry about that once we try to handle
4091                  * it.
4092                  */
4093                 evt->lpos = (evt->lpos + 4) % evt->length;
4094                 left -= 4;
4095         }
4096
4097         evt->count = 0;
4098         evt->flags &= ~DWC3_EVENT_PENDING;
4099         ret = IRQ_HANDLED;
4100
4101         /* Unmask interrupt */
4102         reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
4103         reg &= ~DWC3_GEVNTSIZ_INTMASK;
4104         dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
4105
4106         if (dwc->imod_interval) {
4107                 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
4108                 dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
4109         }
4110
4111         return ret;
4112 }
4113
4114 static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
4115 {
4116         struct dwc3_event_buffer *evt = _evt;
4117         struct dwc3 *dwc = evt->dwc;
4118         unsigned long flags;
4119         irqreturn_t ret = IRQ_NONE;
4120
4121         spin_lock_irqsave(&dwc->lock, flags);
4122         ret = dwc3_process_event_buf(evt);
4123         spin_unlock_irqrestore(&dwc->lock, flags);
4124
4125         return ret;
4126 }
4127
4128 static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
4129 {
4130         struct dwc3 *dwc = evt->dwc;
4131         u32 amount;
4132         u32 count;
4133         u32 reg;
4134
4135         if (pm_runtime_suspended(dwc->dev)) {
4136                 pm_runtime_get(dwc->dev);
4137                 disable_irq_nosync(dwc->irq_gadget);
4138                 dwc->pending_events = true;
4139                 return IRQ_HANDLED;
4140         }
4141
4142         /*
4143          * With PCIe legacy interrupts, testing shows that the top-half irq handler can
4144          * be called again after HW interrupt deassertion. Check if bottom-half
4145          * irq event handler completes before caching new event to prevent
4146          * losing events.
4147          */
4148         if (evt->flags & DWC3_EVENT_PENDING)
4149                 return IRQ_HANDLED;
4150
4151         count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
4152         count &= DWC3_GEVNTCOUNT_MASK;
4153         if (!count)
4154                 return IRQ_NONE;
4155
4156         evt->count = count;
4157         evt->flags |= DWC3_EVENT_PENDING;
4158
4159         /* Mask interrupt */
4160         reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
4161         reg |= DWC3_GEVNTSIZ_INTMASK;
4162         dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
4163
4164         amount = min(count, evt->length - evt->lpos);
4165         memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
4166
4167         if (amount < count)
4168                 memcpy(evt->cache, evt->buf, count - amount);
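        /*
         * The two copies above handle event-buffer wrap-around. For
         * illustration (hypothetical values): with evt->length = 4096,
         * evt->lpos = 4088 and count = 16, amount = 8 bytes are copied up to
         * the end of the buffer and the remaining 16 - 8 = 8 bytes are copied
         * from the start of evt->buf into the start of evt->cache.
         */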
4169
4170         dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
4171
4172         return IRQ_WAKE_THREAD;
4173 }
4174
4175 static irqreturn_t dwc3_interrupt(int irq, void *_evt)
4176 {
4177         struct dwc3_event_buffer        *evt = _evt;
4178
4179         return dwc3_check_event_buf(evt);
4180 }
4181
4182 static int dwc3_gadget_get_irq(struct dwc3 *dwc)
4183 {
4184         struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
4185         int irq;
4186
4187         irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
4188         if (irq > 0)
4189                 goto out;
4190
4191         if (irq == -EPROBE_DEFER)
4192                 goto out;
4193
4194         irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
4195         if (irq > 0)
4196                 goto out;
4197
4198         if (irq == -EPROBE_DEFER)
4199                 goto out;
4200
4201         irq = platform_get_irq(dwc3_pdev, 0);
4202         if (irq > 0)
4203                 goto out;
4204
4205         if (!irq)
4206                 irq = -EINVAL;
4207
4208 out:
4209         return irq;
4210 }
4211
4212 static void dwc_gadget_release(struct device *dev)
4213 {
4214         struct usb_gadget *gadget = container_of(dev, struct usb_gadget, dev);
4215
4216         kfree(gadget);
4217 }
4218
4219 /**
4220  * dwc3_gadget_init - initializes gadget related registers
4221  * @dwc: pointer to our controller context structure
4222  *
4223  * Returns 0 on success otherwise negative errno.
4224  */
4225 int dwc3_gadget_init(struct dwc3 *dwc)
4226 {
4227         int ret;
4228         int irq;
4229         struct device *dev;
4230
4231         irq = dwc3_gadget_get_irq(dwc);
4232         if (irq < 0) {
4233                 ret = irq;
4234                 goto err0;
4235         }
4236
4237         dwc->irq_gadget = irq;
4238
4239         dwc->ep0_trb = dma_alloc_coherent(dwc->sysdev,
4240                                           sizeof(*dwc->ep0_trb) * 2,
4241                                           &dwc->ep0_trb_addr, GFP_KERNEL);
4242         if (!dwc->ep0_trb) {
4243                 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
4244                 ret = -ENOMEM;
4245                 goto err0;
4246         }
4247
4248         dwc->setup_buf = kzalloc(DWC3_EP0_SETUP_SIZE, GFP_KERNEL);
4249         if (!dwc->setup_buf) {
4250                 ret = -ENOMEM;
4251                 goto err1;
4252         }
4253
4254         dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE,
4255                         &dwc->bounce_addr, GFP_KERNEL);
4256         if (!dwc->bounce) {
4257                 ret = -ENOMEM;
4258                 goto err2;
4259         }
4260
4261         init_completion(&dwc->ep0_in_setup);
4262         dwc->gadget = kzalloc(sizeof(struct usb_gadget), GFP_KERNEL);
4263         if (!dwc->gadget) {
4264                 ret = -ENOMEM;
4265                 goto err3;
4266         }
4267
4268
4269         usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release);
4270         dev                             = &dwc->gadget->dev;
4271         dev->platform_data              = dwc;
4272         dwc->gadget->ops                = &dwc3_gadget_ops;
4273         dwc->gadget->speed              = USB_SPEED_UNKNOWN;
4274         dwc->gadget->ssp_rate           = USB_SSP_GEN_UNKNOWN;
4275         dwc->gadget->sg_supported       = true;
4276         dwc->gadget->name               = "dwc3-gadget";
4277         dwc->gadget->lpm_capable        = !dwc->usb2_gadget_lpm_disable;
4278
4279         /*
4280          * FIXME We might be setting max_speed to <SUPER, however versions
4281          * <2.20a of dwc3 have an issue with metastability (documented
4282          * elsewhere in this driver) which tells us we can't set max speed to
4283          * anything lower than SUPER.
4284          *
4285          * Because gadget.max_speed is only used by composite.c and function
4286          * drivers (i.e. it won't go into dwc3's registers) we are allowing this
4287          * to happen so we avoid sending SuperSpeed Capability descriptor
4288          * together with our BOS descriptor as that could confuse host into
4289          * thinking we can handle super speed.
4290          *
4291          * Note that, in fact, we won't even support GetBOS requests when speed
4292          * is less than super speed because we don't have means, yet, to tell
4293          * composite.c that we are USB 2.0 + LPM ECN.
4294          */
4295         if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
4296             !dwc->dis_metastability_quirk)
4297                 dev_info(dwc->dev, "changing max_speed on rev %08x\n",
4298                                 dwc->revision);
4299
4300         dwc->gadget->max_speed          = dwc->maximum_speed;
4301         dwc->gadget->max_ssp_rate       = dwc->max_ssp_rate;
4302
4303         /*
4304          * REVISIT: Here we should clear all pending IRQs to be
4305          * sure we're starting from a well known location.
4306          */
4307
4308         ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps);
4309         if (ret)
4310                 goto err4;
4311
4312         ret = usb_add_gadget(dwc->gadget);
4313         if (ret) {
4314                 dev_err(dwc->dev, "failed to add gadget\n");
4315                 goto err5;
4316         }
4317
4318         if (DWC3_IP_IS(DWC32) && dwc->maximum_speed == USB_SPEED_SUPER_PLUS)
4319                 dwc3_gadget_set_ssp_rate(dwc->gadget, dwc->max_ssp_rate);
4320         else
4321                 dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
4322
4323         return 0;
4324
4325 err5:
4326         dwc3_gadget_free_endpoints(dwc);
4327 err4:
4328         usb_put_gadget(dwc->gadget);
4329         dwc->gadget = NULL;
4330 err3:
4331         dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
4332                         dwc->bounce_addr);
4333
4334 err2:
4335         kfree(dwc->setup_buf);
4336
4337 err1:
4338         dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
4339                         dwc->ep0_trb, dwc->ep0_trb_addr);
4340
4341 err0:
4342         return ret;
4343 }
4344
4345 /* -------------------------------------------------------------------------- */
4346
4347 void dwc3_gadget_exit(struct dwc3 *dwc)
4348 {
4349         if (!dwc->gadget)
4350                 return;
4351
4352         usb_del_gadget(dwc->gadget);
4353         dwc3_gadget_free_endpoints(dwc);
4354         usb_put_gadget(dwc->gadget);
4355         dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
4356                           dwc->bounce_addr);
4357         kfree(dwc->setup_buf);
4358         dma_free_coherent(dwc->sysdev, sizeof(*dwc->ep0_trb) * 2,
4359                           dwc->ep0_trb, dwc->ep0_trb_addr);
4360 }
4361
4362 int dwc3_gadget_suspend(struct dwc3 *dwc)
4363 {
4364         if (!dwc->gadget_driver)
4365                 return 0;
4366
4367         dwc3_gadget_run_stop(dwc, false, false);
4368         dwc3_disconnect_gadget(dwc);
4369         __dwc3_gadget_stop(dwc);
4370
4371         return 0;
4372 }
4373
4374 int dwc3_gadget_resume(struct dwc3 *dwc)
4375 {
4376         int                     ret;
4377
4378         if (!dwc->gadget_driver || !dwc->softconnect)
4379                 return 0;
4380
4381         ret = __dwc3_gadget_start(dwc);
4382         if (ret < 0)
4383                 goto err0;
4384
4385         ret = dwc3_gadget_run_stop(dwc, true, false);
4386         if (ret < 0)
4387                 goto err1;
4388
4389         return 0;
4390
4391 err1:
4392         __dwc3_gadget_stop(dwc);
4393
4394 err0:
4395         return ret;
4396 }
4397
4398 void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
4399 {
4400         if (dwc->pending_events) {
4401                 dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
4402                 dwc->pending_events = false;
4403                 enable_irq(dwc->irq_gadget);
4404         }
4405 }