drivers/usb/gadget/udc/aspeed-vhub/ep0.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * aspeed-vhub -- Driver for Aspeed SoC "vHub" USB gadget
 *
 * ep0.c - Endpoint 0 handling
 *
 * Copyright 2017 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/prefetch.h>
#include <linux/clk.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/dma-mapping.h>

#include "vhub.h"

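/*
 * EP0 control transfers are driven by a small software state machine
 * (enum ep0_state in vhub.h): ep0_state_token while waiting for a SETUP
 * packet, ep0_state_data during the optional data phase, ep0_state_status
 * while the status handshake is in flight, and ep0_state_stall after an
 * error. The two reply helpers below queue an IN data phase on behalf of
 * the internal SETUP decoding code.
 */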
int ast_vhub_reply(struct ast_vhub_ep *ep, char *ptr, int len)
{
        struct usb_request *req = &ep->ep0.req.req;
        int rc;

        if (WARN_ON(ep->d_idx != 0))
                return std_req_stall;
        if (WARN_ON(!ep->ep0.dir_in))
                return std_req_stall;
        if (WARN_ON(len > AST_VHUB_EP0_MAX_PACKET))
                return std_req_stall;
        if (WARN_ON(req->status == -EINPROGRESS))
                return std_req_stall;

        req->buf = ptr;
        req->length = len;
        req->complete = NULL;
        req->zero = true;

        /*
         * Call internal queue directly after dropping the lock. This is
         * safe to do as the reply is always the last thing done when
         * processing a SETUP packet, usually as a tail call
         */
        spin_unlock(&ep->vhub->lock);
        if (ep->ep.ops->queue(&ep->ep, req, GFP_ATOMIC))
                rc = std_req_stall;
        else
                rc = std_req_data;
        spin_lock(&ep->vhub->lock);
        return rc;
}

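/*
 * Build a short IN reply directly in the EP0 buffer from a list of byte
 * arguments. Callers normally go through the ast_vhub_simple_reply()
 * wrapper in vhub.h, which derives the length from the argument list.
 */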
int __ast_vhub_simple_reply(struct ast_vhub_ep *ep, int len, ...)
{
        u8 *buffer = ep->buf;
        unsigned int i;
        va_list args;

        va_start(args, len);

        /* Copy data directly into EP buffer */
        for (i = 0; i < len; i++)
                buffer[i] = va_arg(args, int);
        va_end(args);

        /* req->buf NULL means data is already there */
        return ast_vhub_reply(ep, NULL, len);
}

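/*
 * Handle a SETUP packet on EP0. Requests targeting the vHub itself
 * (ep->dev == NULL) are decoded by ast_vhub_std_hub_request() or
 * ast_vhub_class_hub_request(); standard requests for a downstream
 * device go to ast_vhub_std_dev_request(); anything not handled there
 * is forwarded to the gadget driver's setup() callback.
 */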
void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
{
        struct usb_ctrlrequest crq;
        enum std_req_rc std_req_rc;
        int rc = -ENODEV;

        if (WARN_ON(ep->d_idx != 0))
                return;

        /*
         * Grab the setup packet from the chip and byteswap
         * interesting fields
         */
        memcpy_fromio(&crq, ep->ep0.setup, sizeof(crq));

        EPDBG(ep, "SETUP packet %02x/%02x/%04x/%04x/%04x [%s] st=%d\n",
              crq.bRequestType, crq.bRequest,
               le16_to_cpu(crq.wValue),
               le16_to_cpu(crq.wIndex),
               le16_to_cpu(crq.wLength),
               (crq.bRequestType & USB_DIR_IN) ? "in" : "out",
               ep->ep0.state);

        /*
         * Check our state, cancel pending requests if needed
         *
         * Note: Under some circumstances, we can get a new setup
         * packet while waiting for the stall ack, just accept it.
         *
         * In any case, a SETUP packet in wrong state should have
         * reset the HW state machine, so let's just log, nuke
         * requests, move on.
         */
        if (ep->ep0.state != ep0_state_token &&
            ep->ep0.state != ep0_state_stall) {
                EPDBG(ep, "wrong state\n");
                ast_vhub_nuke(ep, -EIO);
        }

        /* Calculate next state for EP0 */
        ep->ep0.state = ep0_state_data;
        ep->ep0.dir_in = !!(crq.bRequestType & USB_DIR_IN);

        /* If this is the vHub, we handle requests differently */
        std_req_rc = std_req_driver;
        if (ep->dev == NULL) {
                if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
                        std_req_rc = ast_vhub_std_hub_request(ep, &crq);
                else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
                        std_req_rc = ast_vhub_class_hub_request(ep, &crq);
                else
                        std_req_rc = std_req_stall;
        } else if ((crq.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
                std_req_rc = ast_vhub_std_dev_request(ep, &crq);

        /* Act upon result */
        switch(std_req_rc) {
        case std_req_complete:
                goto complete;
        case std_req_stall:
                goto stall;
        case std_req_driver:
                break;
        case std_req_data:
                return;
        }

        /* Pass request up to the gadget driver */
        if (WARN_ON(!ep->dev))
                goto stall;
        if (ep->dev->driver) {
                EPDBG(ep, "forwarding to gadget...\n");
                spin_unlock(&ep->vhub->lock);
                rc = ep->dev->driver->setup(&ep->dev->gadget, &crq);
                spin_lock(&ep->vhub->lock);
                EPDBG(ep, "driver returned %d\n", rc);
        } else {
                EPDBG(ep, "no gadget for request !\n");
        }
        if (rc >= 0)
                return;

 stall:
        EPDBG(ep, "stalling\n");
        writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
        ep->ep0.state = ep0_state_stall;
        ep->ep0.dir_in = false;
        return;

 complete:
        EPVDBG(ep, "sending [in] status with no data\n");
        writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
        ep->ep0.state = ep0_state_status;
        ep->ep0.dir_in = false;
}

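/*
 * Push the next chunk of an IN data phase into the EP0 buffer and
 * trigger transmission. Called when the request is first queued and
 * again from the ACK handler after each packet, until the transfer is
 * complete and the OUT status phase is armed.
 */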
static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
                                 struct ast_vhub_req *req)
{
        unsigned int chunk;
        u32 reg;

        /* If this is a 0-length request, it's the gadget trying to
         * send a status on our behalf. We take it from here.
         */
        if (req->req.length == 0)
                req->last_desc = 1;

        /* Are we done ? Complete request, otherwise wait for next interrupt */
        if (req->last_desc >= 0) {
                EPVDBG(ep, "complete send %d/%d\n",
                       req->req.actual, req->req.length);
                ep->ep0.state = ep0_state_status;
                writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
                ast_vhub_done(ep, req, 0);
                return;
        }

        /*
         * Next chunk cropped to max packet size. Also check if this
         * is the last packet
         */
        chunk = req->req.length - req->req.actual;
        if (chunk > ep->ep.maxpacket)
                chunk = ep->ep.maxpacket;
        else if ((chunk < ep->ep.maxpacket) || !req->req.zero)
                req->last_desc = 1;

        EPVDBG(ep, "send chunk=%d last=%d, req->act=%d mp=%d\n",
               chunk, req->last_desc, req->req.actual, ep->ep.maxpacket);

        /*
         * Copy data if any (internal requests already have data
         * in the EP buffer)
         */
        if (chunk && req->req.buf)
                memcpy(ep->buf, req->req.buf + req->req.actual, chunk);

        vhub_dma_workaround(ep->buf);

        /* Remember chunk size and trigger send */
        reg = VHUB_EP0_SET_TX_LEN(chunk);
        writel(reg, ep->ep0.ctlstat);
        writel(reg | VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
        req->req.actual += chunk;
}

static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
{
        EPVDBG(ep, "rx prime\n");

        /* Prime endpoint for receiving data */
        writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}

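/*
 * Consume one OUT packet from the EP0 buffer into the current request.
 * When the host sends a short packet or the request is full, the
 * transfer is completed and the IN status phase is armed; otherwise the
 * endpoint is re-primed for the next packet.
 */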
static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
                                    unsigned int len)
{
        unsigned int remain;
        int rc = 0;

        /* We are receiving... grab request */
        remain = req->req.length - req->req.actual;

        EPVDBG(ep, "receive got=%d remain=%d\n", len, remain);

        /* Are we getting more than asked ? */
        if (len > remain) {
                EPDBG(ep, "receiving too much (ovf: %d) !\n",
                      len - remain);
                len = remain;
                rc = -EOVERFLOW;
        }
        if (len && req->req.buf)
                memcpy(req->req.buf + req->req.actual, ep->buf, len);
        req->req.actual += len;

        /* Done ? */
        if (len < ep->ep.maxpacket || len == remain) {
                ep->ep0.state = ep0_state_status;
                writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
                ast_vhub_done(ep, req, rc);
        } else
                ast_vhub_ep0_rx_prime(ep);
}

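/*
 * Handle an EP0 ACK (transfer complete) interrupt. Depending on the
 * current state this either advances the data phase (send or receive
 * the next packet), closes the status phase, or stalls the endpoint
 * when the hardware handshake does not match what the state machine
 * expects.
 */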
void ast_vhub_ep0_handle_ack(struct ast_vhub_ep *ep, bool in_ack)
{
        struct ast_vhub_req *req;
        struct ast_vhub *vhub = ep->vhub;
        struct device *dev = &vhub->pdev->dev;
        bool stall = false;
        u32 stat;

        /* Read EP0 status */
        stat = readl(ep->ep0.ctlstat);

        /* Grab current request if any */
        req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

        EPVDBG(ep, "ACK status=%08x,state=%d is_in=%d in_ack=%d req=%p\n",
                stat, ep->ep0.state, ep->ep0.dir_in, in_ack, req);

        switch(ep->ep0.state) {
        case ep0_state_token:
                /* There should be no request queued in that state... */
                if (req) {
                        dev_warn(dev, "request present while in TOKEN state\n");
                        ast_vhub_nuke(ep, -EINVAL);
                }
                dev_warn(dev, "ack while in TOKEN state\n");
                stall = true;
                break;
        case ep0_state_data:
                /* Check the state bits corresponding to our direction */
                if ((ep->ep0.dir_in && (stat & VHUB_EP0_TX_BUFF_RDY)) ||
                    (!ep->ep0.dir_in && (stat & VHUB_EP0_RX_BUFF_RDY)) ||
                    (ep->ep0.dir_in != in_ack)) {
                        /* In that case, ignore interrupt */
                        dev_warn(dev, "irq state mismatch");
                        break;
                }
                /*
                 * We are in data phase and there's no request, something is
                 * wrong, stall
                 */
                if (!req) {
                        dev_warn(dev, "data phase, no request\n");
                        stall = true;
                        break;
                }

                /* We have a request, handle data transfers */
                if (ep->ep0.dir_in)
                        ast_vhub_ep0_do_send(ep, req);
                else
                        ast_vhub_ep0_do_receive(ep, req, VHUB_EP0_RX_LEN(stat));
                return;
        case ep0_state_status:
                /* Nuke stale requests */
                if (req) {
                        dev_warn(dev, "request present while in STATUS state\n");
                        ast_vhub_nuke(ep, -EINVAL);
                }

                /*
                 * If the status phase completes with the wrong ack, stall
                 * the endpoint just in case, to abort whatever the host
                 * was doing.
                 */
                if (ep->ep0.dir_in == in_ack) {
                        dev_warn(dev, "status direction mismatch\n");
                        stall = true;
                }
                break;
        case ep0_state_stall:
                /*
                 * There shouldn't be any request left, but nuke just in case
                 * otherwise the stale request will block subsequent ones
                 */
                ast_vhub_nuke(ep, -EIO);
                break;
        }

        /* Reset to token state or stall */
        if (stall) {
                writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
                ep->ep0.state = ep0_state_stall;
        } else
                ep->ep0.state = ep0_state_token;
}

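/*
 * usb_ep_ops->queue for EP0. Only one request may be in flight at a
 * time: an IN request starts transmitting immediately, a zero-length
 * OUT request is completed on the spot as a status phase, and any
 * other OUT request primes the receiver.
 */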
static int ast_vhub_ep0_queue(struct usb_ep* u_ep, struct usb_request *u_req,
                              gfp_t gfp_flags)
{
        struct ast_vhub_req *req = to_ast_req(u_req);
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);
        struct ast_vhub *vhub = ep->vhub;
        struct device *dev = &vhub->pdev->dev;
        unsigned long flags;

        /* Paranoid checks */
        if (!u_req || (!u_req->complete && !req->internal)) {
                dev_warn(dev, "Bogus EP0 request ! u_req=%p\n", u_req);
                if (u_req) {
                        dev_warn(dev, "complete=%p internal=%d\n",
                                 u_req->complete, req->internal);
                }
                return -EINVAL;
        }

        /* Not endpoint 0 ? */
        if (WARN_ON(ep->d_idx != 0))
                return -EINVAL;

        /* Disabled device */
        if (ep->dev && !ep->dev->enabled)
                return -ESHUTDOWN;

        /* Data, no buffer and not internal ? */
        if (u_req->length && !u_req->buf && !req->internal) {
                dev_warn(dev, "Request with no buffer !\n");
                return -EINVAL;
        }

        EPVDBG(ep, "enqueue req @%p\n", req);
        EPVDBG(ep, "  l=%d zero=%d noshort=%d is_in=%d\n",
               u_req->length, u_req->zero,
               u_req->short_not_ok, ep->ep0.dir_in);

        /* Initialize request progress fields */
        u_req->status = -EINPROGRESS;
        u_req->actual = 0;
        req->last_desc = -1;
        req->active = false;

        spin_lock_irqsave(&vhub->lock, flags);

        /* EP0 can only support a single request at a time */
        if (!list_empty(&ep->queue) ||
            ep->ep0.state == ep0_state_token ||
            ep->ep0.state == ep0_state_stall) {
                dev_warn(dev, "EP0: Request in wrong state\n");
                EPVDBG(ep, "EP0: list_empty=%d state=%d\n",
                       list_empty(&ep->queue), ep->ep0.state);
                spin_unlock_irqrestore(&vhub->lock, flags);
                return -EBUSY;
        }

        /* Add request to list and kick processing if empty */
        list_add_tail(&req->queue, &ep->queue);

        if (ep->ep0.dir_in) {
                /* IN request, send data */
                ast_vhub_ep0_do_send(ep, req);
        } else if (u_req->length == 0) {
                /* 0-len request, send completion as rx */
                EPVDBG(ep, "0-length rx completion\n");
                ep->ep0.state = ep0_state_status;
                writel(VHUB_EP0_TX_BUFF_RDY, ep->ep0.ctlstat);
                ast_vhub_done(ep, req, 0);
        } else {
                /* OUT request, start receiver */
                ast_vhub_ep0_rx_prime(ep);
        }

        spin_unlock_irqrestore(&vhub->lock, flags);

        return 0;
}

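/*
 * usb_ep_ops->dequeue for EP0. Only the single queued request can be
 * cancelled; the endpoint is stalled afterwards so the hardware state
 * machine is brought back to a known state.
 */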
static int ast_vhub_ep0_dequeue(struct usb_ep* u_ep, struct usb_request *u_req)
{
        struct ast_vhub_ep *ep = to_ast_ep(u_ep);
        struct ast_vhub *vhub = ep->vhub;
        struct ast_vhub_req *req;
        unsigned long flags;
        int rc = -EINVAL;

        spin_lock_irqsave(&vhub->lock, flags);

        /* Only one request can be in the queue */
        req = list_first_entry_or_null(&ep->queue, struct ast_vhub_req, queue);

        /* Is it ours ? */
        if (req && u_req == &req->req) {
                EPVDBG(ep, "dequeue req @%p\n", req);

                /*
                 * We don't have to deal with "active" as all
                 * DMAs go to the EP buffers, not the request.
                 */
                ast_vhub_done(ep, req, -ECONNRESET);

                /* We do stall the EP to clean things up in HW */
                writel(VHUB_EP0_CTRL_STALL, ep->ep0.ctlstat);
                ep->ep0.state = ep0_state_status;
                ep->ep0.dir_in = false;
                rc = 0;
        }
        spin_unlock_irqrestore(&vhub->lock, flags);
        return rc;
}

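/*
 * EP0 exposes only queue/dequeue plus request allocation; there are no
 * enable/disable operations since endpoint 0 is always active.
 */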
static const struct usb_ep_ops ast_vhub_ep0_ops = {
        .queue          = ast_vhub_ep0_queue,
        .dequeue        = ast_vhub_ep0_dequeue,
        .alloc_request  = ast_vhub_alloc_request,
        .free_request   = ast_vhub_free_request,
};

void ast_vhub_reset_ep0(struct ast_vhub_dev *dev)
{
        struct ast_vhub_ep *ep = &dev->ep0;

        ast_vhub_nuke(ep, -EIO);
        ep->ep0.state = ep0_state_token;
}

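/*
 * Initialize an EP0 structure. This is used both for the vHub's own
 * control endpoint (dev == NULL) and for each downstream device's EP0.
 * The vHub itself uses slot 0 of the shared SETUP registers and EP0
 * buffers; device N uses slot N + 1.
 */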
void ast_vhub_init_ep0(struct ast_vhub *vhub, struct ast_vhub_ep *ep,
                       struct ast_vhub_dev *dev)
{
        memset(ep, 0, sizeof(*ep));

        INIT_LIST_HEAD(&ep->ep.ep_list);
        INIT_LIST_HEAD(&ep->queue);
        ep->ep.ops = &ast_vhub_ep0_ops;
        ep->ep.name = "ep0";
        ep->ep.caps.type_control = true;
        usb_ep_set_maxpacket_limit(&ep->ep, AST_VHUB_EP0_MAX_PACKET);
        ep->d_idx = 0;
        ep->dev = dev;
        ep->vhub = vhub;
        ep->ep0.state = ep0_state_token;
        INIT_LIST_HEAD(&ep->ep0.req.queue);
        ep->ep0.req.internal = true;

        /* Small difference between vHub and devices */
        if (dev) {
                ep->ep0.ctlstat = dev->regs + AST_VHUB_DEV_EP0_CTRL;
                ep->ep0.setup = vhub->regs +
                        AST_VHUB_SETUP0 + 8 * (dev->index + 1);
                ep->buf = vhub->ep0_bufs +
                        AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
                ep->buf_dma = vhub->ep0_bufs_dma +
                        AST_VHUB_EP0_MAX_PACKET * (dev->index + 1);
        } else {
                ep->ep0.ctlstat = vhub->regs + AST_VHUB_EP0_CTRL;
                ep->ep0.setup = vhub->regs + AST_VHUB_SETUP0;
                ep->buf = vhub->ep0_bufs;
                ep->buf_dma = vhub->ep0_bufs_dma;
        }
}