Merge tag 'timers-core-2024-01-21' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-2.6-microblaze.git] / drivers / usb / host / xhci-dbgcap.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * xhci-dbgcap.c - xHCI debug capability support
4  *
5  * Copyright (C) 2017 Intel Corporation
6  *
7  * Author: Lu Baolu <baolu.lu@linux.intel.com>
8  */
9 #include <linux/bug.h>
10 #include <linux/device.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/errno.h>
13 #include <linux/kstrtox.h>
14 #include <linux/list.h>
15 #include <linux/nls.h>
16 #include <linux/pm_runtime.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/string.h>
20 #include <linux/sysfs.h>
21 #include <linux/types.h>
22 #include <linux/workqueue.h>
23
24 #include <linux/io-64-nonatomic-lo-hi.h>
25
26 #include <asm/byteorder.h>
27
28 #include "xhci.h"
29 #include "xhci-trace.h"
30 #include "xhci-dbgcap.h"
31
32 static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
33 {
34         if (!ctx)
35                 return;
36         dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
37         kfree(ctx);
38 }
39
40 /* we use only one segment for DbC rings */
41 static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
42 {
43         if (!ring)
44                 return;
45
46         if (ring->first_seg) {
47                 dma_free_coherent(dev, TRB_SEGMENT_SIZE,
48                                   ring->first_seg->trbs,
49                                   ring->first_seg->dma);
50                 kfree(ring->first_seg);
51         }
52         kfree(ring);
53 }
54
/*
 * Fill in the UTF-16LE string descriptors (serial, product, manufacturer and
 * string0/LANGID) in the pre-allocated string table.
 *
 * Returns the packed length word for the DbC info context: one descriptor
 * length per byte, with string0 (always 4) in the LSB, then manufacturer,
 * product, and serial in the MSB.
 */
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
	struct usb_string_descriptor	*s_desc;
	u32				string_length;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	/* bLength: 2-byte descriptor header plus one u16 per character. */
	s_desc->bLength		= (strlen(DBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		= s_desc->bLength;
	string_length		<<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* Manufacture string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
			strlen(DBC_STRING_MANUFACTURER),
			UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
			DBC_MAX_STRING_LENGTH);

	s_desc->bLength		= (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType	= USB_DT_STRING;
	string_length		+= s_desc->bLength;
	string_length		<<= 8;

	/* String0: length 4, descriptor type, LANGID 0x0409 (US English). */
	strings->string0[0]	= 4;
	strings->string0[1]	= USB_DT_STRING;
	strings->string0[2]	= 0x09;
	strings->string0[3]	= 0x04;
	string_length		+= 4;

	return string_length;
}
103
/*
 * Program the DbC info and endpoint contexts and point the controller's
 * DbC registers (DCCP, DEVINFO1/2) at them.
 *
 * @string_length: packed per-descriptor length word produced by
 *                 xhci_dbc_populate_strings().
 */
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
	struct dbc_info_context *info;
	struct xhci_ep_ctx	*ep_ctx;
	u32			dev_info;
	dma_addr_t		deq, dma;
	unsigned int		max_burst;

	if (!dbc)
		return;

	/*
	 * Populate info Context: the four string descriptors live back to
	 * back in one DMA buffer, DBC_MAX_STRING_LENGTH bytes apart.
	 */
	info			= (struct dbc_info_context *)dbc->ctx->bytes;
	dma			= dbc->string_dma;
	info->string0		= cpu_to_le64(dma);
	info->manufacturer	= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
	info->product		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
	info->serial		= cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
	info->length		= cpu_to_le32(string_length);

	/* Populate bulk out endpoint context (1024 byte max packet size): */
	ep_ctx			= dbc_bulkout_ctx(dbc);
	max_burst		= DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
	deq			= dbc_bulkout_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
	/* Low bit of the dequeue pointer carries the consumer cycle state. */
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_out->cycle_state);

	/* Populate bulk in endpoint context (same max burst as OUT): */
	ep_ctx			= dbc_bulkin_ctx(dbc);
	deq			= dbc_bulkin_enq(dbc);
	ep_ctx->ep_info		= 0;
	ep_ctx->ep_info2	= dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
	ep_ctx->deq		= cpu_to_le64(deq | dbc->ring_in->cycle_state);

	/* Set DbC context and info registers: */
	lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

	dev_info = (dbc->idVendor << 16) | dbc->bInterfaceProtocol;
	writel(dev_info, &dbc->regs->devinfo1);

	dev_info = (dbc->bcdDevice << 16) | dbc->idProduct;
	writel(dev_info, &dbc->regs->devinfo2);
}
148
/*
 * Complete a request: unlink it from the pending list, unmap its buffer,
 * record @status (unless the request already carries a final status) and
 * invoke the owner's completion callback.
 *
 * Caller must hold dbc->lock; the lock is dropped around the completion
 * callback (hence the __releases/__acquires annotations) so the callback
 * may requeue without deadlocking.
 */
static void xhci_dbc_giveback(struct dbc_request *req, int status)
	__releases(&dbc->lock)
	__acquires(&dbc->lock)
{
	struct xhci_dbc		*dbc = req->dbc;
	struct device		*dev = dbc->dev;

	list_del_init(&req->list_pending);
	req->trb_dma = 0;
	req->trb = NULL;

	/* Only overwrite the status if the transfer is still in flight. */
	if (req->status == -EINPROGRESS)
		req->status = status;

	trace_xhci_dbc_giveback_request(req);

	dma_unmap_single(dev,
			 req->dma,
			 req->length,
			 dbc_ep_dma_direction(req));

	/* Give back the transfer request: */
	spin_unlock(&dbc->lock);
	req->complete(dbc, req);
	spin_lock(&dbc->lock);
}
175
/*
 * Neutralize a pending request's TRB and give the request back with
 * -ESHUTDOWN. The TRB is turned into a no-op while preserving its cycle
 * bit, so the controller skips it harmlessly if it is still scanning
 * the ring.
 */
static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
	union xhci_trb	*trb = req->trb;

	trb->generic.field[0]	= 0;
	trb->generic.field[1]	= 0;
	trb->generic.field[2]	= 0;
	/* Keep only the cycle bit, then stamp in the no-op TRB type. */
	trb->generic.field[3]	&= cpu_to_le32(TRB_CYCLE);
	trb->generic.field[3]	|= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

	xhci_dbc_giveback(req, -ESHUTDOWN);
}
188
189 static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
190 {
191         struct dbc_request      *req, *tmp;
192
193         list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
194                 xhci_dbc_flush_single_request(req);
195 }
196
197 static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
198 {
199         xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
200         xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
201 }
202
203 struct dbc_request *
204 dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
205 {
206         struct dbc_request      *req;
207
208         if (direction != BULK_IN &&
209             direction != BULK_OUT)
210                 return NULL;
211
212         if (!dbc)
213                 return NULL;
214
215         req = kzalloc(sizeof(*req), flags);
216         if (!req)
217                 return NULL;
218
219         req->dbc = dbc;
220         INIT_LIST_HEAD(&req->list_pending);
221         INIT_LIST_HEAD(&req->list_pool);
222         req->direction = direction;
223
224         trace_xhci_dbc_alloc_request(req);
225
226         return req;
227 }
228
/* Free a request previously obtained from dbc_alloc_request(). */
void
dbc_free_request(struct dbc_request *req)
{
	trace_xhci_dbc_free_request(req);
	kfree(req);
}
236
/*
 * Write one TRB at the ring's enqueue position and advance the enqueue
 * pointer. Field values are passed in CPU endianness and converted here.
 *
 * Note: the caller is responsible for setting the cycle bit in @field4
 * appropriately (see xhci_dbc_queue_bulk_tx).
 */
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
		   u32 field2, u32 field3, u32 field4)
{
	union xhci_trb		*trb, *next;

	trb = ring->enqueue;
	trb->generic.field[0]	= cpu_to_le32(field1);
	trb->generic.field[1]	= cpu_to_le32(field2);
	trb->generic.field[2]	= cpu_to_le32(field3);
	trb->generic.field[3]	= cpu_to_le32(field4);

	trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

	ring->num_trbs_free--;
	next = ++(ring->enqueue);
	/*
	 * If we have reached the link TRB at the end of the (single)
	 * segment: hand it to the controller by flipping its cycle bit,
	 * wrap the enqueue pointer and toggle our producer cycle state.
	 */
	if (TRB_TYPE_LINK_LE32(next->link.control)) {
		next->link.control ^= cpu_to_le32(TRB_CYCLE);
		ring->enqueue = ring->enq_seg->trbs;
		ring->cycle_state ^= 1;
	}
}
259
/*
 * Queue a single normal TRB for @req on @dep's transfer ring and ring the
 * doorbell. The TRB is first written with an *inverted* cycle bit so the
 * controller ignores it, then (after a write barrier) the cycle bit is
 * flipped to the correct value to hand the TRB over atomically.
 *
 * Returns 0 on success, -EBUSY if the ring is full. Caller holds dbc->lock.
 */
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
				  struct dbc_request *req)
{
	u64			addr;
	union xhci_trb		*trb;
	unsigned int		num_trbs;
	struct xhci_dbc		*dbc = req->dbc;
	struct xhci_ring	*ring = dep->ring;
	u32			length, control, cycle;

	/* DbC transfers are expected to fit in one TRB. */
	num_trbs = count_trbs(req->dma, req->length);
	WARN_ON(num_trbs != 1);
	if (ring->num_trbs_free < num_trbs)
		return -EBUSY;

	addr	= req->dma;
	trb	= ring->enqueue;
	cycle	= ring->cycle_state;
	length	= TRB_LEN(req->length);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	/*
	 * Give the TRB the opposite of the ring's cycle state for now, so
	 * the controller does not consume it before it is fully written.
	 * NOTE(review): control is a CPU-endian u32 here but is masked with
	 * cpu_to_le32() values — benign on little-endian since TRB_CYCLE is
	 * bit 0; verify on big-endian targets.
	 */
	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	req->trb = ring->enqueue;
	req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	xhci_dbc_queue_trb(ring,
			   lower_32_bits(addr),
			   upper_32_bits(addr),
			   length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit:
	 */
	wmb();

	/* Now hand the TRB to the controller by setting the real cycle bit. */
	if (cycle)
		trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
	else
		trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

	writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

	return 0;
}
308
/*
 * Map @req's buffer for DMA and queue it on its endpoint's transfer ring,
 * then link it onto the endpoint's pending list.
 *
 * Caller holds dbc->lock and has verified dbc->state == DS_CONFIGURED
 * (see dbc_ep_queue).
 *
 * Returns 0 on success, -EINVAL for an empty request, -EFAULT on DMA
 * mapping or TRB queueing failure (the buffer is unmapped again on the
 * latter).
 */
static int
dbc_ep_do_queue(struct dbc_request *req)
{
	int			ret;
	struct xhci_dbc		*dbc = req->dbc;
	struct device		*dev = dbc->dev;
	struct dbc_ep		*dep = &dbc->eps[req->direction];

	if (!req->length || !req->buf)
		return -EINVAL;

	req->actual		= 0;
	req->status		= -EINPROGRESS;

	req->dma = dma_map_single(dev,
				  req->buf,
				  req->length,
				  dbc_ep_dma_direction(dep));
	if (dma_mapping_error(dev, req->dma)) {
		dev_err(dbc->dev, "failed to map buffer\n");
		return -EFAULT;
	}

	ret = xhci_dbc_queue_bulk_tx(dep, req);
	if (ret) {
		dev_err(dbc->dev, "failed to queue trbs\n");
		/* Undo the mapping; the request was never put on the ring. */
		dma_unmap_single(dev,
				 req->dma,
				 req->length,
				 dbc_ep_dma_direction(dep));
		return -EFAULT;
	}

	list_add_tail(&req->list_pending, &dep->list_pending);

	return 0;
}
346
/*
 * Public entry point to queue a DbC transfer request.
 *
 * Validates the request, queues it under dbc->lock if the DbC is in the
 * configured state, and kicks the event work to poll for completion.
 *
 * Returns 0 on success, -ENODEV/-EINVAL for bad arguments, -ESHUTDOWN if
 * the DbC is not configured, or the error from dbc_ep_do_queue().
 */
int dbc_ep_queue(struct dbc_request *req)
{
	unsigned long		flags;
	struct xhci_dbc		*dbc = req->dbc;
	int			ret = -ESHUTDOWN;

	if (!dbc)
		return -ENODEV;

	if (req->direction != BULK_IN &&
	    req->direction != BULK_OUT)
		return -EINVAL;

	spin_lock_irqsave(&dbc->lock, flags);
	if (dbc->state == DS_CONFIGURED)
		ret = dbc_ep_do_queue(req);
	spin_unlock_irqrestore(&dbc->lock, flags);

	/* Run the event handler immediately to make forward progress. */
	mod_delayed_work(system_wq, &dbc->event_work, 0);

	trace_xhci_dbc_queue_request(req);

	return ret;
}
371
372 static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
373 {
374         struct dbc_ep           *dep;
375
376         dep                     = &dbc->eps[direction];
377         dep->dbc                = dbc;
378         dep->direction          = direction;
379         dep->ring               = direction ? dbc->ring_in : dbc->ring_out;
380
381         INIT_LIST_HEAD(&dep->list_pending);
382 }
383
384 static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
385 {
386         xhci_dbc_do_eps_init(dbc, BULK_OUT);
387         xhci_dbc_do_eps_init(dbc, BULK_IN);
388 }
389
390 static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
391 {
392         memset(dbc->eps, 0, sizeof_field(struct xhci_dbc, eps));
393 }
394
/*
 * Allocate and populate a single-entry Event Ring Segment Table covering
 * @evt_ring's one segment. Returns 0 on success, -ENOMEM on failure.
 */
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
		    struct xhci_erst *erst, gfp_t flags)
{
	erst->entries = dma_alloc_coherent(dev, sizeof(*erst->entries),
					   &erst->erst_dma_addr, flags);
	if (!erst->entries)
		return -ENOMEM;

	/* DbC uses exactly one segment, so one ERST entry suffices. */
	erst->num_entries = 1;
	erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
	erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
	erst->entries[0].rsvd = 0;
	return 0;
}
409
410 static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
411 {
412         dma_free_coherent(dev, sizeof(*erst->entries), erst->entries,
413                           erst->erst_dma_addr);
414         erst->entries = NULL;
415 }
416
417 static struct xhci_container_ctx *
418 dbc_alloc_ctx(struct device *dev, gfp_t flags)
419 {
420         struct xhci_container_ctx *ctx;
421
422         ctx = kzalloc(sizeof(*ctx), flags);
423         if (!ctx)
424                 return NULL;
425
426         /* xhci 7.6.9, all three contexts; info, ep-out and ep-in. Each 64 bytes*/
427         ctx->size = 3 * DBC_CONTEXT_SIZE;
428         ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
429         if (!ctx->bytes) {
430                 kfree(ctx);
431                 return NULL;
432         }
433         return ctx;
434 }
435
/*
 * Allocate a single-segment xhci ring of @type for the DbC.
 *
 * Transfer rings get a link TRB at the end of the segment that points
 * back to its start (with the toggle-cycle flag); the event ring wraps
 * implicitly and takes no link TRB.
 *
 * Returns the ring, or NULL on allocation failure.
 */
static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;
	dma_addr_t dma;

	ring = kzalloc(sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ring->num_segs = 1;
	ring->type = type;

	seg = kzalloc(sizeof(*seg), flags);
	if (!seg)
		goto seg_fail;

	/* Single segment: it is first, last, and its own successor. */
	ring->first_seg = seg;
	ring->last_seg = seg;
	seg->next = seg;

	seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
	if (!seg->trbs)
		goto dma_fail;

	seg->dma = dma;

	/* Only event ring does not use link TRB */
	if (type != TYPE_EVENT) {
		union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

		/* Link back to the start of this same segment. */
		trb->link.segment_ptr = cpu_to_le64(dma);
		trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
	}
	INIT_LIST_HEAD(&ring->td_list);
	xhci_initialize_ring_info(ring, 1);
	return ring;
dma_fail:
	kfree(seg);
seg_fail:
	kfree(ring);
	return NULL;
}
480
/*
 * Allocate all DbC data structures (event/transfer rings, ERST, context,
 * string table), program the controller's ERST/ERDP registers, and move
 * the DbC to the DS_INITIALIZED state.
 *
 * On any failure, everything allocated so far is unwound via the goto
 * chain and -ENOMEM is returned.
 */
static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
	int			ret;
	dma_addr_t		deq;
	u32			string_length;
	struct device		*dev = dbc->dev;

	/* Allocate various rings for events and transfers: */
	dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
	if (!dbc->ring_evt)
		goto evt_fail;

	dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_in)
		goto in_fail;

	dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
	if (!dbc->ring_out)
		goto out_fail;

	/* Allocate and populate ERST: */
	ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
	if (ret)
		goto erst_fail;

	/* Allocate context data structure: */
	dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
	if (!dbc->ctx)
		goto ctx_fail;

	/* Allocate the string table: */
	dbc->string_size = sizeof(*dbc->string);
	dbc->string = dma_alloc_coherent(dev, dbc->string_size,
					 &dbc->string_dma, flags);
	if (!dbc->string)
		goto string_fail;

	/* Setup ERST register: */
	writel(dbc->erst.erst_size, &dbc->regs->ersts);

	lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
	/* Point ERDP at the current event ring dequeue position. */
	deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
				   dbc->ring_evt->dequeue);
	lo_hi_writeq(deq, &dbc->regs->erdp);

	/* Setup strings and contexts: */
	string_length = xhci_dbc_populate_strings(dbc->string);
	xhci_dbc_init_contexts(dbc, string_length);

	xhci_dbc_eps_init(dbc);
	dbc->state = DS_INITIALIZED;

	return 0;

	/* Unwind in reverse allocation order. */
string_fail:
	dbc_free_ctx(dev, dbc->ctx);
	dbc->ctx = NULL;
ctx_fail:
	dbc_erst_free(dev, &dbc->erst);
erst_fail:
	dbc_ring_free(dev, dbc->ring_out);
	dbc->ring_out = NULL;
out_fail:
	dbc_ring_free(dev, dbc->ring_in);
	dbc->ring_in = NULL;
in_fail:
	dbc_ring_free(dev, dbc->ring_evt);
	dbc->ring_evt = NULL;
evt_fail:
	return -ENOMEM;
}
552
/*
 * Free everything allocated by xhci_dbc_mem_init(). Safe to call with a
 * NULL @dbc; the free helpers tolerate NULL members for partial states.
 */
static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
	if (!dbc)
		return;

	xhci_dbc_eps_exit(dbc);

	dma_free_coherent(dbc->dev, dbc->string_size, dbc->string, dbc->string_dma);
	dbc->string = NULL;

	dbc_free_ctx(dbc->dev, dbc->ctx);
	dbc->ctx = NULL;

	dbc_erst_free(dbc->dev, &dbc->erst);
	dbc_ring_free(dbc->dev, dbc->ring_out);
	dbc_ring_free(dbc->dev, dbc->ring_in);
	dbc_ring_free(dbc->dev, dbc->ring_evt);
	/* Clear stale pointers so a later cleanup/init starts fresh. */
	dbc->ring_in = NULL;
	dbc->ring_out = NULL;
	dbc->ring_evt = NULL;
}
574
/*
 * Enable the DbC: reset the control register, allocate DbC memory, then
 * set the enable bits and wait for the controller to acknowledge.
 *
 * Caller holds dbc->lock (hence GFP_ATOMIC for the allocations).
 * Returns 0 and moves to DS_ENABLED on success, negative errno otherwise.
 */
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
	int			ret;
	u32			ctrl;

	if (dbc->state != DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	/* Wait for the DBC_ENABLE bit to read back as 0 (disable done). */
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     0, 1000);
	if (ret)
		return ret;

	ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
	if (ret)
		return ret;

	ctrl = readl(&dbc->regs->control);
	writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
	       &dbc->regs->control);
	/* Now wait for DBC_ENABLE to latch as 1 (enable done). */
	ret = xhci_handshake(&dbc->regs->control,
			     DBC_CTRL_DBC_ENABLE,
			     DBC_CTRL_DBC_ENABLE, 1000);
	if (ret)
		return ret;

	dbc->state = DS_ENABLED;

	return 0;
}
607
/*
 * Disable the DbC by clearing its control register.
 *
 * Caller holds dbc->lock. Returns -EINVAL if already disabled so the
 * caller can skip memory cleanup and the pm_runtime put.
 */
static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
	if (dbc->state == DS_DISABLED)
		return -EINVAL;

	writel(0, &dbc->regs->control);
	dbc->state = DS_DISABLED;

	return 0;
}
618
/*
 * Start the DbC: take a pm_runtime reference, enable the controller under
 * the lock, and schedule the event-polling work.
 *
 * The pm_runtime reference is dropped again on failure; on success it is
 * held until xhci_dbc_stop().
 */
static int xhci_dbc_start(struct xhci_dbc *dbc)
{
	int			ret;
	unsigned long		flags;

	WARN_ON(!dbc);

	pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_start(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	if (ret) {
		pm_runtime_put(dbc->dev); /* note this was self.controller */
		return ret;
	}

	/* Begin polling for DbC events. */
	return mod_delayed_work(system_wq, &dbc->event_work, 1);
}
639
/*
 * Stop the DbC: notify the function driver if it was connected, cancel
 * the event work, disable the controller, free DbC memory and drop the
 * pm_runtime reference taken in xhci_dbc_start().
 *
 * If the DbC was already disabled (xhci_do_dbc_stop() fails) the cleanup
 * and pm put are skipped, since start never completed.
 */
static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
	int ret;
	unsigned long		flags;

	WARN_ON(!dbc);

	switch (dbc->state) {
	case DS_DISABLED:
		return;
	case DS_CONFIGURED:
	case DS_STALLED:
		/* The function driver saw a connect; tell it we are gone. */
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	default:
		break;
	}

	/* Stop the polling work before touching hardware state. */
	cancel_delayed_work_sync(&dbc->event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	ret = xhci_do_dbc_stop(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);
	if (ret)
		return;

	xhci_dbc_mem_cleanup(dbc);
	pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
}
670
/*
 * Handle a port-status-change event: log which change bits are set and
 * acknowledge them by writing PORTSC back (the change bits are RW1C).
 *
 * @event is currently unused; the status is read from the PORTSC register.
 */
static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
	u32			portsc;

	portsc = readl(&dbc->regs->portsc);
	if (portsc & DBC_PORTSC_CONN_CHANGE)
		dev_info(dbc->dev, "DbC port connect change\n");

	if (portsc & DBC_PORTSC_RESET_CHANGE)
		dev_info(dbc->dev, "DbC port reset change\n");

	if (portsc & DBC_PORTSC_LINK_CHANGE)
		dev_info(dbc->dev, "DbC port link status change\n");

	if (portsc & DBC_PORTSC_CONFIG_CHANGE)
		dev_info(dbc->dev, "DbC config error change\n");

	/* Port reset change bit will be cleared in other place: */
	writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}
692
/*
 * Handle a transfer event TRB: decode the completion code, find the
 * pending request whose TRB DMA address matches the event, update its
 * actual length and give it back.
 */
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
	struct dbc_ep		*dep;
	struct xhci_ring	*ring;
	int			ep_id;
	int			status;
	u32			comp_code;
	size_t			remain_length;
	struct dbc_request	*req = NULL, *r;

	comp_code	= GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
	remain_length	= EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
	ep_id		= TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
	dep		= (ep_id == EPID_OUT) ?
				get_out_ep(dbc) : get_in_ep(dbc);
	ring		= dep->ring;

	switch (comp_code) {
	case COMP_SUCCESS:
		/* Full transfer: nothing remained untransferred. */
		remain_length = 0;
		fallthrough;
	case COMP_SHORT_PACKET:
		status = 0;
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
		dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
		/* NOTE(review): negated completion code, not a real errno. */
		status = -comp_code;
		break;
	default:
		dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
		status = -comp_code;
		break;
	}

	/* Match the pending request: */
	list_for_each_entry(r, &dep->list_pending, list_pending) {
		if (r->trb_dma == event->trans_event.buffer) {
			req = r;
			break;
		}
	}

	if (!req) {
		dev_warn(dbc->dev, "no matched request\n");
		return;
	}

	trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

	/* The TRB this event completed is free again. */
	ring->num_trbs_free++;
	req->actual = req->length - remain_length;
	xhci_dbc_giveback(req, status);
}
749
750 static void inc_evt_deq(struct xhci_ring *ring)
751 {
752         /* If on the last TRB of the segment go back to the beginning */
753         if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
754                 ring->cycle_state ^= 1;
755                 ring->dequeue = ring->deq_seg->trbs;
756                 return;
757         }
758         ring->dequeue++;
759 }
760
/*
 * Run one pass of the DbC state machine, then drain the event ring.
 *
 * Caller holds dbc->lock. The return value tells the work handler what to
 * do next: EVT_GSER (just configured — notify driver), EVT_DISC
 * (disconnected — notify driver), EVT_DONE (keep polling), or EVT_ERR
 * (invalid state — stop polling).
 */
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
	dma_addr_t		deq;
	struct dbc_ep		*dep;
	union xhci_trb		*evt;
	u32			ctrl, portsc;
	bool			update_erdp = false;

	/* DbC state machine: */
	switch (dbc->state) {
	case DS_DISABLED:
	case DS_INITIALIZED:
		/* No events can be produced in these states. */
		return EVT_ERR;
	case DS_ENABLED:
		/* Waiting for the debug host to attach. */
		portsc = readl(&dbc->regs->portsc);
		if (portsc & DBC_PORTSC_CONN_STATUS) {
			dbc->state = DS_CONNECTED;
			dev_info(dbc->dev, "DbC connected\n");
		}

		return EVT_DONE;
	case DS_CONNECTED:
		/* Waiting for the controller to enter the run state. */
		ctrl = readl(&dbc->regs->control);
		if (ctrl & DBC_CTRL_DBC_RUN) {
			dbc->state = DS_CONFIGURED;
			dev_info(dbc->dev, "DbC configured\n");
			/* Ack all outstanding port change bits (RW1C). */
			portsc = readl(&dbc->regs->portsc);
			writel(portsc, &dbc->regs->portsc);
			return EVT_GSER;
		}

		return EVT_DONE;
	case DS_CONFIGURED:
		/* Handle cable unplug event: */
		portsc = readl(&dbc->regs->portsc);
		if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
		    !(portsc & DBC_PORTSC_CONN_STATUS)) {
			dev_info(dbc->dev, "DbC cable unplugged\n");
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle debug port reset event: */
		if (portsc & DBC_PORTSC_RESET_CHANGE) {
			dev_info(dbc->dev, "DbC port reset\n");
			writel(portsc, &dbc->regs->portsc);
			dbc->state = DS_ENABLED;
			xhci_dbc_flush_requests(dbc);

			return EVT_DISC;
		}

		/* Handle endpoint stall event: */
		ctrl = readl(&dbc->regs->control);
		if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
		    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
			dev_info(dbc->dev, "DbC Endpoint stall\n");
			dbc->state = DS_STALLED;

			/* Flush only the halted direction(s). */
			if (ctrl & DBC_CTRL_HALT_IN_TR) {
				dep = get_in_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			if (ctrl & DBC_CTRL_HALT_OUT_TR) {
				dep = get_out_ep(dbc);
				xhci_dbc_flush_endpoint_requests(dep);
			}

			return EVT_DONE;
		}

		/* Clear DbC run change bit: */
		if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
			writel(ctrl, &dbc->regs->control);
			ctrl = readl(&dbc->regs->control);
		}

		break;
	case DS_STALLED:
		/* Leave the stalled state once both halts clear and DbC runs. */
		ctrl = readl(&dbc->regs->control);
		if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
		    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
		    (ctrl & DBC_CTRL_DBC_RUN)) {
			dbc->state = DS_CONFIGURED;
			break;
		}

		return EVT_DONE;
	default:
		dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
		break;
	}

	/* Handle the events in the event ring: */
	evt = dbc->ring_evt->dequeue;
	/* A TRB belongs to us while its cycle bit matches our cycle state. */
	while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
			dbc->ring_evt->cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

		switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
		case TRB_TYPE(TRB_PORT_STATUS):
			dbc_handle_port_status(dbc, evt);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			dbc_handle_xfer_event(dbc, evt);
			break;
		default:
			/* Ignore event types we do not know about. */
			break;
		}

		inc_evt_deq(dbc->ring_evt);

		evt = dbc->ring_evt->dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp) {
		deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
					   dbc->ring_evt->dequeue);
		lo_hi_writeq(deq, &dbc->regs->erdp);
	}

	return EVT_DONE;
}
896
/*
 * Delayed-work handler that polls the DbC for events.
 *
 * Runs xhci_dbc_do_handle_events() under the lock, then dispatches driver
 * configure/disconnect callbacks outside the lock. Reschedules itself
 * unless an error state was reported.
 */
static void xhci_dbc_handle_events(struct work_struct *work)
{
	enum evtreturn		evtr;
	struct xhci_dbc		*dbc;
	unsigned long		flags;

	dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);

	spin_lock_irqsave(&dbc->lock, flags);
	evtr = xhci_dbc_do_handle_events(dbc);
	spin_unlock_irqrestore(&dbc->lock, flags);

	/* Callbacks are invoked without holding dbc->lock. */
	switch (evtr) {
	case EVT_GSER:
		if (dbc->driver->configure)
			dbc->driver->configure(dbc);
		break;
	case EVT_DISC:
		if (dbc->driver->disconnect)
			dbc->driver->disconnect(dbc);
		break;
	case EVT_DONE:
		break;
	default:
		/* EVT_ERR or unknown: stop polling entirely. */
		dev_info(dbc->dev, "stop handling dbc events\n");
		return;
	}

	mod_delayed_work(system_wq, &dbc->event_work, 1);
}
927
/*
 * Human-readable name for each DbC state, indexed by the state enum;
 * reported to userspace through the "dbc" sysfs attribute (dbc_show()).
 */
static const char * const dbc_state_strings[DS_MAX] = {
	[DS_DISABLED] = "disabled",
	[DS_INITIALIZED] = "initialized",
	[DS_ENABLED] = "enabled",
	[DS_CONNECTED] = "connected",
	[DS_CONFIGURED] = "configured",
	[DS_STALLED] = "stalled",
};
936
937 static ssize_t dbc_show(struct device *dev,
938                         struct device_attribute *attr,
939                         char *buf)
940 {
941         struct xhci_dbc         *dbc;
942         struct xhci_hcd         *xhci;
943
944         xhci = hcd_to_xhci(dev_get_drvdata(dev));
945         dbc = xhci->dbc;
946
947         if (dbc->state >= ARRAY_SIZE(dbc_state_strings))
948                 return sysfs_emit(buf, "unknown\n");
949
950         return sysfs_emit(buf, "%s\n", dbc_state_strings[dbc->state]);
951 }
952
953 static ssize_t dbc_store(struct device *dev,
954                          struct device_attribute *attr,
955                          const char *buf, size_t count)
956 {
957         struct xhci_hcd         *xhci;
958         struct xhci_dbc         *dbc;
959
960         xhci = hcd_to_xhci(dev_get_drvdata(dev));
961         dbc = xhci->dbc;
962
963         if (sysfs_streq(buf, "enable"))
964                 xhci_dbc_start(dbc);
965         else if (sysfs_streq(buf, "disable"))
966                 xhci_dbc_stop(dbc);
967         else
968                 return -EINVAL;
969
970         return count;
971 }
972
973 static ssize_t dbc_idVendor_show(struct device *dev,
974                             struct device_attribute *attr,
975                             char *buf)
976 {
977         struct xhci_dbc         *dbc;
978         struct xhci_hcd         *xhci;
979
980         xhci = hcd_to_xhci(dev_get_drvdata(dev));
981         dbc = xhci->dbc;
982
983         return sysfs_emit(buf, "%04x\n", dbc->idVendor);
984 }
985
986 static ssize_t dbc_idVendor_store(struct device *dev,
987                              struct device_attribute *attr,
988                              const char *buf, size_t size)
989 {
990         struct xhci_dbc         *dbc;
991         struct xhci_hcd         *xhci;
992         void __iomem            *ptr;
993         u16                     value;
994         u32                     dev_info;
995         int ret;
996
997         ret = kstrtou16(buf, 0, &value);
998         if (ret)
999                 return ret;
1000
1001         xhci = hcd_to_xhci(dev_get_drvdata(dev));
1002         dbc = xhci->dbc;
1003         if (dbc->state != DS_DISABLED)
1004                 return -EBUSY;
1005
1006         dbc->idVendor = value;
1007         ptr = &dbc->regs->devinfo1;
1008         dev_info = readl(ptr);
1009         dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
1010         writel(dev_info, ptr);
1011
1012         return size;
1013 }
1014
1015 static ssize_t dbc_idProduct_show(struct device *dev,
1016                             struct device_attribute *attr,
1017                             char *buf)
1018 {
1019         struct xhci_dbc         *dbc;
1020         struct xhci_hcd         *xhci;
1021
1022         xhci = hcd_to_xhci(dev_get_drvdata(dev));
1023         dbc = xhci->dbc;
1024
1025         return sysfs_emit(buf, "%04x\n", dbc->idProduct);
1026 }
1027
1028 static ssize_t dbc_idProduct_store(struct device *dev,
1029                              struct device_attribute *attr,
1030                              const char *buf, size_t size)
1031 {
1032         struct xhci_dbc         *dbc;
1033         struct xhci_hcd         *xhci;
1034         void __iomem            *ptr;
1035         u32                     dev_info;
1036         u16                     value;
1037         int ret;
1038
1039         ret = kstrtou16(buf, 0, &value);
1040         if (ret)
1041                 return ret;
1042
1043         xhci = hcd_to_xhci(dev_get_drvdata(dev));
1044         dbc = xhci->dbc;
1045         if (dbc->state != DS_DISABLED)
1046                 return -EBUSY;
1047
1048         dbc->idProduct = value;
1049         ptr = &dbc->regs->devinfo2;
1050         dev_info = readl(ptr);
1051         dev_info = (dev_info & ~(0xffffu)) | value;
1052         writel(dev_info, ptr);
1053         return size;
1054 }
1055
1056 static ssize_t dbc_bcdDevice_show(struct device *dev,
1057                                    struct device_attribute *attr,
1058                                    char *buf)
1059 {
1060         struct xhci_dbc *dbc;
1061         struct xhci_hcd *xhci;
1062
1063         xhci = hcd_to_xhci(dev_get_drvdata(dev));
1064         dbc = xhci->dbc;
1065
1066         return sysfs_emit(buf, "%04x\n", dbc->bcdDevice);
1067 }
1068
1069 static ssize_t dbc_bcdDevice_store(struct device *dev,
1070                                     struct device_attribute *attr,
1071                                     const char *buf, size_t size)
1072 {
1073         struct xhci_dbc *dbc;
1074         struct xhci_hcd *xhci;
1075         void __iomem *ptr;
1076         u32 dev_info;
1077         u16 value;
1078         int ret;
1079
1080         ret = kstrtou16(buf, 0, &value);
1081         if (ret)
1082                 return ret;
1083
1084         xhci = hcd_to_xhci(dev_get_drvdata(dev));
1085         dbc = xhci->dbc;
1086         if (dbc->state != DS_DISABLED)
1087                 return -EBUSY;
1088
1089         dbc->bcdDevice = value;
1090         ptr = &dbc->regs->devinfo2;
1091         dev_info = readl(ptr);
1092         dev_info = (dev_info & ~(0xffffu << 16)) | (value << 16);
1093         writel(dev_info, ptr);
1094
1095         return size;
1096 }
1097
1098 static ssize_t dbc_bInterfaceProtocol_show(struct device *dev,
1099                                  struct device_attribute *attr,
1100                                  char *buf)
1101 {
1102         struct xhci_dbc *dbc;
1103         struct xhci_hcd *xhci;
1104
1105         xhci = hcd_to_xhci(dev_get_drvdata(dev));
1106         dbc = xhci->dbc;
1107
1108         return sysfs_emit(buf, "%02x\n", dbc->bInterfaceProtocol);
1109 }
1110
1111 static ssize_t dbc_bInterfaceProtocol_store(struct device *dev,
1112                                   struct device_attribute *attr,
1113                                   const char *buf, size_t size)
1114 {
1115         struct xhci_dbc *dbc;
1116         struct xhci_hcd *xhci;
1117         void __iomem *ptr;
1118         u32 dev_info;
1119         u8 value;
1120         int ret;
1121
1122         /* bInterfaceProtocol is 8 bit, but... */
1123         ret = kstrtou8(buf, 0, &value);
1124         if (ret)
1125                 return ret;
1126
1127         /* ...xhci only supports values 0 and 1 */
1128         if (value > 1)
1129                 return -EINVAL;
1130
1131         xhci = hcd_to_xhci(dev_get_drvdata(dev));
1132         dbc = xhci->dbc;
1133         if (dbc->state != DS_DISABLED)
1134                 return -EBUSY;
1135
1136         dbc->bInterfaceProtocol = value;
1137         ptr = &dbc->regs->devinfo1;
1138         dev_info = readl(ptr);
1139         dev_info = (dev_info & ~(0xffu)) | value;
1140         writel(dev_info, ptr);
1141
1142         return size;
1143 }
1144
/* Read-write sysfs attributes, each backed by the dbc_*_show/_store pair above. */
static DEVICE_ATTR_RW(dbc);
static DEVICE_ATTR_RW(dbc_idVendor);
static DEVICE_ATTR_RW(dbc_idProduct);
static DEVICE_ATTR_RW(dbc_bcdDevice);
static DEVICE_ATTR_RW(dbc_bInterfaceProtocol);

/* NULL-terminated attribute list; grouped below for sysfs_create_groups(). */
static struct attribute *dbc_dev_attrs[] = {
	&dev_attr_dbc.attr,
	&dev_attr_dbc_idVendor.attr,
	&dev_attr_dbc_idProduct.attr,
	&dev_attr_dbc_bcdDevice.attr,
	&dev_attr_dbc_bInterfaceProtocol.attr,
	NULL
};
/* Defines dbc_dev_groups, used by xhci_alloc_dbc()/xhci_dbc_remove(). */
ATTRIBUTE_GROUPS(dbc_dev);
1160
1161 struct xhci_dbc *
1162 xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *driver)
1163 {
1164         struct xhci_dbc         *dbc;
1165         int                     ret;
1166
1167         dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
1168         if (!dbc)
1169                 return NULL;
1170
1171         dbc->regs = base;
1172         dbc->dev = dev;
1173         dbc->driver = driver;
1174         dbc->idProduct = DBC_PRODUCT_ID;
1175         dbc->idVendor = DBC_VENDOR_ID;
1176         dbc->bcdDevice = DBC_DEVICE_REV;
1177         dbc->bInterfaceProtocol = DBC_PROTOCOL;
1178
1179         if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
1180                 goto err;
1181
1182         INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
1183         spin_lock_init(&dbc->lock);
1184
1185         ret = sysfs_create_groups(&dev->kobj, dbc_dev_groups);
1186         if (ret)
1187                 goto err;
1188
1189         return dbc;
1190 err:
1191         kfree(dbc);
1192         return NULL;
1193 }
1194
1195 /* undo what xhci_alloc_dbc() did */
1196 void xhci_dbc_remove(struct xhci_dbc *dbc)
1197 {
1198         if (!dbc)
1199                 return;
1200         /* stop hw, stop wq and call dbc->ops->stop() */
1201         xhci_dbc_stop(dbc);
1202
1203         /* remove sysfs files */
1204         sysfs_remove_groups(&dbc->dev->kobj, dbc_dev_groups);
1205
1206         kfree(dbc);
1207 }
1208
1209
1210 int xhci_create_dbc_dev(struct xhci_hcd *xhci)
1211 {
1212         struct device           *dev;
1213         void __iomem            *base;
1214         int                     ret;
1215         int                     dbc_cap_offs;
1216
1217         /* create all parameters needed resembling a dbc device */
1218         dev = xhci_to_hcd(xhci)->self.controller;
1219         base = &xhci->cap_regs->hc_capbase;
1220
1221         dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
1222         if (!dbc_cap_offs)
1223                 return -ENODEV;
1224
1225         /* already allocated and in use */
1226         if (xhci->dbc)
1227                 return -EBUSY;
1228
1229         ret = xhci_dbc_tty_probe(dev, base + dbc_cap_offs, xhci);
1230
1231         return ret;
1232 }
1233
1234 void xhci_remove_dbc_dev(struct xhci_hcd *xhci)
1235 {
1236         unsigned long           flags;
1237
1238         if (!xhci->dbc)
1239                 return;
1240
1241         xhci_dbc_tty_remove(xhci->dbc);
1242         spin_lock_irqsave(&xhci->lock, flags);
1243         xhci->dbc = NULL;
1244         spin_unlock_irqrestore(&xhci->lock, flags);
1245 }
1246
1247 #ifdef CONFIG_PM
/*
 * PM suspend hook: stop the DbC, remembering whether it was in the
 * configured state so xhci_dbc_resume() can restart it. Always returns 0.
 */
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;

	if (!dbc)
		return 0;

	/* Flag must be set before stopping, which changes dbc->state. */
	if (dbc->state == DS_CONFIGURED)
		dbc->resume_required = 1;

	xhci_dbc_stop(dbc);

	return 0;
}
1262
1263 int xhci_dbc_resume(struct xhci_hcd *xhci)
1264 {
1265         int                     ret = 0;
1266         struct xhci_dbc         *dbc = xhci->dbc;
1267
1268         if (!dbc)
1269                 return 0;
1270
1271         if (dbc->resume_required) {
1272                 dbc->resume_required = 0;
1273                 xhci_dbc_start(dbc);
1274         }
1275
1276         return ret;
1277 }
1278 #endif /* CONFIG_PM */
1279
/* One-time module init: delegates to dbc_tty_init(), propagating its result. */
int xhci_dbc_init(void)
{
	return dbc_tty_init();
}
1284
/* Module teardown counterpart of xhci_dbc_init(): delegates to dbc_tty_exit(). */
void xhci_dbc_exit(void)
{
	dbc_tty_exit();
}