// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgcap.c - xHCI debug capability support
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/nls.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-dbgcap.h"

static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
{
        if (!ctx)
                return;
        dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
        kfree(ctx);
}

/* we use only one segment for DbC rings */
static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
{
        if (!ring)
                return;

        if (ring->first_seg && ring->first_seg->trbs) {
                dma_free_coherent(dev, TRB_SEGMENT_SIZE,
                                  ring->first_seg->trbs,
                                  ring->first_seg->dma);
                kfree(ring->first_seg);
        }
        kfree(ring);
}

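/*
 * Build the DbC string descriptors and return their lengths packed into a
 * u32, one byte per descriptor: string0 in the low byte, then manufacturer,
 * product and serial. This matches the string descriptor length fields of
 * the DbC info context (xHCI spec section 7.6.9).
 */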
static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
{
        struct usb_string_descriptor    *s_desc;
        u32                             string_length;

        /* Serial string: */
        s_desc = (struct usb_string_descriptor *)strings->serial;
        utf8s_to_utf16s(DBC_STRING_SERIAL, strlen(DBC_STRING_SERIAL),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength         = (strlen(DBC_STRING_SERIAL) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length           = s_desc->bLength;
        string_length           <<= 8;

        /* Product string: */
        s_desc = (struct usb_string_descriptor *)strings->product;
        utf8s_to_utf16s(DBC_STRING_PRODUCT, strlen(DBC_STRING_PRODUCT),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength         = (strlen(DBC_STRING_PRODUCT) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length           += s_desc->bLength;
        string_length           <<= 8;

        /* Manufacturer string: */
        s_desc = (struct usb_string_descriptor *)strings->manufacturer;
        utf8s_to_utf16s(DBC_STRING_MANUFACTURER,
                        strlen(DBC_STRING_MANUFACTURER),
                        UTF16_LITTLE_ENDIAN, (wchar_t *)s_desc->wData,
                        DBC_MAX_STRING_LENGTH);

        s_desc->bLength         = (strlen(DBC_STRING_MANUFACTURER) + 1) * 2;
        s_desc->bDescriptorType = USB_DT_STRING;
        string_length           += s_desc->bLength;
        string_length           <<= 8;

        /* String0: */
        strings->string0[0]     = 4;
        strings->string0[1]     = USB_DT_STRING;
        strings->string0[2]     = 0x09;
        strings->string0[3]     = 0x04;
        string_length           += 4;

        return string_length;
}

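/*
 * Program the DbC contexts: an info context followed by the bulk-out and
 * bulk-in endpoint contexts. Both endpoints use the SuperSpeed bulk max
 * packet size of 1024 bytes, with the max burst size taken from the DbC
 * control register.
 */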
static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
{
        struct dbc_info_context *info;
        struct xhci_ep_ctx      *ep_ctx;
        u32                     dev_info;
        dma_addr_t              deq, dma;
        unsigned int            max_burst;

        if (!dbc)
                return;

        /* Populate info Context: */
        info                    = (struct dbc_info_context *)dbc->ctx->bytes;
        dma                     = dbc->string_dma;
        info->string0           = cpu_to_le64(dma);
        info->manufacturer      = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH);
        info->product           = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2);
        info->serial            = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
        info->length            = cpu_to_le32(string_length);

        /* Populate bulk out endpoint context: */
        ep_ctx                  = dbc_bulkout_ctx(dbc);
        max_burst               = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
        deq                     = dbc_bulkout_enq(dbc);
        ep_ctx->ep_info         = 0;
        ep_ctx->ep_info2        = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
        ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_out->cycle_state);

        /* Populate bulk in endpoint context: */
        ep_ctx                  = dbc_bulkin_ctx(dbc);
        deq                     = dbc_bulkin_enq(dbc);
        ep_ctx->ep_info         = 0;
        ep_ctx->ep_info2        = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
        ep_ctx->deq             = cpu_to_le64(deq | dbc->ring_in->cycle_state);

        /* Set DbC context and info registers: */
        lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);

        /* writel() expects a CPU-endian value; no byte swapping here */
        dev_info = (DBC_VENDOR_ID << 16) | DBC_PROTOCOL;
        writel(dev_info, &dbc->regs->devinfo1);

        dev_info = (DBC_DEVICE_REV << 16) | DBC_PRODUCT_ID;
        writel(dev_info, &dbc->regs->devinfo2);
}

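/*
 * Complete a request and hand it back to its owner. Called with dbc->lock
 * held; the lock is dropped around the completion callback so that the
 * callback may queue further requests.
 */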
static void xhci_dbc_giveback(struct dbc_request *req, int status)
        __releases(&dbc->lock)
        __acquires(&dbc->lock)
{
        struct xhci_dbc         *dbc = req->dbc;
        struct device           *dev = dbc->dev;

        list_del_init(&req->list_pending);
        req->trb_dma = 0;
        req->trb = NULL;

        if (req->status == -EINPROGRESS)
                req->status = status;

        trace_xhci_dbc_giveback_request(req);

        dma_unmap_single(dev,
                         req->dma,
                         req->length,
                         dbc_ep_dma_direction(req));

        /* Give back the transfer request: */
        spin_unlock(&dbc->lock);
        req->complete(dbc, req);
        spin_lock(&dbc->lock);
}

static void xhci_dbc_flush_single_request(struct dbc_request *req)
{
        union xhci_trb  *trb = req->trb;

        trb->generic.field[0]   = 0;
        trb->generic.field[1]   = 0;
        trb->generic.field[2]   = 0;
        trb->generic.field[3]   &= cpu_to_le32(TRB_CYCLE);
        trb->generic.field[3]   |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));

        xhci_dbc_giveback(req, -ESHUTDOWN);
}

static void xhci_dbc_flush_endpoint_requests(struct dbc_ep *dep)
{
        struct dbc_request      *req, *tmp;

        list_for_each_entry_safe(req, tmp, &dep->list_pending, list_pending)
                xhci_dbc_flush_single_request(req);
}

static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
{
        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
        xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
}

struct dbc_request *
dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
{
        struct dbc_request      *req;

        if (direction != BULK_IN &&
            direction != BULK_OUT)
                return NULL;

        if (!dbc)
                return NULL;

        req = kzalloc(sizeof(*req), flags);
        if (!req)
                return NULL;

        req->dbc = dbc;
        INIT_LIST_HEAD(&req->list_pending);
        INIT_LIST_HEAD(&req->list_pool);
        req->direction = direction;

        trace_xhci_dbc_alloc_request(req);

        return req;
}

void
dbc_free_request(struct dbc_request *req)
{
        trace_xhci_dbc_free_request(req);

        kfree(req);
}

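/*
 * Write one TRB at the enqueue pointer and advance it. DbC rings are
 * single-segment, so reaching the link TRB at the end of the segment means
 * wrapping back to the start and toggling the producer cycle state.
 */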
static void
xhci_dbc_queue_trb(struct xhci_ring *ring, u32 field1,
                   u32 field2, u32 field3, u32 field4)
{
        union xhci_trb          *trb, *next;

        trb = ring->enqueue;
        trb->generic.field[0]   = cpu_to_le32(field1);
        trb->generic.field[1]   = cpu_to_le32(field2);
        trb->generic.field[2]   = cpu_to_le32(field3);
        trb->generic.field[3]   = cpu_to_le32(field4);

        trace_xhci_dbc_gadget_ep_queue(ring, &trb->generic);

        ring->num_trbs_free--;
        next = ++(ring->enqueue);
        if (TRB_TYPE_LINK_LE32(next->link.control)) {
                next->link.control ^= cpu_to_le32(TRB_CYCLE);
                ring->enqueue = ring->enq_seg->trbs;
                ring->cycle_state ^= 1;
        }
}

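/*
 * Queue a single normal TRB for a bulk transfer. The TRB is first written
 * with an inverted cycle bit so that the controller ignores it, then handed
 * over by flipping the cycle bit after a write barrier, and finally the
 * endpoint doorbell is rung.
 */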
static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep,
                                  struct dbc_request *req)
{
        u64                     addr;
        union xhci_trb          *trb;
        unsigned int            num_trbs;
        struct xhci_dbc         *dbc = req->dbc;
        struct xhci_ring        *ring = dep->ring;
        u32                     length, control, cycle;

        num_trbs = count_trbs(req->dma, req->length);
        WARN_ON(num_trbs != 1);
        if (ring->num_trbs_free < num_trbs)
                return -EBUSY;

        addr    = req->dma;
        trb     = ring->enqueue;
        cycle   = ring->cycle_state;
        length  = TRB_LEN(req->length);
        control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

        /* control is CPU-endian here; queue the TRB with the cycle bit inverted */
        if (cycle)
                control &= ~TRB_CYCLE;
        else
                control |= TRB_CYCLE;

        req->trb = ring->enqueue;
        req->trb_dma = xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
        xhci_dbc_queue_trb(ring,
                           lower_32_bits(addr),
                           upper_32_bits(addr),
                           length, control);

        /*
         * Add a barrier between writes of trb fields and flipping
         * the cycle bit:
         */
        wmb();

        if (cycle)
                trb->generic.field[3] |= cpu_to_le32(TRB_CYCLE);
        else
                trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE);

        writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell);

        return 0;
}

static int
dbc_ep_do_queue(struct dbc_request *req)
{
        int                     ret;
        struct xhci_dbc         *dbc = req->dbc;
        struct device           *dev = dbc->dev;
        struct dbc_ep           *dep = &dbc->eps[req->direction];

        if (!req->length || !req->buf)
                return -EINVAL;

        req->actual             = 0;
        req->status             = -EINPROGRESS;

        req->dma = dma_map_single(dev,
                                  req->buf,
                                  req->length,
                                  dbc_ep_dma_direction(dep));
        if (dma_mapping_error(dev, req->dma)) {
                dev_err(dbc->dev, "failed to map buffer\n");
                return -EFAULT;
        }

        ret = xhci_dbc_queue_bulk_tx(dep, req);
        if (ret) {
                dev_err(dbc->dev, "failed to queue trbs\n");
                dma_unmap_single(dev,
                                 req->dma,
                                 req->length,
                                 dbc_ep_dma_direction(dep));
                return -EFAULT;
        }

        list_add_tail(&req->list_pending, &dep->list_pending);

        return 0;
}

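/*
 * Queue a request on a DbC endpoint. Requests are only accepted while the
 * DbC is in the configured state; the event work is kicked so completions
 * are picked up promptly.
 */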
int dbc_ep_queue(struct dbc_request *req)
{
        unsigned long           flags;
        struct xhci_dbc         *dbc = req->dbc;
        int                     ret = -ESHUTDOWN;

        if (!dbc)
                return -ENODEV;

        if (req->direction != BULK_IN &&
            req->direction != BULK_OUT)
                return -EINVAL;

        spin_lock_irqsave(&dbc->lock, flags);
        if (dbc->state == DS_CONFIGURED)
                ret = dbc_ep_do_queue(req);
        spin_unlock_irqrestore(&dbc->lock, flags);

        mod_delayed_work(system_wq, &dbc->event_work, 0);

        trace_xhci_dbc_queue_request(req);

        return ret;
}

static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
{
        struct dbc_ep           *dep;

        dep                     = &dbc->eps[direction];
        dep->dbc                = dbc;
        dep->direction          = direction;
        dep->ring               = direction ? dbc->ring_in : dbc->ring_out;

        INIT_LIST_HEAD(&dep->list_pending);
}

static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
{
        xhci_dbc_do_eps_init(dbc, BULK_OUT);
        xhci_dbc_do_eps_init(dbc, BULK_IN);
}

static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
{
        memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
}

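/*
 * The DbC event ring is a single segment, so the event ring segment table
 * needs only one entry pointing at that segment.
 */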
static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
                    struct xhci_erst *erst, gfp_t flags)
{
        erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
                                           &erst->erst_dma_addr, flags);
        if (!erst->entries)
                return -ENOMEM;

        erst->num_entries = 1;
        erst->erst_size = 1;    /* single-entry table; written to the ERST size register */
        erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
        erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
        erst->entries[0].rsvd = 0;
        return 0;
}

static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
{
        if (erst->entries)
                dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
                                  erst->entries, erst->erst_dma_addr);
        erst->entries = NULL;
}

static struct xhci_container_ctx *
dbc_alloc_ctx(struct device *dev, gfp_t flags)
{
        struct xhci_container_ctx *ctx;

        ctx = kzalloc(sizeof(*ctx), flags);
        if (!ctx)
                return NULL;

        /* xHCI 7.6.9: all three contexts (info, ep-out and ep-in), each 64 bytes */
        ctx->size = 3 * DBC_CONTEXT_SIZE;
        ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
        if (!ctx->bytes) {
                kfree(ctx);
                return NULL;
        }
        return ctx;
}

static struct xhci_ring *
xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
{
        struct xhci_ring *ring;
        struct xhci_segment *seg;
        dma_addr_t dma;

        ring = kzalloc(sizeof(*ring), flags);
        if (!ring)
                return NULL;

        ring->num_segs = 1;
        ring->type = type;

        seg = kzalloc(sizeof(*seg), flags);
        if (!seg)
                goto seg_fail;

        ring->first_seg = seg;
        ring->last_seg = seg;
        seg->next = seg;

        seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
        if (!seg->trbs)
                goto dma_fail;

        seg->dma = dma;

        /* Only the event ring does not use a link TRB */
        if (type != TYPE_EVENT) {
                union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];

                trb->link.segment_ptr = cpu_to_le64(dma);
                trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
        }
        INIT_LIST_HEAD(&ring->td_list);
        xhci_initialize_ring_info(ring, 1);
        return ring;
dma_fail:
        kfree(seg);
seg_fail:
        kfree(ring);
        return NULL;
}

static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
{
        int                     ret;
        dma_addr_t              deq;
        u32                     string_length;
        struct device           *dev = dbc->dev;

        /* Allocate various rings for events and transfers: */
        dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
        if (!dbc->ring_evt)
                goto evt_fail;

        dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
        if (!dbc->ring_in)
                goto in_fail;

        dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
        if (!dbc->ring_out)
                goto out_fail;

        /* Allocate and populate ERST: */
        ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
        if (ret)
                goto erst_fail;

        /* Allocate context data structure: */
        dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
        if (!dbc->ctx)
                goto ctx_fail;

        /* Allocate the string table: */
        dbc->string_size = sizeof(struct dbc_str_descs);
        dbc->string = dma_alloc_coherent(dev, dbc->string_size,
                                         &dbc->string_dma, flags);
        if (!dbc->string)
                goto string_fail;

        /* Setup ERST register: */
        writel(dbc->erst.erst_size, &dbc->regs->ersts);

        lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
        deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                   dbc->ring_evt->dequeue);
        lo_hi_writeq(deq, &dbc->regs->erdp);

        /* Setup strings and contexts: */
        string_length = xhci_dbc_populate_strings(dbc->string);
        xhci_dbc_init_contexts(dbc, string_length);

        xhci_dbc_eps_init(dbc);
        dbc->state = DS_INITIALIZED;

        return 0;

string_fail:
        dbc_free_ctx(dev, dbc->ctx);
        dbc->ctx = NULL;
ctx_fail:
        dbc_erst_free(dev, &dbc->erst);
erst_fail:
        dbc_ring_free(dev, dbc->ring_out);
        dbc->ring_out = NULL;
out_fail:
        dbc_ring_free(dev, dbc->ring_in);
        dbc->ring_in = NULL;
in_fail:
        dbc_ring_free(dev, dbc->ring_evt);
        dbc->ring_evt = NULL;
evt_fail:
        return -ENOMEM;
}

static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
{
        if (!dbc)
                return;

        xhci_dbc_eps_exit(dbc);

        if (dbc->string) {
                dma_free_coherent(dbc->dev, dbc->string_size,
                                  dbc->string, dbc->string_dma);
                dbc->string = NULL;
        }

        dbc_free_ctx(dbc->dev, dbc->ctx);
        dbc->ctx = NULL;

        dbc_erst_free(dbc->dev, &dbc->erst);
        dbc_ring_free(dbc->dev, dbc->ring_out);
        dbc_ring_free(dbc->dev, dbc->ring_in);
        dbc_ring_free(dbc->dev, dbc->ring_evt);
        dbc->ring_in = NULL;
        dbc->ring_out = NULL;
        dbc->ring_evt = NULL;
}

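/*
 * Enable the DbC: clear the enable bit and wait for it to read back as
 * zero, set up the DbC data structures, then set the enable bits and wait
 * for the controller to acknowledge. Called with dbc->lock held, hence the
 * GFP_ATOMIC allocations in xhci_dbc_mem_init().
 */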
static int xhci_do_dbc_start(struct xhci_dbc *dbc)
{
        int                     ret;
        u32                     ctrl;

        if (dbc->state != DS_DISABLED)
                return -EINVAL;

        writel(0, &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             0, 1000);
        if (ret)
                return ret;

        ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
        if (ret)
                return ret;

        ctrl = readl(&dbc->regs->control);
        writel(ctrl | DBC_CTRL_DBC_ENABLE | DBC_CTRL_PORT_ENABLE,
               &dbc->regs->control);
        ret = xhci_handshake(&dbc->regs->control,
                             DBC_CTRL_DBC_ENABLE,
                             DBC_CTRL_DBC_ENABLE, 1000);
        if (ret)
                return ret;

        dbc->state = DS_ENABLED;

        return 0;
}

static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
{
        if (dbc->state == DS_DISABLED)
                return -1;

        writel(0, &dbc->regs->control);
        dbc->state = DS_DISABLED;

        return 0;
}

static int xhci_dbc_start(struct xhci_dbc *dbc)
{
        int                     ret;
        unsigned long           flags;

        WARN_ON(!dbc);

        pm_runtime_get_sync(dbc->dev); /* note this was self.controller */

        spin_lock_irqsave(&dbc->lock, flags);
        ret = xhci_do_dbc_start(dbc);
        spin_unlock_irqrestore(&dbc->lock, flags);

        if (ret) {
                pm_runtime_put(dbc->dev); /* note this was self.controller */
                return ret;
        }

        return mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_dbc_stop(struct xhci_dbc *dbc)
{
        int ret;
        unsigned long           flags;

        WARN_ON(!dbc);

        switch (dbc->state) {
        case DS_DISABLED:
                return;
        case DS_CONFIGURED:
        case DS_STALLED:
                if (dbc->driver->disconnect)
                        dbc->driver->disconnect(dbc);
                break;
        default:
                break;
        }

        cancel_delayed_work_sync(&dbc->event_work);

        spin_lock_irqsave(&dbc->lock, flags);
        ret = xhci_do_dbc_stop(dbc);
        spin_unlock_irqrestore(&dbc->lock, flags);

        if (!ret) {
                xhci_dbc_mem_cleanup(dbc);
                pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
        }
}

static void
dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
{
        u32                     portsc;

        portsc = readl(&dbc->regs->portsc);
        if (portsc & DBC_PORTSC_CONN_CHANGE)
                dev_info(dbc->dev, "DbC port connect change\n");

        if (portsc & DBC_PORTSC_RESET_CHANGE)
                dev_info(dbc->dev, "DbC port reset change\n");

        if (portsc & DBC_PORTSC_LINK_CHANGE)
                dev_info(dbc->dev, "DbC port link status change\n");

        if (portsc & DBC_PORTSC_CONFIG_CHANGE)
                dev_info(dbc->dev, "DbC config error change\n");

        /* The port reset change bit is cleared elsewhere: */
        writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
}

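/*
 * Handle a transfer event: map the completion code to a status, then match
 * the event against a pending request by comparing the TRB DMA address
 * reported in the event with that of each queued TRB.
 */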
static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
{
        struct dbc_ep           *dep;
        struct xhci_ring        *ring;
        int                     ep_id;
        int                     status;
        u32                     comp_code;
        size_t                  remain_length;
        struct dbc_request      *req = NULL, *r;

        comp_code       = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
        remain_length   = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
        ep_id           = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
        dep             = (ep_id == EPID_OUT) ?
                                get_out_ep(dbc) : get_in_ep(dbc);
        ring            = dep->ring;

        switch (comp_code) {
        case COMP_SUCCESS:
                remain_length = 0;
        /* FALLTHROUGH */
        case COMP_SHORT_PACKET:
                status = 0;
                break;
        case COMP_TRB_ERROR:
        case COMP_BABBLE_DETECTED_ERROR:
        case COMP_USB_TRANSACTION_ERROR:
        case COMP_STALL_ERROR:
                dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
                status = -comp_code;
                break;
        default:
                dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
                status = -comp_code;
                break;
        }

        /* Match the pending request: */
        list_for_each_entry(r, &dep->list_pending, list_pending) {
                if (r->trb_dma == le64_to_cpu(event->trans_event.buffer)) {
                        req = r;
                        break;
                }
        }

        if (!req) {
                dev_warn(dbc->dev, "no matched request\n");
                return;
        }

        trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);

        ring->num_trbs_free++;
        req->actual = req->length - remain_length;
        xhci_dbc_giveback(req, status);
}

static void inc_evt_deq(struct xhci_ring *ring)
{
        /* If on the last TRB of the segment go back to the beginning */
        if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
                ring->cycle_state ^= 1;
                ring->dequeue = ring->deq_seg->trbs;
                return;
        }
        ring->dequeue++;
}

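/*
 * Run the DbC state machine. This driver does not use interrupts for the
 * DbC; a self-rearming delayed work item calls this to track port and run
 * state in the PORTSC and CTRL registers and to consume events from the
 * event ring. The return value tells the caller which driver callback, if
 * any, to invoke.
 */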
static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
{
        dma_addr_t              deq;
        struct dbc_ep           *dep;
        union xhci_trb          *evt;
        u32                     ctrl, portsc;
        bool                    update_erdp = false;

        /* DbC state machine: */
        switch (dbc->state) {
        case DS_DISABLED:
        case DS_INITIALIZED:
                return EVT_ERR;
        case DS_ENABLED:
                portsc = readl(&dbc->regs->portsc);
                if (portsc & DBC_PORTSC_CONN_STATUS) {
                        dbc->state = DS_CONNECTED;
                        dev_info(dbc->dev, "DbC connected\n");
                }

                return EVT_DONE;
        case DS_CONNECTED:
                ctrl = readl(&dbc->regs->control);
                if (ctrl & DBC_CTRL_DBC_RUN) {
                        dbc->state = DS_CONFIGURED;
                        dev_info(dbc->dev, "DbC configured\n");
                        portsc = readl(&dbc->regs->portsc);
                        writel(portsc, &dbc->regs->portsc);
                        return EVT_GSER;
                }

                return EVT_DONE;
        case DS_CONFIGURED:
                /* Handle cable unplug event: */
                portsc = readl(&dbc->regs->portsc);
                if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
                    !(portsc & DBC_PORTSC_CONN_STATUS)) {
                        dev_info(dbc->dev, "DbC cable unplugged\n");
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle debug port reset event: */
                if (portsc & DBC_PORTSC_RESET_CHANGE) {
                        dev_info(dbc->dev, "DbC port reset\n");
                        writel(portsc, &dbc->regs->portsc);
                        dbc->state = DS_ENABLED;
                        xhci_dbc_flush_requests(dbc);

                        return EVT_DISC;
                }

                /* Handle endpoint stall event: */
                ctrl = readl(&dbc->regs->control);
                if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
                    (ctrl & DBC_CTRL_HALT_OUT_TR)) {
                        dev_info(dbc->dev, "DbC Endpoint stall\n");
                        dbc->state = DS_STALLED;

                        if (ctrl & DBC_CTRL_HALT_IN_TR) {
                                dep = get_in_ep(dbc);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        if (ctrl & DBC_CTRL_HALT_OUT_TR) {
                                dep = get_out_ep(dbc);
                                xhci_dbc_flush_endpoint_requests(dep);
                        }

                        return EVT_DONE;
                }

                /* Clear DbC run change bit: */
                if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
                        writel(ctrl, &dbc->regs->control);
                        ctrl = readl(&dbc->regs->control);
                }

                break;
        case DS_STALLED:
                ctrl = readl(&dbc->regs->control);
                if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
                    !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
                    (ctrl & DBC_CTRL_DBC_RUN)) {
                        dbc->state = DS_CONFIGURED;
                        break;
                }

                return EVT_DONE;
        default:
                dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
                break;
        }

        /* Handle the events in the event ring: */
        evt = dbc->ring_evt->dequeue;
        while ((le32_to_cpu(evt->event_cmd.flags) & TRB_CYCLE) ==
                        dbc->ring_evt->cycle_state) {
                /*
                 * Add a barrier between reading the cycle flag and any
                 * reads of the event's flags/data below:
                 */
                rmb();

                trace_xhci_dbc_handle_event(dbc->ring_evt, &evt->generic);

                switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
                case TRB_TYPE(TRB_PORT_STATUS):
                        dbc_handle_port_status(dbc, evt);
                        break;
                case TRB_TYPE(TRB_TRANSFER):
                        dbc_handle_xfer_event(dbc, evt);
                        break;
                default:
                        break;
                }

                inc_evt_deq(dbc->ring_evt);

                evt = dbc->ring_evt->dequeue;
                update_erdp = true;
        }

        /* Update event ring dequeue pointer: */
        if (update_erdp) {
                deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                           dbc->ring_evt->dequeue);
                lo_hi_writeq(deq, &dbc->regs->erdp);
        }

        return EVT_DONE;
}

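/*
 * Delayed-work handler driving the polling loop. Driver callbacks are
 * invoked without dbc->lock held, and the work re-arms itself unless event
 * handling failed.
 */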
static void xhci_dbc_handle_events(struct work_struct *work)
{
        enum evtreturn          evtr;
        struct xhci_dbc         *dbc;
        unsigned long           flags;

        dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);

        spin_lock_irqsave(&dbc->lock, flags);
        evtr = xhci_dbc_do_handle_events(dbc);
        spin_unlock_irqrestore(&dbc->lock, flags);

        switch (evtr) {
        case EVT_GSER:
                if (dbc->driver->configure)
                        dbc->driver->configure(dbc);
                break;
        case EVT_DISC:
                if (dbc->driver->disconnect)
                        dbc->driver->disconnect(dbc);
                break;
        case EVT_DONE:
                break;
        default:
                dev_info(dbc->dev, "stop handling dbc events\n");
                return;
        }

        mod_delayed_work(system_wq, &dbc->event_work, 1);
}

static void xhci_do_dbc_exit(struct xhci_hcd *xhci)
{
        unsigned long           flags;

        spin_lock_irqsave(&xhci->lock, flags);
        kfree(xhci->dbc);
        xhci->dbc = NULL;
        spin_unlock_irqrestore(&xhci->lock, flags);
}

static int xhci_do_dbc_init(struct xhci_hcd *xhci)
{
        u32                     reg;
        struct xhci_dbc         *dbc;
        unsigned long           flags;
        void __iomem            *base;
        int                     dbc_cap_offs;

        base = &xhci->cap_regs->hc_capbase;
        dbc_cap_offs = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_DEBUG);
        if (!dbc_cap_offs)
                return -ENODEV;

        dbc = kzalloc(sizeof(*dbc), GFP_KERNEL);
        if (!dbc)
                return -ENOMEM;

        dbc->regs = base + dbc_cap_offs;

        /* Leave the DbC alone if it is already in use by someone else. */
        reg = readl(&dbc->regs->control);
        if (reg & DBC_CTRL_DBC_ENABLE) {
                kfree(dbc);
                return -EBUSY;
        }

        spin_lock_irqsave(&xhci->lock, flags);
        if (xhci->dbc) {
                spin_unlock_irqrestore(&xhci->lock, flags);
                kfree(dbc);
                return -EBUSY;
        }
        xhci->dbc = dbc;
        spin_unlock_irqrestore(&xhci->lock, flags);

        dbc->xhci = xhci;
        dbc->dev = xhci_to_hcd(xhci)->self.sysdev;
        INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
        spin_lock_init(&dbc->lock);

        return 0;
}

static ssize_t dbc_show(struct device *dev,
                        struct device_attribute *attr,
                        char *buf)
{
        const char              *p;
        struct xhci_dbc         *dbc;
        struct xhci_hcd         *xhci;

        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;

        switch (dbc->state) {
        case DS_DISABLED:
                p = "disabled";
                break;
        case DS_INITIALIZED:
                p = "initialized";
                break;
        case DS_ENABLED:
                p = "enabled";
                break;
        case DS_CONNECTED:
                p = "connected";
                break;
        case DS_CONFIGURED:
                p = "configured";
                break;
        case DS_STALLED:
                p = "stalled";
                break;
        default:
                p = "unknown";
        }

        return sprintf(buf, "%s\n", p);
}

static ssize_t dbc_store(struct device *dev,
                         struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct xhci_hcd         *xhci;
        struct xhci_dbc         *dbc;

        xhci = hcd_to_xhci(dev_get_drvdata(dev));
        dbc = xhci->dbc;

        if (!strncmp(buf, "enable", 6))
                xhci_dbc_start(dbc);
        else if (!strncmp(buf, "disable", 7))
                xhci_dbc_stop(dbc);
        else
                return -EINVAL;

        return count;
}

static DEVICE_ATTR_RW(dbc);

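/*
 * The "dbc" sysfs attribute created below allows user space to read the
 * current DbC state and to start or stop the DbC, e.g. (the exact path
 * depends on the host controller device):
 *
 *   echo enable  > /sys/bus/pci/devices/0000:00:14.0/dbc
 *   echo disable > /sys/bus/pci/devices/0000:00:14.0/dbc
 */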
int xhci_dbc_init(struct xhci_hcd *xhci)
{
        int                     ret;
        struct device           *dev = xhci_to_hcd(xhci)->self.controller;

        ret = xhci_do_dbc_init(xhci);
        if (ret)
                goto init_err3;

        ret = xhci_dbc_tty_probe(xhci);
        if (ret)
                goto init_err2;

        ret = device_create_file(dev, &dev_attr_dbc);
        if (ret)
                goto init_err1;

        return 0;

init_err1:
        xhci_dbc_tty_remove(xhci->dbc);
init_err2:
        xhci_do_dbc_exit(xhci);
init_err3:
        return ret;
}

void xhci_dbc_exit(struct xhci_hcd *xhci)
{
        struct device           *dev = xhci_to_hcd(xhci)->self.controller;

        if (!xhci->dbc)
                return;

        device_remove_file(dev, &dev_attr_dbc);
        xhci_dbc_tty_remove(xhci->dbc);
        xhci_dbc_stop(xhci->dbc);
        xhci_do_dbc_exit(xhci);
}

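/*
 * Across system suspend the DbC is simply stopped; if it was configured,
 * it is flagged so that resume starts it again.
 */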
#ifdef CONFIG_PM
int xhci_dbc_suspend(struct xhci_hcd *xhci)
{
        struct xhci_dbc         *dbc = xhci->dbc;

        if (!dbc)
                return 0;

        if (dbc->state == DS_CONFIGURED)
                dbc->resume_required = 1;

        xhci_dbc_stop(dbc);

        return 0;
}

int xhci_dbc_resume(struct xhci_hcd *xhci)
{
        int                     ret = 0;
        struct xhci_dbc         *dbc = xhci->dbc;

        if (!dbc)
                return 0;

        if (dbc->resume_required) {
                dbc->resume_required = 0;
                xhci_dbc_start(dbc);
        }

        return ret;
}
#endif /* CONFIG_PM */