// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbc.c - xHCI debug capability early driver
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/console.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/memblock.h>
#include <linux/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <linux/bcd.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kthread.h>

#include "../host/xhci.h"
#include "xhci-dbc.h"

static struct xdbc_state xdbc;
static bool early_console_keep;

#ifdef XDBC_TRACE
#define	xdbc_trace	trace_printk
#else
static inline void xdbc_trace(const char *fmt, ...) { }
#endif /* XDBC_TRACE */

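/*
 * Map the xHCI MMIO registers through early_ioremap(). The BAR is sized
 * the standard PCI way: write all ones, read back the size mask, then
 * restore the original value. Both 32-bit and 64-bit memory BARs are
 * handled.
 */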
static void __iomem * __init xdbc_map_pci_mmio(u32 bus, u32 dev, u32 func)
{
	u64 val64, sz64, mask64;
	void __iomem *base;
	u32 val, sz;
	u8 byte;

	val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, ~0);
	sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0);
	write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0, val);

	if (val == 0xffffffff || sz == 0xffffffff) {
		pr_notice("invalid mmio bar\n");
		return NULL;
	}

	val64 = val & PCI_BASE_ADDRESS_MEM_MASK;
	sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
	mask64 = PCI_BASE_ADDRESS_MEM_MASK;

	if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		val = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, ~0);
		sz = read_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4);
		write_pci_config(bus, dev, func, PCI_BASE_ADDRESS_0 + 4, val);

		val64 |= (u64)val << 32;
		sz64 |= (u64)sz << 32;
		mask64 |= ~0ULL << 32;
	}

	sz64 &= mask64;

	if (!sz64) {
		pr_notice("invalid mmio address\n");
		return NULL;
	}

	sz64 = 1ULL << __ffs64(sz64);

	/* Check if the mem space is enabled: */
	byte = read_pci_config_byte(bus, dev, func, PCI_COMMAND);
	if (!(byte & PCI_COMMAND_MEMORY)) {
		byte |= PCI_COMMAND_MEMORY;
		write_pci_config_byte(bus, dev, func, PCI_COMMAND, byte);
	}

	xdbc.xhci_start = val64;
	xdbc.xhci_length = sz64;
	base = early_ioremap(val64, sz64);

	return base;
}

static void * __init xdbc_get_page(dma_addr_t *dma_addr)
{
	void *virt;

	virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!virt)
		return NULL;

	if (dma_addr)
		*dma_addr = (dma_addr_t)__pa(virt);

	return virt;
}

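/*
 * Brute-force scan of PCI configuration space for xHCI-class host
 * controllers; the xdbc_num'th match is returned through the b/d/f
 * out-parameters.
 */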
static u32 __init xdbc_find_dbgp(int xdbc_num, u32 *b, u32 *d, u32 *f)
{
	u32 bus, dev, func, class;

	for (bus = 0; bus < XDBC_PCI_MAX_BUSES; bus++) {
		for (dev = 0; dev < XDBC_PCI_MAX_DEVICES; dev++) {
			for (func = 0; func < XDBC_PCI_MAX_FUNCTION; func++) {

				class = read_pci_config(bus, dev, func, PCI_CLASS_REVISION);
				if ((class >> 8) != PCI_CLASS_SERIAL_USB_XHCI)
					continue;

				if (xdbc_num-- != 0)
					continue;

				*b = bus;
				*d = dev;
				*f = func;

				return 0;
			}
		}
	}

	return -1;
}

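/*
 * Poll a register until (value & mask) == done, waiting at most @wait
 * microseconds in @delay-microsecond steps. Callers run with interrupts
 * disabled, so this busy-waits with udelay().
 */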
static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay)
{
	u32 result;

	do {
		result = readl(ptr);
		result &= mask;
		if (result == done)
			return 0;
		udelay(delay);
		wait -= delay;
	} while (wait > 0);

	return -ETIMEDOUT;
}

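/*
 * Take ownership of the controller from the BIOS through the USB Legacy
 * Support extended capability, then mask SMI generation and clear any
 * pending SMI events so firmware no longer interferes with the port.
 */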
static void __init xdbc_bios_handoff(void)
{
	int offset, timeout;
	u32 val;

	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_LEGACY);
	val = readl(xdbc.xhci_base + offset);

	if (val & XHCI_HC_BIOS_OWNED) {
		writel(val | XHCI_HC_OS_OWNED, xdbc.xhci_base + offset);
		timeout = handshake(xdbc.xhci_base + offset, XHCI_HC_BIOS_OWNED, 0, 5000, 10);

		if (timeout) {
			pr_notice("failed to hand over xHCI control from BIOS\n");
			writel(val & ~XHCI_HC_BIOS_OWNED, xdbc.xhci_base + offset);
		}
	}

	/* Disable BIOS SMIs and clear all SMI events: */
	val = readl(xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
	val &= XHCI_LEGACY_DISABLE_SMI;
	val |= XHCI_LEGACY_SMI_EVENTS;
	writel(val, xdbc.xhci_base + offset + XHCI_LEGACY_CONTROL_OFFSET);
}

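/*
 * Each ring is a single memblock-allocated page holding one segment of
 * XDBC_TRBS_PER_SEGMENT TRBs, so it can be set up long before the page
 * allocator is available.
 */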
static int __init
xdbc_alloc_ring(struct xdbc_segment *seg, struct xdbc_ring *ring)
{
	seg->trbs = xdbc_get_page(&seg->dma);
	if (!seg->trbs)
		return -ENOMEM;

	ring->segment = seg;

	return 0;
}

static void __init xdbc_free_ring(struct xdbc_ring *ring)
{
	struct xdbc_segment *seg = ring->segment;

	if (!seg)
		return;

	memblock_free(seg->dma, PAGE_SIZE);
	ring->segment = NULL;
}

static void xdbc_reset_ring(struct xdbc_ring *ring)
{
	struct xdbc_segment *seg = ring->segment;
	struct xdbc_trb *link_trb;

	memset(seg->trbs, 0, PAGE_SIZE);

	ring->enqueue = seg->trbs;
	ring->dequeue = seg->trbs;
	ring->cycle_state = 1;

	if (ring != &xdbc.evt_ring) {
		link_trb = &seg->trbs[XDBC_TRBS_PER_SEGMENT - 1];
		link_trb->field[0] = cpu_to_le32(lower_32_bits(seg->dma));
		link_trb->field[1] = cpu_to_le32(upper_32_bits(seg->dma));
		link_trb->field[3] = cpu_to_le32(TRB_TYPE(TRB_LINK)) | cpu_to_le32(LINK_TOGGLE);
	}
}

static inline void xdbc_put_utf16(u16 *s, const char *c, size_t size)
{
	int i;

	for (i = 0; i < size; i++)
		s[i] = cpu_to_le16(c[i]);
}

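/*
 * Lay out the in-memory interface consumed by the DbC hardware: the
 * event ring segment table, the debug capability contexts and the
 * string descriptors are all carved out of the single table page,
 * tracked by 'index'.
 */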
static void xdbc_mem_init(void)
{
	struct xdbc_ep_context *ep_in, *ep_out;
	struct usb_string_descriptor *s_desc;
	struct xdbc_erst_entry *entry;
	struct xdbc_strings *strings;
	struct xdbc_context *ctx;
	unsigned int max_burst;
	u32 string_length;
	int index = 0;
	u32 dev_info;

	xdbc_reset_ring(&xdbc.evt_ring);
	xdbc_reset_ring(&xdbc.in_ring);
	xdbc_reset_ring(&xdbc.out_ring);
	memset(xdbc.table_base, 0, PAGE_SIZE);
	memset(xdbc.out_buf, 0, PAGE_SIZE);

	/* Initialize event ring segment table: */
	xdbc.erst_size = 16 * XDBC_ERST_ENTRY_NUM;
	xdbc.erst_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.erst_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

	index += XDBC_ERST_ENTRY_NUM;
	entry = (struct xdbc_erst_entry *)xdbc.erst_base;

	entry->seg_addr = cpu_to_le64(xdbc.evt_seg.dma);
	entry->seg_size = cpu_to_le32(XDBC_TRBS_PER_SEGMENT);
	entry->__reserved_0 = 0;

	/* Initialize ERST registers: */
	writel(1, &xdbc.xdbc_reg->ersts);
	xdbc_write64(xdbc.erst_dma, &xdbc.xdbc_reg->erstba);
	xdbc_write64(xdbc.evt_seg.dma, &xdbc.xdbc_reg->erdp);

	/* Debug capability contexts: */
	xdbc.dbcc_size = 64 * 3;
	xdbc.dbcc_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.dbcc_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;

	index += XDBC_DBCC_ENTRY_NUM;

	/* Populate the strings: */
	xdbc.string_size = sizeof(struct xdbc_strings);
	xdbc.string_base = xdbc.table_base + index * XDBC_TABLE_ENTRY_SIZE;
	xdbc.string_dma = xdbc.table_dma + index * XDBC_TABLE_ENTRY_SIZE;
	strings = (struct xdbc_strings *)xdbc.string_base;

	index += XDBC_STRING_ENTRY_NUM;

	/* Serial string: */
	s_desc = (struct usb_string_descriptor *)strings->serial;
	s_desc->bLength = (strlen(XDBC_STRING_SERIAL) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_SERIAL, strlen(XDBC_STRING_SERIAL));
	string_length = s_desc->bLength;
	string_length <<= 8;

	/* Product string: */
	s_desc = (struct usb_string_descriptor *)strings->product;
	s_desc->bLength = (strlen(XDBC_STRING_PRODUCT) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_PRODUCT, strlen(XDBC_STRING_PRODUCT));
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* Manufacturer string: */
	s_desc = (struct usb_string_descriptor *)strings->manufacturer;
	s_desc->bLength = (strlen(XDBC_STRING_MANUFACTURER) + 1) * 2;
	s_desc->bDescriptorType = USB_DT_STRING;

	xdbc_put_utf16(s_desc->wData, XDBC_STRING_MANUFACTURER, strlen(XDBC_STRING_MANUFACTURER));
	string_length += s_desc->bLength;
	string_length <<= 8;

	/* String0 descriptor (LANGID 0x0409, US English): */
	strings->string0[0] = 4;
	strings->string0[1] = USB_DT_STRING;
	strings->string0[2] = 0x09;
	strings->string0[3] = 0x04;

	string_length += 4;

	/* Populate info Context: */
	ctx = (struct xdbc_context *)xdbc.dbcc_base;

	ctx->info.string0 = cpu_to_le64(xdbc.string_dma);
	ctx->info.manufacturer = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH);
	ctx->info.product = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 2);
	ctx->info.serial = cpu_to_le64(xdbc.string_dma + XDBC_MAX_STRING_LENGTH * 3);
	ctx->info.length = cpu_to_le32(string_length);

	/* Populate bulk out endpoint context: */
	max_burst = DEBUG_MAX_BURST(readl(&xdbc.xdbc_reg->control));
	ep_out = (struct xdbc_ep_context *)&ctx->out;

	ep_out->ep_info1 = 0;
	ep_out->ep_info2 = cpu_to_le32(EP_TYPE(BULK_OUT_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
	ep_out->deq = cpu_to_le64(xdbc.out_seg.dma | xdbc.out_ring.cycle_state);

	/* Populate bulk in endpoint context: */
	ep_in = (struct xdbc_ep_context *)&ctx->in;

	ep_in->ep_info1 = 0;
	ep_in->ep_info2 = cpu_to_le32(EP_TYPE(BULK_IN_EP) | MAX_PACKET(1024) | MAX_BURST(max_burst));
	ep_in->deq = cpu_to_le64(xdbc.in_seg.dma | xdbc.in_ring.cycle_state);

	/* Set DbC context and info registers: */
	xdbc_write64(xdbc.dbcc_dma, &xdbc.xdbc_reg->dccp);

	dev_info = cpu_to_le32((XDBC_VENDOR_ID << 16) | XDBC_PROTOCOL);
	writel(dev_info, &xdbc.xdbc_reg->devinfo1);

	dev_info = cpu_to_le32((XDBC_DEVICE_REV << 16) | XDBC_PRODUCT_ID);
	writel(dev_info, &xdbc.xdbc_reg->devinfo2);

	xdbc.in_buf = xdbc.out_buf + XDBC_MAX_PACKET;
	xdbc.in_dma = xdbc.out_dma + XDBC_MAX_PACKET;
}

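/*
 * Warm-reset every port in the given range that has nothing connected.
 * Port numbers in the PROTOCOL capability are 1-based, hence the id--
 * before indexing the PORTSC array.
 */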
static void xdbc_do_reset_debug_port(u32 id, u32 count)
{
	void __iomem *ops_reg;
	void __iomem *portsc;
	u32 val, cap_length;
	int i;

	cap_length = readl(xdbc.xhci_base) & 0xff;
	ops_reg = xdbc.xhci_base + cap_length;

	id--;
	for (i = id; i < (id + count); i++) {
		portsc = ops_reg + 0x400 + i * 0x10;
		val = readl(portsc);
		if (!(val & PORT_CONNECT))
			writel(val | PORT_RESET, portsc);
	}
}

static void xdbc_reset_debug_port(void)
{
	u32 val, port_offset, port_count;
	int offset = 0;

	do {
		offset = xhci_find_next_ext_cap(xdbc.xhci_base, offset, XHCI_EXT_CAPS_PROTOCOL);
		if (!offset)
			break;

		val = readl(xdbc.xhci_base + offset);
		if (XHCI_EXT_PORT_MAJOR(val) != 0x3)
			continue;

		val = readl(xdbc.xhci_base + offset + 8);
		port_offset = XHCI_EXT_PORT_OFF(val);
		port_count = XHCI_EXT_PORT_COUNT(val);

		xdbc_do_reset_debug_port(port_offset, port_count);
	} while (1);
}

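/*
 * Copy one TRB to the ring and advance the enqueue pointer. When the
 * enqueue pointer reaches the link TRB at the end of the segment, hand
 * the link TRB's cycle bit over to the hardware and wrap around.
 */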
static void
xdbc_queue_trb(struct xdbc_ring *ring, u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xdbc_trb *trb, *link_trb;

	trb = ring->enqueue;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);

	++(ring->enqueue);
	if (ring->enqueue >= &ring->segment->trbs[TRBS_PER_SEGMENT - 1]) {
		link_trb = ring->enqueue;
		if (ring->cycle_state)
			link_trb->field[3] |= cpu_to_le32(TRB_CYCLE);
		else
			link_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

		ring->enqueue = ring->segment->trbs;
		ring->cycle_state ^= 1;
	}
}

static void xdbc_ring_doorbell(int target)
{
	writel(DOOR_BELL_TARGET(target), &xdbc.xdbc_reg->doorbell);
}

static int xdbc_start(void)
{
	u32 ctrl, status;
	int ret;

	ctrl = readl(&xdbc.xdbc_reg->control);
	writel(ctrl | CTRL_DBC_ENABLE | CTRL_PORT_ENABLE, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, CTRL_DBC_ENABLE, 100000, 100);
	if (ret) {
		xdbc_trace("failed to initialize hardware\n");
		return ret;
	}

	/* Reset port to avoid bus hang: */
	if (xdbc.vendor == PCI_VENDOR_ID_INTEL)
		xdbc_reset_debug_port();

	/* Wait for port connection: */
	ret = handshake(&xdbc.xdbc_reg->portsc, PORTSC_CONN_STATUS, PORTSC_CONN_STATUS, 5000000, 100);
	if (ret) {
		xdbc_trace("waiting for connection timed out\n");
		return ret;
	}

	/* Wait for debug device to be configured: */
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_RUN, CTRL_DBC_RUN, 5000000, 100);
	if (ret) {
		xdbc_trace("waiting for device configuration timed out\n");
		return ret;
	}

	/* Check port number: */
	status = readl(&xdbc.xdbc_reg->status);
	if (!DCST_DEBUG_PORT(status)) {
		xdbc_trace("invalid root hub port number\n");
		return -ENODEV;
	}

	xdbc.port_number = DCST_DEBUG_PORT(status);

	xdbc_trace("DbC is running now, control 0x%08x port ID %d\n",
		   readl(&xdbc.xdbc_reg->control), xdbc.port_number);

	return 0;
}

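/*
 * Queue a single bulk TRB. The TRB is first queued with an inverted
 * cycle bit, and only flipped to the value the hardware expects after
 * the write barrier below, so the controller can never consume a
 * half-written TRB.
 */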
static int xdbc_bulk_transfer(void *data, int size, bool read)
{
	struct xdbc_ring *ring;
	struct xdbc_trb *trb;
	u32 length, control;
	u32 cycle;
	u64 addr;

	if (size > XDBC_MAX_PACKET) {
		xdbc_trace("bad parameter, size %d\n", size);
		return -EINVAL;
	}

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED) ||
	    !(xdbc.flags & XDBC_FLAGS_CONFIGURED) ||
	    (!read && (xdbc.flags & XDBC_FLAGS_OUT_STALL)) ||
	    (read && (xdbc.flags & XDBC_FLAGS_IN_STALL))) {

		xdbc_trace("connection not ready, flags %08x\n", xdbc.flags);
		return -EIO;
	}

	ring = (read ? &xdbc.in_ring : &xdbc.out_ring);
	trb = ring->enqueue;
	cycle = ring->cycle_state;
	length = TRB_LEN(size);
	control = TRB_TYPE(TRB_NORMAL) | TRB_IOC;

	if (cycle)
		control &= cpu_to_le32(~TRB_CYCLE);
	else
		control |= cpu_to_le32(TRB_CYCLE);

	if (read) {
		memset(xdbc.in_buf, 0, XDBC_MAX_PACKET);
		addr = xdbc.in_dma;
		xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
	} else {
		memset(xdbc.out_buf, 0, XDBC_MAX_PACKET);
		memcpy(xdbc.out_buf, data, size);
		addr = xdbc.out_dma;
		xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
	}

	xdbc_queue_trb(ring, lower_32_bits(addr), upper_32_bits(addr), length, control);

	/*
	 * Add a barrier between writes of trb fields and flipping
	 * the cycle bit of this trb:
	 */
	wmb();
	if (cycle)
		trb->field[3] |= cpu_to_le32(cycle);
	else
		trb->field[3] &= cpu_to_le32(~TRB_CYCLE);

	xdbc_ring_doorbell(read ? IN_EP_DOORBELL : OUT_EP_DOORBELL);

	return size;
}

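/*
 * A cleared CTRL_DBC_ENABLE means the controller was reset behind our
 * back; rebuild the data structures and restart the DbC from scratch.
 */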
static int xdbc_handle_external_reset(void)
{
	int ret = 0;

	xdbc.flags = 0;
	writel(0, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 10);
	if (ret)
		goto reset_out;

	xdbc_mem_init();

	ret = xdbc_start();
	if (ret < 0)
		goto reset_out;

	xdbc_trace("dbc recovered\n");

	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

	return 0;

reset_out:
	xdbc_trace("failed to recover from external reset\n");
	return ret;
}

static int __init xdbc_early_setup(void)
{
	int ret;

	writel(0, &xdbc.xdbc_reg->control);
	ret = handshake(&xdbc.xdbc_reg->control, CTRL_DBC_ENABLE, 0, 100000, 100);
	if (ret)
		return ret;

	/* Allocate the table page: */
	xdbc.table_base = xdbc_get_page(&xdbc.table_dma);
	if (!xdbc.table_base)
		return -ENOMEM;

	/* Get and store the transfer buffer: */
	xdbc.out_buf = xdbc_get_page(&xdbc.out_dma);
	if (!xdbc.out_buf)
		return -ENOMEM;

	/* Allocate the event ring: */
	ret = xdbc_alloc_ring(&xdbc.evt_seg, &xdbc.evt_ring);
	if (ret < 0)
		return ret;

	/* Allocate IN/OUT endpoint transfer rings: */
	ret = xdbc_alloc_ring(&xdbc.in_seg, &xdbc.in_ring);
	if (ret < 0)
		return ret;

	ret = xdbc_alloc_ring(&xdbc.out_seg, &xdbc.out_ring);
	if (ret < 0)
		return ret;

	xdbc_mem_init();

	ret = xdbc_start();
	if (ret < 0) {
		writel(0, &xdbc.xdbc_reg->control);
		return ret;
	}

	xdbc.flags |= XDBC_FLAGS_INITIALIZED | XDBC_FLAGS_CONFIGURED;

	xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);

	return 0;
}

int __init early_xdbc_parse_parameter(char *s)
{
	unsigned long dbgp_num = 0;
	u32 bus, dev, func, offset;
	int ret;

	if (!early_pci_allowed())
		return -EPERM;

	if (strstr(s, "keep"))
		early_console_keep = true;

	if (xdbc.xdbc_reg)
		return 0;

	if (*s && kstrtoul(s, 0, &dbgp_num))
		return -EINVAL;

	pr_notice("dbgp_num: %lu\n", dbgp_num);

	/* Locate the host controller: */
	ret = xdbc_find_dbgp(dbgp_num, &bus, &dev, &func);
	if (ret) {
		pr_notice("failed to locate xhci host\n");
		return -ENODEV;
	}

	xdbc.vendor = read_pci_config_16(bus, dev, func, PCI_VENDOR_ID);
	xdbc.device = read_pci_config_16(bus, dev, func, PCI_DEVICE_ID);
	xdbc.bus = bus;
	xdbc.dev = dev;
	xdbc.func = func;

	/* Map the IO memory: */
	xdbc.xhci_base = xdbc_map_pci_mmio(bus, dev, func);
	if (!xdbc.xhci_base)
		return -EINVAL;

	/* Locate DbC registers: */
	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
	if (!offset) {
		pr_notice("xhci host doesn't support debug capability\n");
		early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
		xdbc.xhci_base = NULL;
		xdbc.xhci_length = 0;

		return -ENODEV;
	}
	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);

	return 0;
}

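/*
 * Bring the hardware up: hand the controller off from the BIOS, build
 * the rings and contexts, and start the DbC. On failure, everything
 * that was memblock-allocated is given back.
 */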
int __init early_xdbc_setup_hardware(void)
{
	int ret;

	if (!xdbc.xdbc_reg)
		return -ENODEV;

	xdbc_bios_handoff();

	raw_spin_lock_init(&xdbc.lock);

	ret = xdbc_early_setup();
	if (ret) {
		pr_notice("failed to setup the connection to host\n");

		xdbc_free_ring(&xdbc.evt_ring);
		xdbc_free_ring(&xdbc.out_ring);
		xdbc_free_ring(&xdbc.in_ring);

		if (xdbc.table_dma)
			memblock_free(xdbc.table_dma, PAGE_SIZE);

		if (xdbc.out_dma)
			memblock_free(xdbc.out_dma, PAGE_SIZE);

		xdbc.table_base = NULL;
		xdbc.out_buf = NULL;
	}

	return ret;
}

static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
{
	u32 port_reg;

	port_reg = readl(&xdbc.xdbc_reg->portsc);
	if (port_reg & PORTSC_CONN_CHANGE) {
		xdbc_trace("connect status change event\n");

		/* Check whether cable unplugged: */
		if (!(port_reg & PORTSC_CONN_STATUS)) {
			xdbc.flags = 0;
			xdbc_trace("cable unplugged\n");
		}
	}

	if (port_reg & PORTSC_RESET_CHANGE)
		xdbc_trace("port reset change event\n");

	if (port_reg & PORTSC_LINK_CHANGE)
		xdbc_trace("port link status change event\n");

	if (port_reg & PORTSC_CONFIG_CHANGE)
		xdbc_trace("config error change\n");

	/* Write back the value to clear RW1C bits: */
	writel(port_reg, &xdbc.xdbc_reg->portsc);
}

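/*
 * Completion codes other than success/short-packet leave the endpoint
 * halted. The *_INTEL endpoint IDs cover hardware that numbers the DbC
 * endpoints differently from the xHCI specification.
 */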
static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
{
	u32 comp_code;
	int ep_id;

	comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
	ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));

	switch (comp_code) {
	case COMP_SUCCESS:
	case COMP_SHORT_PACKET:
		break;
	case COMP_TRB_ERROR:
	case COMP_BABBLE_DETECTED_ERROR:
	case COMP_USB_TRANSACTION_ERROR:
	case COMP_STALL_ERROR:
	default:
		if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL)
			xdbc.flags |= XDBC_FLAGS_OUT_STALL;
		if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL)
			xdbc.flags |= XDBC_FLAGS_IN_STALL;

		xdbc_trace("endpoint %d stalled\n", ep_id);
		break;
	}

	if (ep_id == XDBC_EPID_IN || ep_id == XDBC_EPID_IN_INTEL) {
		xdbc.flags &= ~XDBC_FLAGS_IN_PROCESS;
		xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
	} else if (ep_id == XDBC_EPID_OUT || ep_id == XDBC_EPID_OUT_INTEL) {
		xdbc.flags &= ~XDBC_FLAGS_OUT_PROCESS;
	} else {
		xdbc_trace("invalid endpoint id %d\n", ep_id);
	}
}

static void xdbc_handle_events(void)
{
	struct xdbc_trb *evt_trb;
	bool update_erdp = false;
	u32 reg;
	u8 cmd;

	cmd = read_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND);
	if (!(cmd & PCI_COMMAND_MASTER)) {
		cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
		write_pci_config_byte(xdbc.bus, xdbc.dev, xdbc.func, PCI_COMMAND, cmd);
	}

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
		return;

	/* Handle external reset events: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (!(reg & CTRL_DBC_ENABLE)) {
		if (xdbc_handle_external_reset()) {
			xdbc_trace("failed to recover connection\n");
			return;
		}
	}

	/* Handle configure-exit event: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (reg & CTRL_DBC_RUN_CHANGE) {
		writel(reg, &xdbc.xdbc_reg->control);
		if (reg & CTRL_DBC_RUN)
			xdbc.flags |= XDBC_FLAGS_CONFIGURED;
		else
			xdbc.flags &= ~XDBC_FLAGS_CONFIGURED;
	}

	/* Handle endpoint stall event: */
	reg = readl(&xdbc.xdbc_reg->control);
	if (reg & CTRL_HALT_IN_TR) {
		xdbc.flags |= XDBC_FLAGS_IN_STALL;
	} else {
		xdbc.flags &= ~XDBC_FLAGS_IN_STALL;
		if (!(xdbc.flags & XDBC_FLAGS_IN_PROCESS))
			xdbc_bulk_transfer(NULL, XDBC_MAX_PACKET, true);
	}

	if (reg & CTRL_HALT_OUT_TR)
		xdbc.flags |= XDBC_FLAGS_OUT_STALL;
	else
		xdbc.flags &= ~XDBC_FLAGS_OUT_STALL;

	/* Handle the events in the event ring: */
	evt_trb = xdbc.evt_ring.dequeue;
	while ((le32_to_cpu(evt_trb->field[3]) & TRB_CYCLE) == xdbc.evt_ring.cycle_state) {
		/*
		 * Add a barrier between reading the cycle flag and any
		 * reads of the event's flags/data below:
		 */
		rmb();

		switch ((le32_to_cpu(evt_trb->field[3]) & TRB_TYPE_BITMASK)) {
		case TRB_TYPE(TRB_PORT_STATUS):
			xdbc_handle_port_status(evt_trb);
			break;
		case TRB_TYPE(TRB_TRANSFER):
			xdbc_handle_tx_event(evt_trb);
			break;
		default:
			break;
		}

		++(xdbc.evt_ring.dequeue);
		if (xdbc.evt_ring.dequeue == &xdbc.evt_seg.trbs[TRBS_PER_SEGMENT]) {
			xdbc.evt_ring.dequeue = xdbc.evt_seg.trbs;
			xdbc.evt_ring.cycle_state ^= 1;
		}

		evt_trb = xdbc.evt_ring.dequeue;
		update_erdp = true;
	}

	/* Update event ring dequeue pointer: */
	if (update_erdp)
		xdbc_write64(__pa(xdbc.evt_ring.dequeue), &xdbc.xdbc_reg->erdp);
}

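/*
 * Console writer: poll for events, then wait up to ~2 seconds in 100 us
 * steps for the previous OUT transfer to retire before queueing the
 * next one. From NMI context the lock is only trylock'ed, so a printk
 * from NMI cannot deadlock against an interrupted writer.
 */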
static int xdbc_bulk_write(const char *bytes, int size)
{
	int ret, timeout = 0;
	unsigned long flags;

retry:
	if (in_nmi()) {
		if (!raw_spin_trylock_irqsave(&xdbc.lock, flags))
			return -EAGAIN;
	} else {
		raw_spin_lock_irqsave(&xdbc.lock, flags);
	}

	xdbc_handle_events();

	/* Check completion of the previous request: */
	if ((xdbc.flags & XDBC_FLAGS_OUT_PROCESS) && (timeout < 2000000)) {
		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		udelay(100);
		timeout += 100;
		goto retry;
	}

	if (xdbc.flags & XDBC_FLAGS_OUT_PROCESS) {
		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		xdbc_trace("previous transfer not completed yet\n");

		return -ETIMEDOUT;
	}

	ret = xdbc_bulk_transfer((void *)bytes, size, false);
	raw_spin_unlock_irqrestore(&xdbc.lock, flags);

	return ret;
}

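/*
 * console->write callback: expand '\n' into "\r\n" and push the text
 * out in XDBC_MAX_PACKET-sized chunks.
 */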
static void early_xdbc_write(struct console *con, const char *str, u32 n)
{
	static char buf[XDBC_MAX_PACKET];
	int chunk, ret;
	int use_cr = 0;

	if (!xdbc.xdbc_reg)
		return;
	memset(buf, 0, XDBC_MAX_PACKET);
	while (n > 0) {
		for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {

			if (!use_cr && *str == '\n') {
				use_cr = 1;
				buf[chunk] = '\r';
				str--;
				n++;
				continue;
			}

			if (use_cr)
				use_cr = 0;
			buf[chunk] = *str;
		}

		if (chunk > 0) {
			ret = xdbc_bulk_write(buf, chunk);
			if (ret < 0)
				xdbc_trace("missed message {%s}\n", buf);
		}
	}
}

static struct console early_xdbc_console = {
	.name		= "earlyxdbc",
	.write		= early_xdbc_write,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};

void __init early_xdbc_register_console(void)
{
	if (early_console)
		return;

	early_console = &early_xdbc_console;
	if (early_console_keep)
		early_console->flags &= ~CON_BOOT;
	else
		early_console->flags |= CON_BOOT;
	register_console(early_console);
}

static void xdbc_unregister_console(void)
{
	if (early_xdbc_console.flags & CON_ENABLED)
		unregister_console(&early_xdbc_console);
}

static int xdbc_scrub_function(void *ptr)
{
	unsigned long flags;

	while (true) {
		raw_spin_lock_irqsave(&xdbc.lock, flags);
		xdbc_handle_events();

		if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED)) {
			raw_spin_unlock_irqrestore(&xdbc.lock, flags);
			break;
		}

		raw_spin_unlock_irqrestore(&xdbc.lock, flags);
		schedule_timeout_interruptible(1);
	}

	xdbc_unregister_console();
	writel(0, &xdbc.xdbc_reg->control);
	xdbc_trace("dbc scrub function exits\n");

	return 0;
}

static int __init xdbc_init(void)
{
	unsigned long flags;
	void __iomem *base;
	int ret = 0;
	u32 offset;

	if (!(xdbc.flags & XDBC_FLAGS_INITIALIZED))
		return 0;

	/*
	 * It's time to shut down the DbC, so that the debug
	 * port can be reused by the host controller:
	 */
	if (early_xdbc_console.index == -1 ||
	    (early_xdbc_console.flags & CON_BOOT)) {
		xdbc_trace("hardware not used anymore\n");
		goto free_and_quit;
	}

	base = ioremap(xdbc.xhci_start, xdbc.xhci_length);
	if (!base) {
		xdbc_trace("failed to remap the io address\n");
		ret = -ENOMEM;
		goto free_and_quit;
	}

	raw_spin_lock_irqsave(&xdbc.lock, flags);
	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);
	xdbc.xhci_base = base;
	offset = xhci_find_next_ext_cap(xdbc.xhci_base, 0, XHCI_EXT_CAPS_DEBUG);
	xdbc.xdbc_reg = (struct xdbc_regs __iomem *)(xdbc.xhci_base + offset);
	raw_spin_unlock_irqrestore(&xdbc.lock, flags);

	kthread_run(xdbc_scrub_function, NULL, "%s", "xdbc");

	return 0;

free_and_quit:
	xdbc_free_ring(&xdbc.evt_ring);
	xdbc_free_ring(&xdbc.out_ring);
	xdbc_free_ring(&xdbc.in_ring);
	memblock_free(xdbc.table_dma, PAGE_SIZE);
	memblock_free(xdbc.out_dma, PAGE_SIZE);
	writel(0, &xdbc.xdbc_reg->control);
	early_iounmap(xdbc.xhci_base, xdbc.xhci_length);

	return ret;
}
subsys_initcall(xdbc_init);