1 // SPDX-License-Identifier: GPL-2.0
5 * Copyright (C) 2020, Intel Corporation
6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
9 #include <kunit/test.h>
10 #include <linux/idr.h>
/*
 * KUnit resource init callback: @context is the caller's struct ida to be
 * managed as a test resource.
 * NOTE(review): listing is truncated here — the statements that initialize
 * the ida and store it in res->data are not visible; confirm against the
 * full source.
 */
15 static int __ida_init(struct kunit_resource *res, void *context)
17 	struct ida *ida = context;
/*
 * KUnit resource teardown callback: recovers the ida stashed in res->data.
 * NOTE(review): listing is truncated — the ida_destroy() call presumably
 * follows; confirm against the full source.
 */
24 static void __ida_destroy(struct kunit_resource *res)
26 	struct ida *ida = res->data;
/*
 * Register @ida as a KUnit-managed resource so it is initialized via
 * __ida_init() and automatically destroyed via __ida_destroy() when the
 * test finishes.
 */
31 static void kunit_ida_init(struct kunit *test, struct ida *ida)
33 	kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
/*
 * Allocate a fake tb_switch for tests with KUnit-managed (auto-freed)
 * memory. @route identifies the switch position in the topology, @upstream_port
 * is the port number facing the parent, and @max_port_number is the highest
 * valid port index (ports array holds max_port_number + 1 entries).
 * Each port gets its in/out HopID IDAs registered as test resources.
 * NOTE(review): listing is truncated — NULL checks after the allocations and
 * the final return are not visible here; confirm against the full source.
 */
36 static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
37 u8 upstream_port, u8 max_port_number)
43 	sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
47 	sw->config.upstream_port_number = upstream_port;
48 	sw->config.depth = tb_route_length(route);
49 	sw->config.route_hi = upper_32_bits(route);
50 	sw->config.route_lo = lower_32_bits(route);
51 	sw->config.enabled = 0;
52 	sw->config.max_port_number = max_port_number;
/* Ports are indexed 0..max_port_number inclusive, hence the +1. */
54 	size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
55 	sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
59 	for (i = 0; i <= sw->config.max_port_number; i++) {
61 		sw->ports[i].port = i;
62 		sw->ports[i].config.port_number = i;
/* HopID allocators are test resources so they are torn down automatically. */
64 		kunit_ida_init(test, &sw->ports[i].in_hopids);
65 		kunit_ida_init(test, &sw->ports[i].out_hopids);
/*
 * Build a fake host router (route 0, upstream port 7, 13 ports):
 *  - ports 1-4: two lane-adapter pairs (1/2 and 3/4 are dual-link partners,
 *    the second of each pair carrying link_nr = 1)
 *  - ports 5-6: DP IN adapters (cap_adap = -1 marks no adapter capability)
 *  - port 7: NHI
 *  - ports 8-9: PCIe downstream adapters
 *  - ports 10-11: disabled
 *  - ports 12-13: USB3 downstream adapters
 * NOTE(review): listing is truncated — the NULL check after alloc_switch()
 * and the final return are not visible; confirm against the full source.
 */
72 static struct tb_switch *alloc_host(struct kunit *test)
76 	sw = alloc_switch(test, 0, 7, 13);
/* Intel vendor/device IDs for the fake host controller. */
80 	sw->config.vendor_id = 0x8086;
81 	sw->config.device_id = 0x9a1b;
83 	sw->ports[0].config.type = TB_TYPE_PORT;
84 	sw->ports[0].config.max_in_hop_id = 7;
85 	sw->ports[0].config.max_out_hop_id = 7;
87 	sw->ports[1].config.type = TB_TYPE_PORT;
88 	sw->ports[1].config.max_in_hop_id = 19;
89 	sw->ports[1].config.max_out_hop_id = 19;
90 	sw->ports[1].dual_link_port = &sw->ports[2];
92 	sw->ports[2].config.type = TB_TYPE_PORT;
93 	sw->ports[2].config.max_in_hop_id = 19;
94 	sw->ports[2].config.max_out_hop_id = 19;
95 	sw->ports[2].dual_link_port = &sw->ports[1];
96 	sw->ports[2].link_nr = 1;
98 	sw->ports[3].config.type = TB_TYPE_PORT;
99 	sw->ports[3].config.max_in_hop_id = 19;
100 	sw->ports[3].config.max_out_hop_id = 19;
101 	sw->ports[3].dual_link_port = &sw->ports[4];
103 	sw->ports[4].config.type = TB_TYPE_PORT;
104 	sw->ports[4].config.max_in_hop_id = 19;
105 	sw->ports[4].config.max_out_hop_id = 19;
106 	sw->ports[4].dual_link_port = &sw->ports[3];
107 	sw->ports[4].link_nr = 1;
109 	sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
110 	sw->ports[5].config.max_in_hop_id = 9;
111 	sw->ports[5].config.max_out_hop_id = 9;
112 	sw->ports[5].cap_adap = -1;
114 	sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
115 	sw->ports[6].config.max_in_hop_id = 9;
116 	sw->ports[6].config.max_out_hop_id = 9;
117 	sw->ports[6].cap_adap = -1;
119 	sw->ports[7].config.type = TB_TYPE_NHI;
120 	sw->ports[7].config.max_in_hop_id = 11;
121 	sw->ports[7].config.max_out_hop_id = 11;
122 	sw->ports[7].config.nfc_credits = 0x41800000;
124 	sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
125 	sw->ports[8].config.max_in_hop_id = 8;
126 	sw->ports[8].config.max_out_hop_id = 8;
128 	sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
129 	sw->ports[9].config.max_in_hop_id = 8;
130 	sw->ports[9].config.max_out_hop_id = 8;
132 	sw->ports[10].disabled = true;
133 	sw->ports[11].disabled = true;
135 	sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
136 	sw->ports[12].config.max_in_hop_id = 8;
137 	sw->ports[12].config.max_out_hop_id = 8;
139 	sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
140 	sw->ports[13].config.max_in_hop_id = 8;
141 	sw->ports[13].config.max_out_hop_id = 8;
/*
 * Build a fake device router at @route (upstream port 1, 20 ports):
 *  - ports 1-8: four lane-adapter pairs (1/2, 3/4, 5/6, 7/8; the second of
 *    each pair has link_nr = 1)
 *  - port 9: PCIe upstream, ports 10-12: PCIe downstream
 *  - ports 13-14: DP OUT adapters (cap_adap = -1)
 *  - port 15: disabled
 *  - port 16: USB3 upstream, ports 17-19: USB3 downstream
 * If @parent is non-NULL the device is wired to the parent's port at @route
 * (both primary and secondary lanes when both sides have dual-link ports),
 * and @bonded marks the lane pair as bonded.
 * NOTE(review): listing is truncated — NULL checks, the primary-lane bonded
 * assignment (only port->dual_link_port->bonded is visible) and the final
 * return are not visible; confirm against the full source.
 */
146 static struct tb_switch *alloc_dev_default(struct kunit *test,
147 struct tb_switch *parent,
148 u64 route, bool bonded)
150 	struct tb_port *port, *upstream_port;
151 	struct tb_switch *sw;
153 	sw = alloc_switch(test, route, 1, 19);
/* Intel vendor/device IDs for the fake device router. */
157 	sw->config.vendor_id = 0x8086;
158 	sw->config.device_id = 0x15ef;
160 	sw->ports[0].config.type = TB_TYPE_PORT;
161 	sw->ports[0].config.max_in_hop_id = 8;
162 	sw->ports[0].config.max_out_hop_id = 8;
164 	sw->ports[1].config.type = TB_TYPE_PORT;
165 	sw->ports[1].config.max_in_hop_id = 19;
166 	sw->ports[1].config.max_out_hop_id = 19;
167 	sw->ports[1].dual_link_port = &sw->ports[2];
169 	sw->ports[2].config.type = TB_TYPE_PORT;
170 	sw->ports[2].config.max_in_hop_id = 19;
171 	sw->ports[2].config.max_out_hop_id = 19;
172 	sw->ports[2].dual_link_port = &sw->ports[1];
173 	sw->ports[2].link_nr = 1;
175 	sw->ports[3].config.type = TB_TYPE_PORT;
176 	sw->ports[3].config.max_in_hop_id = 19;
177 	sw->ports[3].config.max_out_hop_id = 19;
178 	sw->ports[3].dual_link_port = &sw->ports[4];
180 	sw->ports[4].config.type = TB_TYPE_PORT;
181 	sw->ports[4].config.max_in_hop_id = 19;
182 	sw->ports[4].config.max_out_hop_id = 19;
183 	sw->ports[4].dual_link_port = &sw->ports[3];
184 	sw->ports[4].link_nr = 1;
186 	sw->ports[5].config.type = TB_TYPE_PORT;
187 	sw->ports[5].config.max_in_hop_id = 19;
188 	sw->ports[5].config.max_out_hop_id = 19;
189 	sw->ports[5].dual_link_port = &sw->ports[6];
191 	sw->ports[6].config.type = TB_TYPE_PORT;
192 	sw->ports[6].config.max_in_hop_id = 19;
193 	sw->ports[6].config.max_out_hop_id = 19;
194 	sw->ports[6].dual_link_port = &sw->ports[5];
195 	sw->ports[6].link_nr = 1;
197 	sw->ports[7].config.type = TB_TYPE_PORT;
198 	sw->ports[7].config.max_in_hop_id = 19;
199 	sw->ports[7].config.max_out_hop_id = 19;
200 	sw->ports[7].dual_link_port = &sw->ports[8];
202 	sw->ports[8].config.type = TB_TYPE_PORT;
203 	sw->ports[8].config.max_in_hop_id = 19;
204 	sw->ports[8].config.max_out_hop_id = 19;
205 	sw->ports[8].dual_link_port = &sw->ports[7];
206 	sw->ports[8].link_nr = 1;
208 	sw->ports[9].config.type = TB_TYPE_PCIE_UP;
209 	sw->ports[9].config.max_in_hop_id = 8;
210 	sw->ports[9].config.max_out_hop_id = 8;
212 	sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
213 	sw->ports[10].config.max_in_hop_id = 8;
214 	sw->ports[10].config.max_out_hop_id = 8;
216 	sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
217 	sw->ports[11].config.max_in_hop_id = 8;
218 	sw->ports[11].config.max_out_hop_id = 8;
220 	sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
221 	sw->ports[12].config.max_in_hop_id = 8;
222 	sw->ports[12].config.max_out_hop_id = 8;
224 	sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
225 	sw->ports[13].config.max_in_hop_id = 9;
226 	sw->ports[13].config.max_out_hop_id = 9;
227 	sw->ports[13].cap_adap = -1;
229 	sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
230 	sw->ports[14].config.max_in_hop_id = 9;
231 	sw->ports[14].config.max_out_hop_id = 9;
232 	sw->ports[14].cap_adap = -1;
234 	sw->ports[15].disabled = true;
236 	sw->ports[16].config.type = TB_TYPE_USB3_UP;
237 	sw->ports[16].config.max_in_hop_id = 8;
238 	sw->ports[16].config.max_out_hop_id = 8;
240 	sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
241 	sw->ports[17].config.max_in_hop_id = 8;
242 	sw->ports[17].config.max_out_hop_id = 8;
244 	sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
245 	sw->ports[18].config.max_in_hop_id = 8;
246 	sw->ports[18].config.max_out_hop_id = 8;
248 	sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
249 	sw->ports[19].config.max_in_hop_id = 8;
250 	sw->ports[19].config.max_out_hop_id = 8;
/* Wire this device to the parent's downstream port at @route. */
256 	upstream_port = tb_upstream_port(sw);
257 	port = tb_port_at(route, parent);
258 	port->remote = upstream_port;
259 	upstream_port->remote = port;
/* Also cross-link the secondary lanes when both sides have them. */
260 	if (port->dual_link_port && upstream_port->dual_link_port) {
261 		port->dual_link_port->remote = upstream_port->dual_link_port;
262 		upstream_port->dual_link_port->remote = port->dual_link_port;
/* Bonding is used */
268 		port->dual_link_port->bonded = true;
269 		upstream_port->bonded = true;
270 		upstream_port->dual_link_port->bonded = true;
/*
 * Like alloc_dev_default() but converts the device's two DP OUT adapters
 * (ports 13-14) into DP IN adapters, so the device can act as a DP source
 * in the path tests.
 */
276 static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
277 struct tb_switch *parent,
278 u64 route, bool bonded)
280 	struct tb_switch *sw;
282 	sw = alloc_dev_default(test, parent, route, bonded);
286 	sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
287 	sw->ports[13].config.max_in_hop_id = 9;
288 	sw->ports[13].config.max_out_hop_id = 9;
290 	sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
291 	sw->ports[14].config.max_in_hop_id = 9;
292 	sw->ports[14].config.max_out_hop_id = 9;
/*
 * Walk a path fully inside one switch: starting from a host DP IN port the
 * first tb_next_port_on_path() step lands directly on dst_port, and a second
 * step returns NULL (end of path).
 * NOTE(review): listing is truncated — the dst_port assignment (presumably
 * another host port) is not visible; confirm against the full source.
 */
297 static void tb_test_path_basic(struct kunit *test)
299 	struct tb_port *src_port, *dst_port, *p;
300 	struct tb_switch *host;
302 	host = alloc_host(test);
304 	src_port = &host->ports[5];
307 	p = tb_next_port_on_path(src_port, dst_port, NULL);
308 	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
310 	p = tb_next_port_on_path(src_port, dst_port, p);
311 	KUNIT_EXPECT_TRUE(test, !p);
/*
 * Walk between two routers that are NOT cabled together: the walk proceeds
 * from the source adapter to the source router's downstream lane port and
 * then terminates (NULL) because there is no remote. Checked from both
 * directions.
 */
314 static void tb_test_path_not_connected_walk(struct kunit *test)
316 	struct tb_port *src_port, *dst_port, *p;
317 	struct tb_switch *host, *dev;
319 	host = alloc_host(test);
/* No connection between host and dev */
321 	dev = alloc_dev_default(test, NULL, 3, true);
323 	src_port = &host->ports[12];
324 	dst_port = &dev->ports[16];
326 	p = tb_next_port_on_path(src_port, dst_port, NULL);
327 	KUNIT_EXPECT_PTR_EQ(test, p, src_port);
329 	p = tb_next_port_on_path(src_port, dst_port, p);
/* Route 3 means the host side would be lane port 3. */
330 	KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);
332 	p = tb_next_port_on_path(src_port, dst_port, p);
333 	KUNIT_EXPECT_TRUE(test, !p);
/* Other direction */
337 	p = tb_next_port_on_path(dst_port, src_port, NULL);
338 	KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
340 	p = tb_next_port_on_path(dst_port, src_port, p);
341 	KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);
343 	p = tb_next_port_on_path(dst_port, src_port, p);
344 	KUNIT_EXPECT_TRUE(test, !p);
/*
 * Expected (route, port, type) triple for one step of a path walk.
 * NOTE(review): listing is truncated — the route and port members are not
 * visible here; confirm against the full source.
 */
347 struct port_expectation {
350 	enum tb_port_type type;
/*
 * Walk a single-hop path (host PCIe down -> device PCIe up over one link)
 * in both directions and compare every visited port against test_data.
 * The forward walk counts i up to ARRAY_SIZE; the reverse walk counts it
 * down to -1.
 */
353 static void tb_test_path_single_hop_walk(struct kunit *test)
/*
356 	 * Walks from Host PCIe downstream port to Device #1 PCIe
 */
364 	static const struct port_expectation test_data[] = {
365 		{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
366 		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
367 		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
368 		{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
370 	struct tb_port *src_port, *dst_port, *p;
371 	struct tb_switch *host, *dev;
374 	host = alloc_host(test);
375 	dev = alloc_dev_default(test, host, 1, true);
377 	src_port = &host->ports[8];
378 	dst_port = &dev->ports[9];
/* Walk both directions */
383 	tb_for_each_port_on_path(src_port, dst_port, p) {
384 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
385 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
386 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
387 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
392 	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
/* Reverse walk visits the same ports in the opposite order. */
394 	i = ARRAY_SIZE(test_data) - 1;
395 	tb_for_each_port_on_path(dst_port, src_port, p) {
396 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
397 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
398 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
399 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
404 	KUNIT_EXPECT_EQ(test, i, -1);
/*
 * Walk a two-link daisy chain (host DP IN -> dev1 -> dev2 DP OUT) in both
 * directions and compare each visited port against test_data.
 */
407 static void tb_test_path_daisy_chain_walk(struct kunit *test)
/*
410 	 * Walks from Host DP IN to Device #2 DP OUT.
 */
420 	static const struct port_expectation test_data[] = {
421 		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
422 		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
423 		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
424 		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
425 		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
426 		{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
428 	struct tb_port *src_port, *dst_port, *p;
429 	struct tb_switch *host, *dev1, *dev2;
432 	host = alloc_host(test);
433 	dev1 = alloc_dev_default(test, host, 0x1, true);
434 	dev2 = alloc_dev_default(test, dev1, 0x301, true);
436 	src_port = &host->ports[5];
437 	dst_port = &dev2->ports[13];
/* Walk both directions */
442 	tb_for_each_port_on_path(src_port, dst_port, p) {
443 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
444 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
445 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
446 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
451 	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
453 	i = ARRAY_SIZE(test_data) - 1;
454 	tb_for_each_port_on_path(dst_port, src_port, p) {
455 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
456 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
457 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
458 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
463 	KUNIT_EXPECT_EQ(test, i, -1);
/*
 * Walk through a small tree: dev1 has three children (0x301, 0x501, 0x701);
 * the path goes host DP IN -> dev1 -> dev3 (route 0x501) DP OUT, ignoring
 * the sibling devices. Checked in both directions against test_data.
 */
466 static void tb_test_path_simple_tree_walk(struct kunit *test)
/*
469 	 * Walks from Host DP IN to Device #3 DP OUT.
477 	 * [Device #2] | [Device #4]
 */
481 	static const struct port_expectation test_data[] = {
482 		{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
483 		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
484 		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
485 		{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
486 		{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
487 		{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
489 	struct tb_port *src_port, *dst_port, *p;
490 	struct tb_switch *host, *dev1, *dev3;
493 	host = alloc_host(test);
494 	dev1 = alloc_dev_default(test, host, 0x1, true);
/* Siblings not on the path; return values deliberately unused. */
495 	alloc_dev_default(test, dev1, 0x301, true);
496 	dev3 = alloc_dev_default(test, dev1, 0x501, true);
497 	alloc_dev_default(test, dev1, 0x701, true);
499 	src_port = &host->ports[5];
500 	dst_port = &dev3->ports[13];
/* Walk both directions */
505 	tb_for_each_port_on_path(src_port, dst_port, p) {
506 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
507 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
508 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
509 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
514 	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
516 	i = ARRAY_SIZE(test_data) - 1;
517 	tb_for_each_port_on_path(dst_port, src_port, p) {
518 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
519 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
520 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
521 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
526 	KUNIT_EXPECT_EQ(test, i, -1);
/*
 * Walk a path that crosses multiple branches of a deeper tree: from the
 * DP IN on Device #3 (route 0x50301, built with alloc_dev_with_dpin) up
 * through dev2 and dev1, then down through dev5/dev6/dev7 to Device #9
 * (route 0x503070701) DP OUT. Checked in both directions.
 */
529 static void tb_test_path_complex_tree_walk(struct kunit *test)
/*
532 	 * Walks from Device #3 DP IN to Device #9 DP OUT.
540 	 * [Device #2] | [Device #5]
542 	 *      1 | [Device #4]  \ 1
543 	 * [Device #3]        [Device #6]
 */
552 	static const struct port_expectation test_data[] = {
553 		{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
554 		{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
555 		{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
556 		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
557 		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
558 		{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
559 		{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
560 		{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
561 		{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
562 		{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
563 		{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
564 		{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
565 		{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
566 		{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
568 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
569 	struct tb_port *src_port, *dst_port, *p;
572 	host = alloc_host(test);
573 	dev1 = alloc_dev_default(test, host, 0x1, true);
574 	dev2 = alloc_dev_default(test, dev1, 0x301, true);
/* Device #3 needs DP IN adapters, so use the dpin variant. */
575 	dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
576 	alloc_dev_default(test, dev1, 0x501, true);
577 	dev5 = alloc_dev_default(test, dev1, 0x701, true);
578 	dev6 = alloc_dev_default(test, dev5, 0x70701, true);
579 	dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
580 	alloc_dev_default(test, dev7, 0x303070701, true);
581 	dev9 = alloc_dev_default(test, dev7, 0x503070701, true);
583 	src_port = &dev3->ports[13];
584 	dst_port = &dev9->ports[14];
/* Walk both directions */
589 	tb_for_each_port_on_path(src_port, dst_port, p) {
590 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
591 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
592 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
593 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
598 	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
600 	i = ARRAY_SIZE(test_data) - 1;
601 	tb_for_each_port_on_path(dst_port, src_port, p) {
602 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
603 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
604 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
605 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
610 	KUNIT_EXPECT_EQ(test, i, -1);
/*
 * Walk the longest supported topology: two six-deep chains hanging off the
 * host (routes built from 0x1/0x3 with 0x3 hops), path going from Device #6
 * DP IN all the way up to the host and back down to Device #12 DP OUT —
 * 26 port steps in total. Checked in both directions against test_data.
 */
613 static void tb_test_path_max_length_walk(struct kunit *test)
615 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
616 	struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
617 	struct tb_port *src_port, *dst_port, *p;
/*
621 	 * Walks from Device #6 DP IN to Device #12 DP OUT.
626 	 * [Device #1]           [Device #7]
629 	 * [Device #2]           [Device #8]
632 	 * [Device #3]           [Device #9]
635 	 * [Device #4]           [Device #10]
638 	 * [Device #5]           [Device #11]
641 	 * [Device #6]           [Device #12]
 */
643 	static const struct port_expectation test_data[] = {
644 		{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
645 		{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
646 		{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
647 		{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
648 		{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
649 		{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
650 		{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
651 		{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
652 		{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
653 		{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
654 		{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
655 		{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
656 		{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
657 		{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
658 		{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
659 		{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
660 		{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
661 		{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
662 		{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
663 		{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
664 		{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
665 		{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
666 		{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
667 		{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
668 		{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
669 		{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
672 	host = alloc_host(test);
673 	dev1 = alloc_dev_default(test, host, 0x1, true);
674 	dev2 = alloc_dev_default(test, dev1, 0x301, true);
675 	dev3 = alloc_dev_default(test, dev2, 0x30301, true);
676 	dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
677 	dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
/* The source end needs DP IN adapters. */
678 	dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
679 	dev7 = alloc_dev_default(test, host, 0x3, true);
680 	dev8 = alloc_dev_default(test, dev7, 0x303, true);
681 	dev9 = alloc_dev_default(test, dev8, 0x30303, true);
682 	dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
683 	dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
684 	dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
686 	src_port = &dev6->ports[13];
687 	dst_port = &dev12->ports[13];
/* Walk both directions */
692 	tb_for_each_port_on_path(src_port, dst_port, p) {
693 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
694 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
695 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
696 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
701 	KUNIT_EXPECT_EQ(test, i, (int)ARRAY_SIZE(test_data));
703 	i = ARRAY_SIZE(test_data) - 1;
704 	tb_for_each_port_on_path(dst_port, src_port, p) {
705 		KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
706 		KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
707 		KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
708 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
713 	KUNIT_EXPECT_EQ(test, i, -1);
/*
 * tb_path_alloc() must fail (return NULL) when the two adapters are on
 * routers that are not cabled together — tried on both link 0 and link 1.
 */
716 static void tb_test_path_not_connected(struct kunit *test)
718 	struct tb_switch *host, *dev1, *dev2;
719 	struct tb_port *down, *up;
720 	struct tb_path *path;
722 	host = alloc_host(test);
723 	dev1 = alloc_dev_default(test, host, 0x3, false);
/* Not connected to anything */
725 	dev2 = alloc_dev_default(test, NULL, 0x303, false);
727 	down = &dev1->ports[10];
728 	up = &dev2->ports[9];
730 	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
731 	KUNIT_ASSERT_TRUE(test, path == NULL);
732 	path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
733 	KUNIT_ASSERT_TRUE(test, path == NULL);
/*
 * Expected in/out adapter types for one hop of an allocated tb_path.
 * NOTE(review): listing is truncated — the route and in_port/out_port
 * members are not visible here; confirm against the full source.
 */
736 struct hop_expectation {
739 	enum tb_port_type in_type;
741 	enum tb_port_type out_type;
/*
 * Allocate a PCIe path over link 0 of an unbonded link (bonded = false)
 * and verify each hop's in/out port against test_data.
 * NOTE(review): the `up` assignment and loop-closing braces fall outside
 * this truncated listing.
 */
744 static void tb_test_path_not_bonded_lane0(struct kunit *test)
/*
747 	 * PCIe path from host to device using lane 0.
 */
754 	static const struct hop_expectation test_data[] = {
758 			.in_type = TB_TYPE_PCIE_DOWN,
760 			.out_type = TB_TYPE_PORT,
765 			.in_type = TB_TYPE_PORT,
767 			.out_type = TB_TYPE_PCIE_UP,
770 	struct tb_switch *host, *dev;
771 	struct tb_port *down, *up;
772 	struct tb_path *path;
775 	host = alloc_host(test);
776 	dev = alloc_dev_default(test, host, 0x3, false);
778 	down = &host->ports[9];
781 	path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
782 	KUNIT_ASSERT_TRUE(test, path != NULL);
783 	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
784 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
785 		const struct tb_port *in_port, *out_port;
787 		in_port = path->hops[i].in_port;
788 		out_port = path->hops[i].out_port;
790 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
791 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
792 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
793 				test_data[i].in_type);
794 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
795 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
796 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
797 				test_data[i].out_type);
/*
 * Allocate a DP video path over lane 1 of an unbonded link and verify each
 * hop against test_data. The comment below explains why lane-1-only paths
 * exist at all (Thunderbolt 1 hardware).
 */
802 static void tb_test_path_not_bonded_lane1(struct kunit *test)
/*
805 	 * DP Video path from host to device using lane 1. Paths like
806 	 * these are only used with Thunderbolt 1 devices where lane
807 	 * bonding is not possible. USB4 specifically does not allow
808 	 * paths like this (you either use lane 0 where lane 1 is
809 	 * disabled or both lanes are bonded).
 */
816 	static const struct hop_expectation test_data[] = {
820 			.in_type = TB_TYPE_DP_HDMI_IN,
822 			.out_type = TB_TYPE_PORT,
827 			.in_type = TB_TYPE_PORT,
829 			.out_type = TB_TYPE_DP_HDMI_OUT,
832 	struct tb_switch *host, *dev;
833 	struct tb_port *in, *out;
834 	struct tb_path *path;
837 	host = alloc_host(test);
838 	dev = alloc_dev_default(test, host, 0x1, false);
840 	in = &host->ports[5];
841 	out = &dev->ports[13];
843 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
844 	KUNIT_ASSERT_TRUE(test, path != NULL);
845 	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
846 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
847 		const struct tb_port *in_port, *out_port;
849 		in_port = path->hops[i].in_port;
850 		out_port = path->hops[i].out_port;
852 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
853 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
854 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
855 				test_data[i].in_type);
856 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
857 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
858 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
859 				test_data[i].out_type);
/*
 * Lane-1 DP path through a three-device chain (host -> dev1 -> dev2 ->
 * dev3, all unbonded); verifies all four hops against test_data.
 */
864 static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
/*
867 	 * DP Video path from host to device 3 using lane 1.
 */
880 	static const struct hop_expectation test_data[] = {
884 			.in_type = TB_TYPE_DP_HDMI_IN,
886 			.out_type = TB_TYPE_PORT,
891 			.in_type = TB_TYPE_PORT,
893 			.out_type = TB_TYPE_PORT,
898 			.in_type = TB_TYPE_PORT,
900 			.out_type = TB_TYPE_PORT,
905 			.in_type = TB_TYPE_PORT,
907 			.out_type = TB_TYPE_DP_HDMI_OUT,
910 	struct tb_switch *host, *dev1, *dev2, *dev3;
911 	struct tb_port *in, *out;
912 	struct tb_path *path;
915 	host = alloc_host(test);
916 	dev1 = alloc_dev_default(test, host, 0x1, false);
917 	dev2 = alloc_dev_default(test, dev1, 0x701, false);
918 	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
920 	in = &host->ports[5];
921 	out = &dev3->ports[13];
923 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
924 	KUNIT_ASSERT_TRUE(test, path != NULL);
925 	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
926 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
927 		const struct tb_port *in_port, *out_port;
929 		in_port = path->hops[i].in_port;
930 		out_port = path->hops[i].out_port;
932 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
933 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
934 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
935 				test_data[i].in_type);
936 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
937 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
938 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
939 				test_data[i].out_type);
/*
 * Same chain as the previous test but in the opposite direction: from
 * Device #3's DP IN (alloc_dev_with_dpin) back to the host's DP IN over
 * lane 1; verifies all four hops against test_data.
 */
944 static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
/*
947 	 * DP Video path from device 3 to host using lane 1.
 */
960 	static const struct hop_expectation test_data[] = {
964 			.in_type = TB_TYPE_DP_HDMI_IN,
966 			.out_type = TB_TYPE_PORT,
971 			.in_type = TB_TYPE_PORT,
973 			.out_type = TB_TYPE_PORT,
978 			.in_type = TB_TYPE_PORT,
980 			.out_type = TB_TYPE_PORT,
985 			.in_type = TB_TYPE_PORT,
/* Destination is the host's DP IN adapter, hence IN (not OUT) here. */
987 			.out_type = TB_TYPE_DP_HDMI_IN,
990 	struct tb_switch *host, *dev1, *dev2, *dev3;
991 	struct tb_port *in, *out;
992 	struct tb_path *path;
995 	host = alloc_host(test);
996 	dev1 = alloc_dev_default(test, host, 0x1, false);
997 	dev2 = alloc_dev_default(test, dev1, 0x701, false);
998 	dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);
1000 	in = &dev3->ports[13];
1001 	out = &host->ports[5];
1003 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1004 	KUNIT_ASSERT_TRUE(test, path != NULL);
1005 	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
1006 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1007 		const struct tb_port *in_port, *out_port;
1009 		in_port = path->hops[i].in_port;
1010 		out_port = path->hops[i].out_port;
1012 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1013 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1014 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1015 				test_data[i].in_type);
1016 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1017 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1018 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1019 				test_data[i].out_type);
/*
 * Lane-1 DP path through a chain where the first (host-dev1) and last
 * (dev3-dev4) links are bonded while the middle links are not; verifies
 * the five resulting hops against test_data.
 */
1024 static void tb_test_path_mixed_chain(struct kunit *test)
/*
1027 	 * DP Video path from host to device 4 where first and last link
 */
1044 	static const struct hop_expectation test_data[] = {
1048 			.in_type = TB_TYPE_DP_HDMI_IN,
1050 			.out_type = TB_TYPE_PORT,
1055 			.in_type = TB_TYPE_PORT,
1057 			.out_type = TB_TYPE_PORT,
1062 			.in_type = TB_TYPE_PORT,
1064 			.out_type = TB_TYPE_PORT,
1069 			.in_type = TB_TYPE_PORT,
1071 			.out_type = TB_TYPE_PORT,
1076 			.in_type = TB_TYPE_PORT,
1078 			.out_type = TB_TYPE_DP_HDMI_OUT,
1081 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1082 	struct tb_port *in, *out;
1083 	struct tb_path *path;
1086 	host = alloc_host(test);
/* Mixed bonding: ends bonded, middle unbonded. */
1087 	dev1 = alloc_dev_default(test, host, 0x1, true);
1088 	dev2 = alloc_dev_default(test, dev1, 0x701, false);
1089 	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1090 	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1092 	in = &host->ports[5];
1093 	out = &dev4->ports[13];
1095 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1096 	KUNIT_ASSERT_TRUE(test, path != NULL);
1097 	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
1098 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1099 		const struct tb_port *in_port, *out_port;
1101 		in_port = path->hops[i].in_port;
1102 		out_port = path->hops[i].out_port;
1104 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1105 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1106 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1107 				test_data[i].in_type);
1108 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1109 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1110 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1111 				test_data[i].out_type);
/*
 * Same mixed-bonding chain as above, walked from Device #4's DP OUT back
 * to the host's DP IN; verifies the five hops against test_data.
 */
1116 static void tb_test_path_mixed_chain_reverse(struct kunit *test)
/*
1119 	 * DP Video path from device 4 to host where first and last link
 */
1136 	static const struct hop_expectation test_data[] = {
1140 			.in_type = TB_TYPE_DP_HDMI_OUT,
1142 			.out_type = TB_TYPE_PORT,
1147 			.in_type = TB_TYPE_PORT,
1149 			.out_type = TB_TYPE_PORT,
1154 			.in_type = TB_TYPE_PORT,
1156 			.out_type = TB_TYPE_PORT,
1161 			.in_type = TB_TYPE_PORT,
1163 			.out_type = TB_TYPE_PORT,
1168 			.in_type = TB_TYPE_PORT,
1170 			.out_type = TB_TYPE_DP_HDMI_IN,
1173 	struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
1174 	struct tb_port *in, *out;
1175 	struct tb_path *path;
1178 	host = alloc_host(test);
1179 	dev1 = alloc_dev_default(test, host, 0x1, true);
1180 	dev2 = alloc_dev_default(test, dev1, 0x701, false);
1181 	dev3 = alloc_dev_default(test, dev2, 0x50701, false);
1182 	dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
1184 	in = &dev4->ports[13];
1185 	out = &host->ports[5];
1187 	path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
1188 	KUNIT_ASSERT_TRUE(test, path != NULL);
1189 	KUNIT_ASSERT_EQ(test, path->path_length, (int)ARRAY_SIZE(test_data));
1190 	for (i = 0; i < ARRAY_SIZE(test_data); i++) {
1191 		const struct tb_port *in_port, *out_port;
1193 		in_port = path->hops[i].in_port;
1194 		out_port = path->hops[i].out_port;
1196 		KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
1197 		KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
1198 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
1199 				test_data[i].in_type);
1200 		KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
1201 		KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
1202 		KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
1203 				test_data[i].out_type);
/*
 * Allocate two daisy-chained PCIe tunnels (host->dev1 and dev1->dev2) and
 * verify each tunnel's type, endpoints, and both of its 2-hop paths
 * (paths[0] downstream, paths[1] upstream). Tunnels are freed explicitly
 * since tb_tunnel_alloc_pci() is not KUnit-managed.
 */
1208 static void tb_test_tunnel_pcie(struct kunit *test)
1210 	struct tb_switch *host, *dev1, *dev2;
1211 	struct tb_tunnel *tunnel1, *tunnel2;
1212 	struct tb_port *down, *up;
/*
1215 	 * Create PCIe tunnel between host and two devices.
 */
1225 	host = alloc_host(test);
1226 	dev1 = alloc_dev_default(test, host, 0x1, true);
1227 	dev2 = alloc_dev_default(test, dev1, 0x501, true);
1229 	down = &host->ports[8];
1230 	up = &dev1->ports[9];
1231 	tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
1232 	KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
1233 	KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
1234 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1235 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1236 	KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
1237 	KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1238 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1239 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1240 	KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1241 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1242 	KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
/* Second tunnel: dev1's PCIe downstream to dev2's PCIe upstream. */
1244 	down = &dev1->ports[10];
1245 	up = &dev2->ports[9];
1246 	tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
1247 	KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
1248 	KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_PCI);
1249 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1250 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1251 	KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
1252 	KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1253 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1254 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1255 	KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1256 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1257 	KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1259 	tb_tunnel_free(tunnel2);
1260 	tb_tunnel_free(tunnel1);
1263 static void tb_test_tunnel_dp(struct kunit *test)
1265 struct tb_switch *host, *dev;
1266 struct tb_port *in, *out;
1267 struct tb_tunnel *tunnel;
1270 * Create DP tunnel between Host and Device
1277 host = alloc_host(test);
1278 dev = alloc_dev_default(test, host, 0x3, true);
1280 in = &host->ports[5];
1281 out = &dev->ports[13];
1283 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1284 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1285 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
1286 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1287 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1288 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
1289 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
1290 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1291 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
1292 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
1293 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1294 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
1295 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
1296 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1297 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
1298 tb_tunnel_free(tunnel);
1301 static void tb_test_tunnel_dp_chain(struct kunit *test)
1303 struct tb_switch *host, *dev1, *dev4;
1304 struct tb_port *in, *out;
1305 struct tb_tunnel *tunnel;
1308 * Create DP tunnel from Host DP IN to Device #4 DP OUT.
1316 * [Device #2] | [Device #4]
1320 host = alloc_host(test);
1321 dev1 = alloc_dev_default(test, host, 0x1, true);
1322 alloc_dev_default(test, dev1, 0x301, true);
1323 alloc_dev_default(test, dev1, 0x501, true);
1324 dev4 = alloc_dev_default(test, dev1, 0x701, true);
1326 in = &host->ports[5];
1327 out = &dev4->ports[14];
1329 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1330 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1331 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
1332 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1333 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1334 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
1335 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1336 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1337 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
1338 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1339 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1340 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
1341 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
1342 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1343 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
1344 tb_tunnel_free(tunnel);
1347 static void tb_test_tunnel_dp_tree(struct kunit *test)
1349 struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
1350 struct tb_port *in, *out;
1351 struct tb_tunnel *tunnel;
1354 * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
1362 * [Device #2] | [Device #4]
1369 host = alloc_host(test);
1370 dev1 = alloc_dev_default(test, host, 0x3, true);
1371 dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1372 dev3 = alloc_dev_default(test, dev1, 0x503, true);
1373 alloc_dev_default(test, dev1, 0x703, true);
1374 dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1376 in = &dev2->ports[13];
1377 out = &dev5->ports[13];
1379 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1380 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1381 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
1382 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1383 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1384 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
1385 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
1386 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1387 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
1388 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
1389 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1390 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
1391 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
1392 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1393 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
1394 tb_tunnel_free(tunnel);
1397 static void tb_test_tunnel_dp_max_length(struct kunit *test)
1399 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
1400 struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
1401 struct tb_port *in, *out;
1402 struct tb_tunnel *tunnel;
1405 * Creates DP tunnel from Device #6 to Device #12.
1410 * [Device #1] [Device #7]
1413 * [Device #2] [Device #8]
1416 * [Device #3] [Device #9]
1419 * [Device #4] [Device #10]
1422 * [Device #5] [Device #11]
1425 * [Device #6] [Device #12]
1427 host = alloc_host(test);
1428 dev1 = alloc_dev_default(test, host, 0x1, true);
1429 dev2 = alloc_dev_default(test, dev1, 0x301, true);
1430 dev3 = alloc_dev_default(test, dev2, 0x30301, true);
1431 dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
1432 dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
1433 dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
1434 dev7 = alloc_dev_default(test, host, 0x3, true);
1435 dev8 = alloc_dev_default(test, dev7, 0x303, true);
1436 dev9 = alloc_dev_default(test, dev8, 0x30303, true);
1437 dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
1438 dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
1439 dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
1441 in = &dev6->ports[13];
1442 out = &dev12->ports[13];
1444 tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1445 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1446 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DP);
1447 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
1448 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
1449 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
1450 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
1452 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
1454 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
1456 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
1459 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
1460 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
1461 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
1462 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
1464 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
1466 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
1467 KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
1468 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
1469 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
1471 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
1473 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
1474 tb_tunnel_free(tunnel);
1477 static void tb_test_tunnel_usb3(struct kunit *test)
1479 struct tb_switch *host, *dev1, *dev2;
1480 struct tb_tunnel *tunnel1, *tunnel2;
1481 struct tb_port *down, *up;
1484 * Create USB3 tunnel between host and two devices.
1494 host = alloc_host(test);
1495 dev1 = alloc_dev_default(test, host, 0x1, true);
1496 dev2 = alloc_dev_default(test, dev1, 0x701, true);
1498 down = &host->ports[12];
1499 up = &dev1->ports[16];
1500 tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1501 KUNIT_ASSERT_TRUE(test, tunnel1 != NULL);
1502 KUNIT_EXPECT_EQ(test, tunnel1->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
1503 KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
1504 KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
1505 KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
1506 KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
1507 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
1508 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
1509 KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
1510 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
1511 KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
1513 down = &dev1->ports[17];
1514 up = &dev2->ports[16];
1515 tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
1516 KUNIT_ASSERT_TRUE(test, tunnel2 != NULL);
1517 KUNIT_EXPECT_EQ(test, tunnel2->type, (enum tb_tunnel_type)TB_TUNNEL_USB3);
1518 KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
1519 KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
1520 KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
1521 KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
1522 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
1523 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
1524 KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
1525 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
1526 KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
1528 tb_tunnel_free(tunnel2);
1529 tb_tunnel_free(tunnel1);
1532 static void tb_test_tunnel_port_on_path(struct kunit *test)
1534 struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
1535 struct tb_port *in, *out, *port;
1536 struct tb_tunnel *dp_tunnel;
1545 * [Device #2] | [Device #4]
1552 host = alloc_host(test);
1553 dev1 = alloc_dev_default(test, host, 0x3, true);
1554 dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
1555 dev3 = alloc_dev_default(test, dev1, 0x503, true);
1556 dev4 = alloc_dev_default(test, dev1, 0x703, true);
1557 dev5 = alloc_dev_default(test, dev3, 0x50503, true);
1559 in = &dev2->ports[13];
1560 out = &dev5->ports[13];
1562 dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 0, 0);
1563 KUNIT_ASSERT_TRUE(test, dp_tunnel != NULL);
1565 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
1566 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
1568 port = &host->ports[8];
1569 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1571 port = &host->ports[3];
1572 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1574 port = &dev1->ports[1];
1575 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1577 port = &dev1->ports[3];
1578 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1580 port = &dev1->ports[5];
1581 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1583 port = &dev1->ports[7];
1584 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1586 port = &dev3->ports[1];
1587 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1589 port = &dev5->ports[1];
1590 KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1592 port = &dev4->ports[1];
1593 KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
1595 tb_tunnel_free(dp_tunnel);
1598 static void tb_test_tunnel_dma(struct kunit *test)
1600 struct tb_port *nhi, *port;
1601 struct tb_tunnel *tunnel;
1602 struct tb_switch *host;
1605 * Create DMA tunnel from NHI to port 1 and back.
1608 * 1 ^ In HopID 1 -> Out HopID 8
1610 * v In HopID 8 -> Out HopID 1
1611 * ............ Domain border
1615 host = alloc_host(test);
1616 nhi = &host->ports[7];
1617 port = &host->ports[1];
1619 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1620 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1621 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
1622 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1623 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1624 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1626 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1627 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1628 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1629 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1630 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
1632 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
1633 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1634 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1635 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
1636 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
1638 tb_tunnel_free(tunnel);
1641 static void tb_test_tunnel_dma_rx(struct kunit *test)
1643 struct tb_port *nhi, *port;
1644 struct tb_tunnel *tunnel;
1645 struct tb_switch *host;
1648 * Create DMA RX tunnel from port 1 to NHI.
1653 * | In HopID 15 -> Out HopID 2
1654 * ............ Domain border
1658 host = alloc_host(test);
1659 nhi = &host->ports[7];
1660 port = &host->ports[1];
1662 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
1663 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1664 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
1665 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1666 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1667 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)1);
1669 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1670 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1671 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
1672 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
1673 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
1675 tb_tunnel_free(tunnel);
1678 static void tb_test_tunnel_dma_tx(struct kunit *test)
1680 struct tb_port *nhi, *port;
1681 struct tb_tunnel *tunnel;
1682 struct tb_switch *host;
1685 * Create DMA TX tunnel from NHI to port 1.
1688 * 1 | In HopID 2 -> Out HopID 15
1691 * ............ Domain border
1695 host = alloc_host(test);
1696 nhi = &host->ports[7];
1697 port = &host->ports[1];
1699 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
1700 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1701 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
1702 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1703 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1704 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)1);
1706 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
1707 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
1708 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
1709 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
1710 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
1712 tb_tunnel_free(tunnel);
1715 static void tb_test_tunnel_dma_chain(struct kunit *test)
1717 struct tb_switch *host, *dev1, *dev2;
1718 struct tb_port *nhi, *port;
1719 struct tb_tunnel *tunnel;
1722 * Create DMA tunnel from NHI to Device #2 port 3 and back.
1725 * 1 ^ In HopID 1 -> Out HopID x
1727 * 1 | In HopID x -> Out HopID 1
1732 * 3 | In HopID x -> Out HopID 8
1734 * v In HopID 8 -> Out HopID x
1735 * ............ Domain border
1739 host = alloc_host(test);
1740 dev1 = alloc_dev_default(test, host, 0x1, true);
1741 dev2 = alloc_dev_default(test, dev1, 0x701, true);
1743 nhi = &host->ports[7];
1744 port = &dev2->ports[3];
1745 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
1746 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1747 KUNIT_EXPECT_EQ(test, tunnel->type, (enum tb_tunnel_type)TB_TUNNEL_DMA);
1748 KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
1749 KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
1750 KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
1752 KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
1753 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
1754 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
1755 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
1757 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
1759 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
1761 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
1763 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
1764 KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
1766 KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
1767 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
1768 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
1769 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
1771 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
1773 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
1775 KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
1776 KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
1778 tb_tunnel_free(tunnel);
1781 static void tb_test_tunnel_dma_match(struct kunit *test)
1783 struct tb_port *nhi, *port;
1784 struct tb_tunnel *tunnel;
1785 struct tb_switch *host;
1787 host = alloc_host(test);
1788 nhi = &host->ports[7];
1789 port = &host->ports[1];
1791 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
1792 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1794 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
1795 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
1796 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1797 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
1798 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
1799 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
1800 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
1801 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
1802 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1803 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
1805 tb_tunnel_free(tunnel);
1807 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
1808 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1809 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
1810 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
1811 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
1812 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1813 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
1814 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1815 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
1817 tb_tunnel_free(tunnel);
1819 tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
1820 KUNIT_ASSERT_TRUE(test, tunnel != NULL);
1821 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
1822 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
1823 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
1824 KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
1825 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
1826 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
1827 KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
1829 tb_tunnel_free(tunnel);
/*
 * Pre-built XDomain property block used by the property parse/format/
 * copy tests below.  Layout follows the USB4 XDomain property block
 * format: a root directory header, key/value entries, text leaves and
 * a nested "network" directory identified by network_dir_uuid.
 */
static const u32 root_directory[] = {
	0x55584401,	/* "UXD" v1 */
	0x00000018,	/* Root directory length */
	0x76656e64,	/* "vend" */
	0x6f726964,	/* "orid" */
	0x76000001,	/* "v" R 1 */
	0x00000a27,	/* Immediate value, ! Vendor ID */
	0x76656e64,	/* "vend" */
	0x6f726964,	/* "orid" */
	0x74000003,	/* "t" R 3 */
	0x0000001a,	/* Text leaf offset, ("Apple Inc.") */
	0x64657669,	/* "devi" */
	0x63656964,	/* "ceid" */
	0x76000001,	/* "v" R 1 */
	0x0000000a,	/* Immediate value, ! Device ID */
	0x64657669,	/* "devi" */
	0x63656964,	/* "ceid" */
	0x74000003,	/* "t" R 3 */
	0x0000001d,	/* Text leaf offset, ("Macintosh") */
	0x64657669,	/* "devi" */
	0x63657276,	/* "cerv" */
	0x76000001,	/* "v" R 1 */
	0x80000100,	/* Immediate value, Device Revision */
	0x6e657477,	/* "netw" */
	0x6f726b00,	/* "ork" */
	0x44000014,	/* "D" R 20 */
	0x00000021,	/* Directory data offset, (Network Directory) */
	0x4170706c,	/* "Appl" */
	0x6520496e,	/* "e In" */
	0x632e0000,	/* "c." ! */
	0x4d616369,	/* "Maci" */
	0x6e746f73,	/* "ntos" */
	0x68000000,	/* "h" */
	0x00000000,	/* padding */
	0xca8961c6,	/* Directory UUID, Network Directory */
	0x9541ce1c,	/* Directory UUID, Network Directory */
	0x5949b8bd,	/* Directory UUID, Network Directory */
	0x4f5a5f2e,	/* Directory UUID, Network Directory */
	0x70727463,	/* "prtc" */
	0x69640000,	/* "id" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol ID */
	0x70727463,	/* "prtc" */
	0x76657273,	/* "vers" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol Version */
	0x70727463,	/* "prtc" */
	0x72657673,	/* "revs" */
	0x76000001,	/* "v" R 1 */
	0x00000001,	/* Immediate value, Network Protocol Revision */
	0x70727463,	/* "prtc" */
	0x73746e73,	/* "stns" */
	0x76000001,	/* "v" R 1 */
	0x00000000,	/* Immediate value, Network Protocol Settings */
};
/*
 * UUID of the nested "network" directory inside root_directory above
 * (the four 0x...UUID words, byte-swapped into canonical form).
 */
static const uuid_t network_dir_uuid =
	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
1892 static void tb_test_property_parse(struct kunit *test)
1894 struct tb_property_dir *dir, *network_dir;
1895 struct tb_property *p;
1897 dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
1898 KUNIT_ASSERT_TRUE(test, dir != NULL);
1900 p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
1901 KUNIT_ASSERT_TRUE(test, !p);
1903 p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
1904 KUNIT_ASSERT_TRUE(test, p != NULL);
1905 KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");
1907 p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
1908 KUNIT_ASSERT_TRUE(test, p != NULL);
1909 KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0xa27);
1911 p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
1912 KUNIT_ASSERT_TRUE(test, p != NULL);
1913 KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");
1915 p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
1916 KUNIT_ASSERT_TRUE(test, p != NULL);
1917 KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0xa);
1919 p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
1920 KUNIT_ASSERT_TRUE(test, !p);
1922 p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
1923 KUNIT_ASSERT_TRUE(test, p != NULL);
1925 network_dir = p->value.dir;
1926 KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));
1928 p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
1929 KUNIT_ASSERT_TRUE(test, p != NULL);
1930 KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x1);
1932 p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
1933 KUNIT_ASSERT_TRUE(test, p != NULL);
1934 KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x1);
1936 p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
1937 KUNIT_ASSERT_TRUE(test, p != NULL);
1938 KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x1);
1940 p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
1941 KUNIT_ASSERT_TRUE(test, p != NULL);
1942 KUNIT_EXPECT_EQ(test, p->value.immediate, (u32)0x0);
1944 p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
1945 KUNIT_EXPECT_TRUE(test, !p);
1946 p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
1947 KUNIT_EXPECT_TRUE(test, !p);
1949 tb_property_free_dir(dir);
1952 static void tb_test_property_format(struct kunit *test)
1954 struct tb_property_dir *dir;
1959 dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
1960 KUNIT_ASSERT_TRUE(test, dir != NULL);
1962 ret = tb_property_format_dir(dir, NULL, 0);
1963 KUNIT_ASSERT_EQ(test, ret, (int)ARRAY_SIZE(root_directory));
1967 block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
1968 KUNIT_ASSERT_TRUE(test, block != NULL);
1970 ret = tb_property_format_dir(dir, block, block_len);
1971 KUNIT_EXPECT_EQ(test, ret, 0);
1973 for (i = 0; i < ARRAY_SIZE(root_directory); i++)
1974 KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
1976 tb_property_free_dir(dir);
1979 static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
1980 struct tb_property_dir *d2)
1982 struct tb_property *p1, *p2, *tmp;
1986 KUNIT_ASSERT_TRUE(test, d2->uuid != NULL);
1987 KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
1989 KUNIT_ASSERT_TRUE(test, d2->uuid == NULL);
1993 tb_property_for_each(d1, tmp)
1995 KUNIT_ASSERT_NE(test, n1, 0);
1998 tb_property_for_each(d2, tmp)
2000 KUNIT_ASSERT_NE(test, n2, 0);
2002 KUNIT_ASSERT_EQ(test, n1, n2);
2006 for (i = 0; i < n1; i++) {
2007 p1 = tb_property_get_next(d1, p1);
2008 KUNIT_ASSERT_TRUE(test, p1 != NULL);
2009 p2 = tb_property_get_next(d2, p2);
2010 KUNIT_ASSERT_TRUE(test, p2 != NULL);
2012 KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
2013 KUNIT_ASSERT_EQ(test, p1->type, p2->type);
2014 KUNIT_ASSERT_EQ(test, p1->length, p2->length);
2017 case TB_PROPERTY_TYPE_DIRECTORY:
2018 KUNIT_ASSERT_TRUE(test, p1->value.dir != NULL);
2019 KUNIT_ASSERT_TRUE(test, p2->value.dir != NULL);
2020 compare_dirs(test, p1->value.dir, p2->value.dir);
2023 case TB_PROPERTY_TYPE_DATA:
2024 KUNIT_ASSERT_TRUE(test, p1->value.data != NULL);
2025 KUNIT_ASSERT_TRUE(test, p2->value.data != NULL);
2026 KUNIT_ASSERT_TRUE(test,
2027 !memcmp(p1->value.data, p2->value.data,
2032 case TB_PROPERTY_TYPE_TEXT:
2033 KUNIT_ASSERT_TRUE(test, p1->value.text != NULL);
2034 KUNIT_ASSERT_TRUE(test, p2->value.text != NULL);
2035 KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
2038 case TB_PROPERTY_TYPE_VALUE:
2039 KUNIT_ASSERT_EQ(test, p1->value.immediate,
2040 p2->value.immediate);
2043 KUNIT_FAIL(test, "unexpected property type");
2049 static void tb_test_property_copy(struct kunit *test)
2051 struct tb_property_dir *src, *dst;
2055 src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
2056 KUNIT_ASSERT_TRUE(test, src != NULL);
2058 dst = tb_property_copy_dir(src);
2059 KUNIT_ASSERT_TRUE(test, dst != NULL);
2061 /* Compare the structures */
2062 compare_dirs(test, src, dst);
2064 /* Compare the resulting property block */
2065 ret = tb_property_format_dir(dst, NULL, 0);
2066 KUNIT_ASSERT_EQ(test, ret, (int)ARRAY_SIZE(root_directory));
2068 block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
2069 KUNIT_ASSERT_TRUE(test, block != NULL);
2071 ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
2072 KUNIT_EXPECT_TRUE(test, !ret);
2074 for (i = 0; i < ARRAY_SIZE(root_directory); i++)
2075 KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
2077 tb_property_free_dir(dst);
2078 tb_property_free_dir(src);
/* All test cases in the thunderbolt KUnit suite, run in this order. */
static struct kunit_case tb_test_cases[] = {
	KUNIT_CASE(tb_test_path_basic),
	KUNIT_CASE(tb_test_path_not_connected_walk),
	KUNIT_CASE(tb_test_path_single_hop_walk),
	KUNIT_CASE(tb_test_path_daisy_chain_walk),
	KUNIT_CASE(tb_test_path_simple_tree_walk),
	KUNIT_CASE(tb_test_path_complex_tree_walk),
	KUNIT_CASE(tb_test_path_max_length_walk),
	KUNIT_CASE(tb_test_path_not_connected),
	KUNIT_CASE(tb_test_path_not_bonded_lane0),
	KUNIT_CASE(tb_test_path_not_bonded_lane1),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
	KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
	KUNIT_CASE(tb_test_path_mixed_chain),
	KUNIT_CASE(tb_test_path_mixed_chain_reverse),
	KUNIT_CASE(tb_test_tunnel_pcie),
	KUNIT_CASE(tb_test_tunnel_dp),
	KUNIT_CASE(tb_test_tunnel_dp_chain),
	KUNIT_CASE(tb_test_tunnel_dp_tree),
	KUNIT_CASE(tb_test_tunnel_dp_max_length),
	KUNIT_CASE(tb_test_tunnel_port_on_path),
	KUNIT_CASE(tb_test_tunnel_usb3),
	KUNIT_CASE(tb_test_tunnel_dma),
	KUNIT_CASE(tb_test_tunnel_dma_rx),
	KUNIT_CASE(tb_test_tunnel_dma_tx),
	KUNIT_CASE(tb_test_tunnel_dma_chain),
	KUNIT_CASE(tb_test_tunnel_dma_match),
	KUNIT_CASE(tb_test_property_parse),
	KUNIT_CASE(tb_test_property_format),
	KUNIT_CASE(tb_test_property_copy),
	{ }
};
/* The thunderbolt KUnit suite descriptor. */
static struct kunit_suite tb_test_suite = {
	.name = "thunderbolt",
	.test_cases = tb_test_cases,
};
/* NULL-terminated list of suites registered by tb_test_init(). */
static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL };
2121 int tb_test_init(void)
2123 return __kunit_test_suites_init(tb_test_suites);
2126 void tb_test_exit(void)
2128 return __kunit_test_suites_exit(tb_test_suites);