// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Time Management Unit (TMU) support
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>

#include "tb.h"

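/* Human-readable TMU mode name used in the debug messages below */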
static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
{
        bool root_switch = !tb_route(sw);

        switch (sw->tmu.rate) {
        case TB_SWITCH_TMU_RATE_OFF:
                return "off";

        case TB_SWITCH_TMU_RATE_HIFI:
                /* Root switch does not have upstream directionality */
                if (root_switch)
                        return "HiFi";
                if (sw->tmu.unidirectional)
                        return "uni-directional, HiFi";
                return "bi-directional, HiFi";

        case TB_SWITCH_TMU_RATE_NORMAL:
                if (root_switch)
                        return "normal";
                return "uni-directional, normal";

        default:
                return "unknown";
        }
}

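/* Returns true if the router advertises the uni-directional TMU capability (UCAP) */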
static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
{
        int ret;
        u32 val;

        ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
                         sw->tmu.cap + TMU_RTR_CS_0, 1);
        if (ret)
                return false;

        return !!(val & TMU_RTR_CS_0_UCAP);
}

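/*
 * Returns the TS packet interval field of TMU_RTR_CS_3. The value is used
 * directly as the enum tb_switch_tmu_rate rate.
 */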
static int tb_switch_tmu_rate_read(struct tb_switch *sw)
{
        int ret;
        u32 val;

        ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
                         sw->tmu.cap + TMU_RTR_CS_3, 1);
        if (ret)
                return ret;

        val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
        return val;
}

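/* Updates the TS packet interval field of TMU_RTR_CS_3 with the given rate */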
static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
{
        int ret;
        u32 val;

        ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
                         sw->tmu.cap + TMU_RTR_CS_3, 1);
        if (ret)
                return ret;

        val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
        val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;

        return tb_sw_write(sw, &val, TB_CFG_SWITCH,
                           sw->tmu.cap + TMU_RTR_CS_3, 1);
}

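/* Read-modify-write helper for adapter (port) TMU registers */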
static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
                             u32 value)
{
        u32 data;
        int ret;

        ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
        if (ret)
                return ret;

        data &= ~mask;
        data |= value;

        return tb_port_write(port, &data, TB_CFG_PORT,
                             port->cap_tmu + offset, 1);
}

static int tb_port_tmu_set_unidirectional(struct tb_port *port,
                                          bool unidirectional)
{
        u32 val;

        if (!port->sw->tmu.has_ucap)
                return 0;

        val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
        return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
}

static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
        return tb_port_tmu_set_unidirectional(port, false);
}

static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
        return tb_port_tmu_set_unidirectional(port, true);
}

static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
{
        int ret;
        u32 val;

        ret = tb_port_read(port, &val, TB_CFG_PORT,
                           port->cap_tmu + TMU_ADP_CS_3, 1);
        if (ret)
                return false;

        return val & TMU_ADP_CS_3_UDM;
}

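/*
 * Note that TMU_ADP_CS_6_DTS disables time sync when it is set:
 * tb_port_tmu_time_sync_disable() below therefore passes true and
 * tb_port_tmu_time_sync_enable() passes false.
 */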
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
        u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

        return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}

static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
        return tb_port_tmu_time_sync(port, true);
}

static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
        return tb_port_tmu_time_sync(port, false);
}

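/*
 * Sets or clears the time disruption (TD) bit, either in the USB4 router
 * TMU register or in the vendor specific register on legacy (pre-USB4)
 * routers.
 */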
static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
        u32 val, offset, bit;
        int ret;

        if (tb_switch_is_usb4(sw)) {
                offset = sw->tmu.cap + TMU_RTR_CS_0;
                bit = TMU_RTR_CS_0_TD;
        } else {
                offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
                bit = TB_TIME_VSEC_3_CS_26_TD;
        }

        ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
        if (ret)
                return ret;

        if (set)
                val |= bit;
        else
                val &= ~bit;

        return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}

/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
        struct tb_port *port;
        int ret;

        if (tb_switch_is_icm(sw))
                return 0;

        ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
        if (ret > 0)
                sw->tmu.cap = ret;

        tb_switch_for_each_port(sw, port) {
                int cap;

                cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
                if (cap > 0)
                        port->cap_tmu = cap;
        }

        ret = tb_switch_tmu_rate_read(sw);
        if (ret < 0)
                return ret;

        sw->tmu.rate = ret;

        sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
        if (sw->tmu.has_ucap) {
                tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");

                if (tb_route(sw)) {
                        struct tb_port *up = tb_upstream_port(sw);

                        sw->tmu.unidirectional =
                                tb_port_tmu_is_unidirectional(up);
                }
        } else {
                sw->tmu.unidirectional = false;
        }

        tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
        return 0;
}

/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates the local time of the switch using the time posting procedure.
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
        unsigned int post_time_high_offset, post_time_high = 0;
        unsigned int post_local_time_offset, post_time_offset;
        struct tb_switch *root_switch = sw->tb->root_switch;
        u64 hi, mid, lo, local_time, post_time;
        int i, ret, retries = 100;
        u32 gm_local_time[3];

        if (!tb_route(sw))
                return 0;

        if (!tb_switch_is_usb4(sw))
                return 0;

        /* Need to be able to read the grand master time */
        if (!root_switch->tmu.cap)
                return 0;

        ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
                         root_switch->tmu.cap + TMU_RTR_CS_1,
                         ARRAY_SIZE(gm_local_time));
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
                tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
                          gm_local_time[i]);

        /* Convert to nanoseconds (drop fractional part) */
        hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
        mid = gm_local_time[1];
        lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
                TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
        local_time = hi << 48 | mid << 16 | lo;

        /* Tell the switch that time sync is disrupted for a while */
        ret = tb_switch_tmu_set_time_disruption(sw, true);
        if (ret)
                return ret;

        post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
        post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
        post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

        /*
         * Write the Grandmaster time to the Post Local Time registers
         * of the new switch.
         */
        ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
                          post_local_time_offset, 2);
        if (ret)
                goto out;

        /*
         * Have the new switch update its local time by:
         * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
         * the Post Time High register.
         * 2) writing 0 to the Post Time High register and then waiting
         * until the Post Time register reads back as 0, which means the
         * time has converged properly.
         */
        post_time = 0xffffffff00000001ULL;

        ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
        if (ret)
                goto out;

        ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
                          post_time_high_offset, 1);
        if (ret)
                goto out;

        do {
                usleep_range(5, 10);
                ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
                                 post_time_offset, 2);
                if (ret)
                        goto out;
        } while (--retries && post_time);

        if (!retries) {
                ret = -ETIMEDOUT;
                goto out;
        }

        tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
        tb_switch_tmu_set_time_disruption(sw, false);
        return ret;
}

/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off the TMU of @sw if it is enabled. If it is not enabled, does
 * nothing.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
        /*
         * No need to disable TMU on devices that don't support CLx since
         * on these devices, e.g. Alpine Ridge and earlier, bi-directional
         * HiFi TMU mode is enabled by default and we don't change it.
         */
        if (!tb_switch_is_clx_supported(sw))
                return 0;

        /* Already disabled? */
        if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
                return 0;

        if (tb_route(sw)) {
                bool unidirectional = tb_switch_tmu_hifi_is_enabled(sw, true);
                struct tb_switch *parent = tb_switch_parent(sw);
                struct tb_port *down, *up;
                int ret;

                down = tb_port_at(tb_route(sw), parent);
                up = tb_upstream_port(sw);
                /*
                 * In case of uni-directional time sync, TMU handshake is
                 * initiated by upstream router. In case of bi-directional
                 * time sync, TMU handshake is initiated by downstream router.
                 * Therefore, we change the rate to off in the respective
                 * router.
                 */
                if (unidirectional)
                        tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
                else
                        tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

                tb_port_tmu_time_sync_disable(up);
                ret = tb_port_tmu_time_sync_disable(down);
                if (ret)
                        return ret;

                if (unidirectional) {
                        /* The switch may be unplugged so ignore any errors */
                        tb_port_tmu_unidirectional_disable(up);
                        ret = tb_port_tmu_unidirectional_disable(down);
                        if (ret)
                                return ret;
                }
        } else {
                tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
        }

        sw->tmu.unidirectional = false;
        sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;

        tb_sw_dbg(sw, "TMU: disabled\n");
        return 0;
}

static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
{
        struct tb_switch *parent = tb_switch_parent(sw);
        struct tb_port *down, *up;

        down = tb_port_at(tb_route(sw), parent);
        up = tb_upstream_port(sw);
        /*
         * If any of the steps when setting bi-directional or uni-directional
         * TMU mode fails, fall back to the TMU off configuration. Additional
         * failures in the functions below are ignored since the caller
         * already reports a failure.
         */
        tb_port_tmu_time_sync_disable(down);
        tb_port_tmu_time_sync_disable(up);
        if (unidirectional)
                tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
        else
                tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

        tb_port_tmu_unidirectional_disable(down);
        tb_port_tmu_unidirectional_disable(up);
}

/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF.
 */
static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
        struct tb_switch *parent = tb_switch_parent(sw);
        struct tb_port *up, *down;
        int ret;

        up = tb_upstream_port(sw);
        down = tb_port_at(tb_route(sw), parent);

        ret = tb_port_tmu_unidirectional_disable(up);
        if (ret)
                return ret;

        ret = tb_port_tmu_unidirectional_disable(down);
        if (ret)
                goto out;

        ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
        if (ret)
                goto out;

        ret = tb_port_tmu_time_sync_enable(up);
        if (ret)
                goto out;

        ret = tb_port_tmu_time_sync_enable(down);
        if (ret)
                goto out;

        return 0;

out:
        __tb_switch_tmu_off(sw, false);
        return ret;
}

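/*
 * Clears the TMU objection mask bits in the vendor specific register.
 * Used on Titan Ridge before uni-directional mode is enabled (see
 * tb_switch_tmu_hifi_enable()).
 */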
static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
{
        u32 val;
        int ret;

        ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
                         sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
        if (ret)
                return ret;

        val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;

        return tb_sw_write(sw, &val, TB_CFG_SWITCH,
                           sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
}

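/*
 * Sets the TMU objection disable bits on the upstream adapter. Like the
 * helper above, this is used on Titan Ridge before enabling
 * uni-directional mode.
 */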
static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
{
        struct tb_port *up = tb_upstream_port(sw);

        return tb_port_tmu_write(up, TMU_ADP_CS_6,
                                 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
                                 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
}

/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF.
 */
static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
        struct tb_switch *parent = tb_switch_parent(sw);
        struct tb_port *up, *down;
        int ret;

        up = tb_upstream_port(sw);
        down = tb_port_at(tb_route(sw), parent);
        ret = tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_HIFI);
        if (ret)
                return ret;

        ret = tb_port_tmu_unidirectional_enable(up);
        if (ret)
                goto out;

        ret = tb_port_tmu_time_sync_enable(up);
        if (ret)
                goto out;

        ret = tb_port_tmu_unidirectional_enable(down);
        if (ret)
                goto out;

        ret = tb_port_tmu_time_sync_enable(down);
        if (ret)
                goto out;

        return 0;

out:
        __tb_switch_tmu_off(sw, true);
        return ret;
}

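/*
 * Enables HiFi TMU mode, uni- or bi-directional depending on what was
 * requested with tb_switch_tmu_configure(), and updates the cached TMU
 * state of @sw.
 */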
static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
{
        bool unidirectional = sw->tmu.unidirectional_request;
        int ret;

        if (unidirectional && !sw->tmu.has_ucap)
                return -EOPNOTSUPP;

        /*
         * No need to enable TMU on devices that don't support CLx since
         * on these devices, e.g. Alpine Ridge and earlier, bi-directional
         * HiFi TMU mode is enabled by default.
         */
        if (!tb_switch_is_clx_supported(sw))
                return 0;

        if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
                return 0;

        if (tb_switch_is_titan_ridge(sw) && unidirectional) {
                /* Titan Ridge supports only CL0s */
                if (!tb_switch_is_cl0s_enabled(sw))
                        return -EOPNOTSUPP;

                ret = tb_switch_tmu_objection_mask(sw);
                if (ret)
                        return ret;

                ret = tb_switch_tmu_unidirectional_enable(sw);
                if (ret)
                        return ret;
        }

        ret = tb_switch_tmu_set_time_disruption(sw, true);
        if (ret)
                return ret;

        if (tb_route(sw)) {
                /* The only mode changes used here are OFF to HiFi-Uni/HiFi-BiDir */
                if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
                        if (unidirectional)
                                ret = __tb_switch_tmu_enable_unidirectional(sw);
                        else
                                ret = __tb_switch_tmu_enable_bidirectional(sw);
                        if (ret)
                                return ret;
                }
                sw->tmu.unidirectional = unidirectional;
        } else {
                /*
                 * Host router port configurations are written as part of
                 * the configuration of the parent's downstream port when
                 * the child router is handled above. Here only the host
                 * router's rate configuration is written.
                 */
                ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
                if (ret)
                        return ret;
        }

        sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;

        tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
        return tb_switch_tmu_set_time_disruption(sw, false);
}

/**
 * tb_switch_tmu_enable() - Enable TMU on a router
 * @sw: Router whose TMU to enable
 *
 * Enables the TMU of a router in uni-directional or bi-directional HiFi
 * mode. tb_switch_tmu_configure() must be called before this function to
 * select the HiFi rate and the directionality (uni-directional or
 * bi-directional). All tunneling should work in both modes, but
 * uni-directional mode is required for CLx (Link Low-Power) to work.
 */
int tb_switch_tmu_enable(struct tb_switch *sw)
{
        if (sw->tmu.rate_request == TB_SWITCH_TMU_RATE_NORMAL)
                return -EOPNOTSUPP;

        return tb_switch_tmu_hifi_enable(sw);
}

/**
 * tb_switch_tmu_configure() - Configure the TMU rate and directionality
 * @sw: Router whose mode to change
 * @rate: Rate to configure (Off/LowRes/HiFi)
 * @unidirectional: If uni-directional (bi-directional otherwise)
 *
 * Selects the rate of the TMU and its directionality (uni-directional or
 * bi-directional). Must be called before tb_switch_tmu_enable().
 */
void tb_switch_tmu_configure(struct tb_switch *sw,
                             enum tb_switch_tmu_rate rate, bool unidirectional)
{
        sw->tmu.unidirectional_request = unidirectional;
        sw->tmu.rate_request = rate;
}
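/*
 * Rough usage sketch, for illustration only (not part of this file): a
 * connection manager is expected to pick the mode first and then enable
 * the TMU, along the lines of:
 *
 *      tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, unidirectional);
 *      ret = tb_switch_tmu_enable(sw);
 *      if (ret)
 *              tb_sw_warn(sw, "failed to enable TMU\n");
 */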