Merge tag 'block-5.11-2021-02-05' of git://git.kernel.dk/linux-block
[linux-2.6-microblaze.git] / arch / um / kernel / time.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
4  * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
5  * Copyright (C) 2012-2014 Cisco Systems
6  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
7  * Copyright (C) 2019 Intel Corporation
8  */
9
10 #include <linux/clockchips.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/jiffies.h>
14 #include <linux/mm.h>
15 #include <linux/sched.h>
16 #include <linux/spinlock.h>
17 #include <linux/threads.h>
18 #include <asm/irq.h>
19 #include <asm/param.h>
20 #include <kern_util.h>
21 #include <os.h>
22 #include <linux/time-internal.h>
23 #include <linux/um_timetravel.h>
24 #include <shared/init.h>
25
26 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
enum time_travel_mode time_travel_mode;
EXPORT_SYMBOL_GPL(time_travel_mode);

/* set once a wall-clock start offset was configured or obtained */
static bool time_travel_start_set;
/* boot-time wall-clock offset (ns), added to simulated time for TOD */
static unsigned long long time_travel_start;
/* current simulated time in ns; only ever moves forward */
static unsigned long long time_travel_time;
/* pending events, kept sorted by time (see __time_travel_add_event()) */
static LIST_HEAD(time_travel_events);
/* IRQ events deferred because they fired while IRQs were disabled */
static LIST_HEAD(time_travel_irqs);
/* periodic timer interval in ns (TT_MODE_BASIC re-arm) */
static unsigned long long time_travel_timer_interval;
/* time of the earliest pending event */
static unsigned long long time_travel_next_event;
/* event backing the clockevent timer (periodic or one-shot) */
static struct time_travel_event time_travel_timer_event;
/* socket fd to the external controller (TT_MODE_EXTERNAL), -1 if none */
static int time_travel_ext_fd = -1;
/* non-zero while we're inside time_travel_ext_wait() */
static unsigned int time_travel_ext_waiting;
/* last time requested from the controller, to suppress duplicates */
static bool time_travel_ext_prev_request_valid;
static unsigned long long time_travel_ext_prev_request;
/* controller granted free running until this time ("free until") */
static bool time_travel_ext_free_until_valid;
static unsigned long long time_travel_ext_free_until;
44
45 static void time_travel_set_time(unsigned long long ns)
46 {
47         if (unlikely(ns < time_travel_time))
48                 panic("time-travel: time goes backwards %lld -> %lld\n",
49                       time_travel_time, ns);
50         else if (unlikely(ns >= S64_MAX))
51                 panic("The system was going to sleep forever, aborting");
52
53         time_travel_time = ns;
54 }
55
/*
 * How time_travel_handle_message() obtains the next controller message:
 * TTMH_IDLE - called from the idle path; IRQs must be disabled and are
 *             temporarily re-enabled while polling for the message
 * TTMH_POLL - poll (busy-wait) for a message
 * TTMH_READ - a message is already available; just read it
 */
enum time_travel_message_handling {
	TTMH_IDLE,
	TTMH_POLL,
	TTMH_READ,
};

/*
 * Read one message from the time-travel controller socket into @msg,
 * act on it, and send an ACK back (except for ACK messages themselves).
 * Panics if the link is broken or a short read occurs.
 */
static void time_travel_handle_message(struct um_timetravel_msg *msg,
				       enum time_travel_message_handling mode)
{
	struct um_timetravel_msg resp = {
		.op = UM_TIMETRAVEL_ACK,
	};
	int ret;

	/*
	 * Poll outside the locked section (if we're not called to only read
	 * the response) so we can get interrupts for e.g. virtio while we're
	 * here, but then we need to lock to not get interrupted between the
	 * read of the message and write of the ACK.
	 */
	if (mode != TTMH_READ) {
		bool disabled = irqs_disabled();

		/* idle callers are required to have IRQs off already */
		BUG_ON(mode == TTMH_IDLE && !disabled);

		if (disabled)
			local_irq_enable();
		while (os_poll(1, &time_travel_ext_fd) != 0) {
			/* nothing */
		}
		if (disabled)
			local_irq_disable();
	}

	ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));

	if (ret == 0)
		panic("time-travel external link is broken\n");
	if (ret != sizeof(*msg))
		panic("invalid time-travel message - %d bytes\n", ret);

	switch (msg->op) {
	default:
		WARN_ONCE(1, "time-travel: unexpected message %lld\n",
			  (unsigned long long)msg->op);
		break;
	case UM_TIMETRAVEL_ACK:
		/* ACKs are not themselves ACKed */
		return;
	case UM_TIMETRAVEL_RUN:
		/* we may run now, at the time the controller gave us */
		time_travel_set_time(msg->time);
		break;
	case UM_TIMETRAVEL_FREE_UNTIL:
		/* free to run without further requests until msg->time */
		time_travel_ext_free_until_valid = true;
		time_travel_ext_free_until = msg->time;
		break;
	}

	resp.seq = msg->seq;
	os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
}
116
/*
 * Send one request of type @op (with payload @time) to the controller
 * and synchronously wait for the matching ACK; returns the time value
 * carried in the ACK (meaningful e.g. for UM_TIMETRAVEL_GET).
 */
static u64 time_travel_ext_req(u32 op, u64 time)
{
	static int seq;
	int mseq = ++seq;
	struct um_timetravel_msg msg = {
		.op = op,
		.time = time,
		.seq = mseq,
	};
	unsigned long flags;

	/*
	 * We need to save interrupts here and only restore when we
	 * got the ACK - otherwise we can get interrupted and send
	 * another request while we're still waiting for an ACK, but
	 * the peer doesn't know we got interrupted and will send
	 * the ACKs in the same order as the message, but we'd need
	 * to see them in the opposite order ...
	 *
	 * This wouldn't matter *too* much, but some ACKs carry the
	 * current time (for UM_TIMETRAVEL_GET) and getting another
	 * ACK without a time would confuse us a lot!
	 *
	 * The sequence number assignment that happens here lets us
	 * debug such message handling issues more easily.
	 */
	local_irq_save(flags);
	os_write_file(time_travel_ext_fd, &msg, sizeof(msg));

	/* non-ACK messages received meanwhile are handled in place */
	while (msg.op != UM_TIMETRAVEL_ACK)
		time_travel_handle_message(&msg, TTMH_READ);

	if (msg.seq != mseq)
		panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
		      msg.op, msg.seq, mseq, msg.time);

	/* a GET reply carries the controller's current time */
	if (op == UM_TIMETRAVEL_GET)
		time_travel_set_time(msg.time);
	local_irq_restore(flags);

	return msg.time;
}
159
/*
 * Block until @fd becomes readable, while keeping the time-travel
 * protocol serviced: messages arriving on the controller socket in
 * the meantime are consumed and handled.
 * NOTE(review): this relies on os_poll()'s return value identifying
 * which fd became ready (1 == second entry, the controller socket) -
 * confirm against the os layer implementation.
 */
void __time_travel_wait_readable(int fd)
{
	int fds[2] = { fd, time_travel_ext_fd };
	int ret;

	/* only meaningful when an external controller is attached */
	if (time_travel_mode != TT_MODE_EXTERNAL)
		return;

	while ((ret = os_poll(2, fds))) {
		struct um_timetravel_msg msg;

		if (ret == 1)
			time_travel_handle_message(&msg, TTMH_READ);
	}
}
EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
176
177 static void time_travel_ext_update_request(unsigned long long time)
178 {
179         if (time_travel_mode != TT_MODE_EXTERNAL)
180                 return;
181
182         /* asked for exactly this time previously */
183         if (time_travel_ext_prev_request_valid &&
184             time == time_travel_ext_prev_request)
185                 return;
186
187         /*
188          * if we're running and are allowed to run past the request
189          * then we don't need to update it either
190          */
191         if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
192             time < time_travel_ext_free_until)
193                 return;
194
195         time_travel_ext_prev_request = time;
196         time_travel_ext_prev_request_valid = true;
197         time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
198 }
199
200 void __time_travel_propagate_time(void)
201 {
202         static unsigned long long last_propagated;
203
204         if (last_propagated == time_travel_time)
205                 return;
206
207         time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
208         last_propagated = time_travel_time;
209 }
210 EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
211
212 /* returns true if we must do a wait to the simtime device */
213 static bool time_travel_ext_request(unsigned long long time)
214 {
215         /*
216          * If we received an external sync point ("free until") then we
217          * don't have to request/wait for anything until then, unless
218          * we're already waiting.
219          */
220         if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
221             time < time_travel_ext_free_until)
222                 return false;
223
224         time_travel_ext_update_request(time);
225         return true;
226 }
227
/*
 * Enter the controller's wait state and process incoming messages
 * until we're told to RUN again. @idle selects TTMH_IDLE message
 * handling (IRQs off, re-enabled only while polling) vs. TTMH_POLL.
 */
static void time_travel_ext_wait(bool idle)
{
	struct um_timetravel_msg msg = {
		.op = UM_TIMETRAVEL_ACK,
	};

	/* old request/grant state is stale once we start waiting */
	time_travel_ext_prev_request_valid = false;
	time_travel_ext_free_until_valid = false;
	time_travel_ext_waiting++;

	time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);

	/*
	 * Here we are deep in the idle loop, so we have to break out of the
	 * kernel abstraction in a sense and implement this in terms of the
	 * UML system waiting on the VQ interrupt while sleeping, when we get
	 * the signal it'll call time_travel_ext_vq_notify_done() completing the
	 * call.
	 */
	while (msg.op != UM_TIMETRAVEL_RUN)
		time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);

	time_travel_ext_waiting--;

	/* we might request more stuff while polling - reset when we run */
	time_travel_ext_prev_request_valid = false;
}
255
256 static void time_travel_ext_get_time(void)
257 {
258         time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
259 }
260
261 static void __time_travel_update_time(unsigned long long ns, bool idle)
262 {
263         if (time_travel_mode == TT_MODE_EXTERNAL && time_travel_ext_request(ns))
264                 time_travel_ext_wait(idle);
265         else
266                 time_travel_set_time(ns);
267 }
268
269 static struct time_travel_event *time_travel_first_event(void)
270 {
271         return list_first_entry_or_null(&time_travel_events,
272                                         struct time_travel_event,
273                                         list);
274 }
275
/*
 * Insert @e at @time into the time-sorted event list (no-op when it is
 * already pending), then refresh the controller request and the cached
 * next-event time from the new list head.
 */
static void __time_travel_add_event(struct time_travel_event *e,
				    unsigned long long time)
{
	struct time_travel_event *tmp;
	bool inserted = false;

	if (e->pending)
		return;

	e->pending = true;
	e->time = time;

	list_for_each_entry(tmp, &time_travel_events, list) {
		/*
		 * Add the new entry before one with higher time,
		 * or if they're equal and both on stack, because
		 * in that case we need to unwind the stack in the
		 * right order, and the later event (timer sleep
		 * or such) must be dequeued first.
		 */
		if ((tmp->time > e->time) ||
		    (tmp->time == e->time && tmp->onstack && e->onstack)) {
			list_add_tail(&e->list, &tmp->list);
			inserted = true;
			break;
		}
	}

	/* nothing later in the list - append at the end */
	if (!inserted)
		list_add_tail(&e->list, &time_travel_events);

	tmp = time_travel_first_event();
	time_travel_ext_update_request(tmp->time);
	time_travel_next_event = tmp->time;
}
311
312 static void time_travel_add_event(struct time_travel_event *e,
313                                   unsigned long long time)
314 {
315         if (WARN_ON(!e->fn))
316                 return;
317
318         __time_travel_add_event(e, time);
319 }
320
321 void time_travel_periodic_timer(struct time_travel_event *e)
322 {
323         time_travel_add_event(&time_travel_timer_event,
324                               time_travel_time + time_travel_timer_interval);
325         deliver_alarm();
326 }
327
/*
 * Deliver IRQ events that were deferred because they fired while IRQs
 * were disabled (see time_travel_deliver_event()).
 */
void deliver_time_travel_irqs(void)
{
	struct time_travel_event *e;
	unsigned long flags;

	/*
	 * Don't do anything for most cases. Note that because here we have
	 * to disable IRQs (and re-enable later) we'll actually recurse at
	 * the end of the function, so this is strictly necessary.
	 */
	if (likely(list_empty(&time_travel_irqs)))
		return;

	local_irq_save(flags);
	irq_enter();
	while ((e = list_first_entry_or_null(&time_travel_irqs,
					     struct time_travel_event,
					     list))) {
		/* deferred IRQs must be delivered at their event time */
		WARN(e->time != time_travel_time,
		     "time moved from %lld to %lld before IRQ delivery\n",
		     time_travel_time, e->time);
		list_del(&e->list);
		e->pending = false;
		e->fn(e);
	}
	irq_exit();
	local_irq_restore(flags);
}
356
/*
 * Run an expired event's handler, taking care of IRQ context: the
 * timer event manages irq_enter/exit itself, events expiring with
 * IRQs disabled are deferred to deliver_time_travel_irqs(), and
 * everything else runs in a locally-created IRQ context.
 */
static void time_travel_deliver_event(struct time_travel_event *e)
{
	if (e == &time_travel_timer_event) {
		/*
		 * deliver_alarm() does the irq_enter/irq_exit
		 * by itself, so must handle it specially here
		 */
		e->fn(e);
	} else if (irqs_disabled()) {
		list_add_tail(&e->list, &time_travel_irqs);
		/*
		 * set pending again, it was set to false when the
		 * event was deleted from the original list, but
		 * now it's still pending until we deliver the IRQ.
		 */
		e->pending = true;
	} else {
		unsigned long flags;

		local_irq_save(flags);
		irq_enter();
		e->fn(e);
		irq_exit();
		local_irq_restore(flags);
	}
}
383
384 static bool time_travel_del_event(struct time_travel_event *e)
385 {
386         if (!e->pending)
387                 return false;
388         list_del(&e->list);
389         e->pending = false;
390         return true;
391 }
392
/*
 * Advance simulated time to @next, delivering every event that expires
 * on the way. A marker event is placed on our own stack at @next so we
 * know when to stop; new events may be queued by handlers (or arrive
 * while waiting on the controller) and are processed in time order.
 */
static void time_travel_update_time(unsigned long long next, bool idle)
{
	struct time_travel_event ne = {
		.onstack = true,
	};
	struct time_travel_event *e;
	bool finished = idle;

	/* add it without a handler - we deal with that specifically below */
	__time_travel_add_event(&ne, next);

	do {
		e = time_travel_first_event();

		BUG_ON(!e);
		__time_travel_update_time(e->time, idle);

		/* new events may have been inserted while we were waiting */
		if (e == time_travel_first_event()) {
			BUG_ON(!time_travel_del_event(e));
			BUG_ON(time_travel_time != e->time);

			if (e == &ne) {
				/* reached our own marker - we're done */
				finished = true;
			} else {
				if (e->onstack)
					panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
					      time_travel_time, e->time, e);
				time_travel_deliver_event(e);
			}
		}

		/* keep the controller request pointing at the new head */
		e = time_travel_first_event();
		if (e)
			time_travel_ext_update_request(e->time);
	} while (ne.pending && !finished);

	/* drop the marker in case it is still queued */
	time_travel_del_event(&ne);
}
432
433 void time_travel_ndelay(unsigned long nsec)
434 {
435         time_travel_update_time(time_travel_time + nsec, false);
436 }
437 EXPORT_SYMBOL(time_travel_ndelay);
438
439 void time_travel_add_irq_event(struct time_travel_event *e)
440 {
441         BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);
442
443         time_travel_ext_get_time();
444         /*
445          * We could model interrupt latency here, for now just
446          * don't have any latency at all and request the exact
447          * same time (again) to run the interrupt...
448          */
449         time_travel_add_event(e, time_travel_time);
450 }
451 EXPORT_SYMBOL_GPL(time_travel_add_irq_event);
452
/* One-shot timer expiry: just raise the timer interrupt. */
static void time_travel_oneshot_timer(struct time_travel_event *e)
{
	deliver_alarm();
}
457
/*
 * Idle-loop sleep: advance simulated time until the next event (or
 * "forever" when there is none), then re-arm the real OS timer in
 * basic mode if a timer event is still pending.
 */
void time_travel_sleep(void)
{
	/*
	 * Wait "forever" (using S64_MAX because there are some potential
	 * wrapping issues, especially with the current TT_MODE_EXTERNAL
	 * controller application).
	 */
	unsigned long long next = S64_MAX;

	/* basic mode uses a real timer - stop it while we skip time */
	if (time_travel_mode == TT_MODE_BASIC)
		os_timer_disable();

	time_travel_update_time(next, true);

	if (time_travel_mode == TT_MODE_BASIC &&
	    time_travel_timer_event.pending) {
		if (time_travel_timer_event.fn == time_travel_periodic_timer) {
			/*
			 * This is somewhat wrong - we should get the first
			 * one sooner like the os_timer_one_shot() below...
			 */
			os_timer_set_interval(time_travel_timer_interval);
		} else {
			os_timer_one_shot(time_travel_timer_event.time - next);
		}
	}
}
485
486 static void time_travel_handle_real_alarm(void)
487 {
488         time_travel_set_time(time_travel_next_event);
489
490         time_travel_del_event(&time_travel_timer_event);
491
492         if (time_travel_timer_event.fn == time_travel_periodic_timer)
493                 time_travel_add_event(&time_travel_timer_event,
494                                       time_travel_time +
495                                       time_travel_timer_interval);
496 }
497
498 static void time_travel_set_interval(unsigned long long interval)
499 {
500         time_travel_timer_interval = interval;
501 }
502
/*
 * Parse "[ID:]socket" from the command line, connect to the external
 * controller socket, and send the initial START message (carrying the
 * optional 64-bit ID, or -1 when none was given). Panics on any
 * failure; the return statements after panic() only keep the compiler
 * happy. Returns 1 on success (the __setup convention).
 */
static int time_travel_connect_external(const char *socket)
{
	const char *sep;
	unsigned long long id = (unsigned long long)-1;
	int rc;

	if ((sep = strchr(socket, ':'))) {
		/* the ID must fit the buffer including a terminator */
		char buf[25] = {};
		if (sep - socket > sizeof(buf) - 1)
			goto invalid_number;

		memcpy(buf, socket, sep - socket);
		if (kstrtoull(buf, 0, &id)) {
invalid_number:
			panic("time-travel: invalid external ID in string '%s'\n",
			      socket);
			return -EINVAL;
		}

		socket = sep + 1;
	}

	rc = os_connect_socket(socket);
	if (rc < 0) {
		panic("time-travel: failed to connect to external socket %s\n",
		      socket);
		return rc;
	}

	time_travel_ext_fd = rc;

	time_travel_ext_req(UM_TIMETRAVEL_START, id);

	return 1;
}
538
539 static void time_travel_set_start(void)
540 {
541         if (time_travel_start_set)
542                 return;
543
544         switch (time_travel_mode) {
545         case TT_MODE_EXTERNAL:
546                 time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
547                 /* controller gave us the *current* time, so adjust by that */
548                 time_travel_ext_get_time();
549                 time_travel_start -= time_travel_time;
550                 break;
551         case TT_MODE_INFCPU:
552         case TT_MODE_BASIC:
553                 if (!time_travel_start_set)
554                         time_travel_start = os_persistent_clock_emulation();
555                 break;
556         case TT_MODE_OFF:
557                 /* we just read the host clock with os_persistent_clock_emulation() */
558                 break;
559         }
560
561         time_travel_start_set = true;
562 }
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
/* with time travel compiled out, all state collapses to constants */
#define time_travel_start_set 0
#define time_travel_start 0
#define time_travel_time 0
#define time_travel_ext_waiting 0

/* no-op stubs so the common code below needs no #ifdefs */
static inline void time_travel_update_time(unsigned long long ns, bool retearly)
{
}

static inline void time_travel_handle_real_alarm(void)
{
}

static void time_travel_set_interval(unsigned long long interval)
{
}

static inline void time_travel_set_start(void)
{
}

/* fail link if this actually gets used */
extern u64 time_travel_ext_req(u32 op, u64 time);

/* these are empty macros so the struct/fn need not exist */
#define time_travel_add_event(e, time) do { } while (0)
#define time_travel_del_event(e) do { } while (0)
#endif
592
/*
 * Host timer-signal entry point: update simulated time if needed and
 * dispatch the kernel's timer interrupt.
 */
void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	unsigned long flags;

	/*
	 * In basic time-travel mode we still get real interrupts
	 * (signals) but since we don't read time from the OS, we
	 * must update the simulated time here to the expiry when
	 * we get a signal.
	 * This is not the case in inf-cpu mode, since there we
	 * never get any real signals from the OS.
	 */
	if (time_travel_mode == TT_MODE_BASIC)
		time_travel_handle_real_alarm();

	local_irq_save(flags);
	do_IRQ(TIMER_IRQ, regs);
	local_irq_restore(flags);
}
612
613 static int itimer_shutdown(struct clock_event_device *evt)
614 {
615         if (time_travel_mode != TT_MODE_OFF)
616                 time_travel_del_event(&time_travel_timer_event);
617
618         if (time_travel_mode != TT_MODE_INFCPU &&
619             time_travel_mode != TT_MODE_EXTERNAL)
620                 os_timer_disable();
621
622         return 0;
623 }
624
/*
 * clockevent periodic state: arm a HZ-rate tick, simulated and/or via
 * the real OS timer depending on mode.
 */
static int itimer_set_periodic(struct clock_event_device *evt)
{
	unsigned long long interval = NSEC_PER_SEC / HZ;

	if (time_travel_mode != TT_MODE_OFF) {
		/* replace any previously-armed event with a periodic one */
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_periodic_timer);
		time_travel_set_interval(interval);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + interval);
	}

	/* inf-cpu/external modes use no real OS timer */
	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		os_timer_set_interval(interval);

	return 0;
}
644
/*
 * clockevent one-shot programming: fire once after @delta (+1, see
 * below) ticks, simulated and/or via the real OS timer.
 */
static int itimer_next_event(unsigned long delta,
			     struct clock_event_device *evt)
{
	/* NOTE(review): +1 presumably rounds up to avoid early expiry - confirm */
	delta += 1;

	if (time_travel_mode != TT_MODE_OFF) {
		/* replace any previously-armed event with a one-shot */
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_oneshot_timer);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + delta);
	}

	/* inf-cpu/external modes use no real OS timer */
	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		return os_timer_one_shot(delta);

	return 0;
}
664
/* clockevent one-shot state entry: arm with the minimum delta. */
static int itimer_one_shot(struct clock_event_device *evt)
{
	return itimer_next_event(0, evt);
}
669
/* UML's clock event device, backed by the host POSIX timer. */
static struct clock_event_device timer_clockevent = {
	.name                   = "posix-timer",
	.rating                 = 250,
	.cpumask                = cpu_possible_mask,
	.features               = CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown     = itimer_shutdown,
	.set_state_periodic     = itimer_set_periodic,
	.set_state_oneshot      = itimer_one_shot,
	.set_next_event         = itimer_next_event,
	.shift                  = 0,
	.max_delta_ns           = 0xffffffff,
	.max_delta_ticks        = 0xffffffff,
	.min_delta_ns           = TIMER_MIN_DELTA,
	.min_delta_ticks        = TIMER_MIN_DELTA, // microsecond resolution should be enough for anyone, same as 640K RAM
	.irq                    = 0,
	.mult                   = 1,
};
688
689 static irqreturn_t um_timer(int irq, void *dev)
690 {
691         if (get_current()->mm != NULL)
692         {
693         /* userspace - relay signal, results in correct userspace timers */
694                 os_alarm_process(get_current()->mm->context.id.u.pid);
695         }
696
697         (*timer_clockevent.event_handler)(&timer_clockevent);
698
699         return IRQ_HANDLED;
700 }
701
/*
 * Clocksource read: in time-travel modes return (and slightly advance)
 * the simulated time; otherwise read the host clock. Values are scaled
 * down by TIMER_MULTIPLIER.
 */
static u64 timer_read(struct clocksource *cs)
{
	if (time_travel_mode != TT_MODE_OFF) {
		/*
		 * We make reading the timer cost a bit so that we don't get
		 * stuck in loops that expect time to move more than the
		 * exact requested sleep amount, e.g. python's socket server,
		 * see https://bugs.python.org/issue37026.
		 *
		 * However, don't do that when we're in interrupt or such as
		 * then we might recurse into our own processing, and get to
		 * even more waiting, and that's not good - it messes up the
		 * "what do I do next" and onstack event we use to know when
		 * to return from time_travel_update_time().
		 */
		if (!irqs_disabled() && !in_interrupt() && !in_softirq() &&
		    !time_travel_ext_waiting)
			time_travel_update_time(time_travel_time +
						TIMER_MULTIPLIER,
						false);
		return time_travel_time / TIMER_MULTIPLIER;
	}

	return os_nsecs() / TIMER_MULTIPLIER;
}
727
/* UML's clocksource; timer_read() handles time-travel vs. host time. */
static struct clocksource timer_clocksource = {
	.name           = "timer",
	.rating         = 300,
	.read           = timer_read,
	.mask           = CLOCKSOURCE_MASK(64),
	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};
735
736 static void __init um_timer_setup(void)
737 {
738         int err;
739
740         err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
741         if (err != 0)
742                 printk(KERN_ERR "register_timer : request_irq failed - "
743                        "errno = %d\n", -err);
744
745         err = os_timer_create();
746         if (err != 0) {
747                 printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
748                 return;
749         }
750
751         err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
752         if (err) {
753                 printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
754                 return;
755         }
756         clockevents_register_device(&timer_clockevent);
757 }
758
759 void read_persistent_clock64(struct timespec64 *ts)
760 {
761         long long nsecs;
762
763         time_travel_set_start();
764
765         if (time_travel_mode != TT_MODE_OFF)
766                 nsecs = time_travel_start + time_travel_time;
767         else
768                 nsecs = os_persistent_clock_emulation();
769
770         set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
771                                   nsecs % NSEC_PER_SEC);
772 }
773
774 void __init time_init(void)
775 {
776         timer_set_signal_handler();
777         late_time_init = um_timer_setup;
778 }
779
780 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
781 unsigned long calibrate_delay_is_known(void)
782 {
783         if (time_travel_mode == TT_MODE_INFCPU ||
784             time_travel_mode == TT_MODE_EXTERNAL)
785                 return 1;
786         return 0;
787 }
788
789 int setup_time_travel(char *str)
790 {
791         if (strcmp(str, "=inf-cpu") == 0) {
792                 time_travel_mode = TT_MODE_INFCPU;
793                 timer_clockevent.name = "time-travel-timer-infcpu";
794                 timer_clocksource.name = "time-travel-clock";
795                 return 1;
796         }
797
798         if (strncmp(str, "=ext:", 5) == 0) {
799                 time_travel_mode = TT_MODE_EXTERNAL;
800                 timer_clockevent.name = "time-travel-timer-external";
801                 timer_clocksource.name = "time-travel-clock-external";
802                 return time_travel_connect_external(str + 5);
803         }
804
805         if (!*str) {
806                 time_travel_mode = TT_MODE_BASIC;
807                 timer_clockevent.name = "time-travel-timer";
808                 timer_clocksource.name = "time-travel-clock";
809                 return 1;
810         }
811
812         return -EINVAL;
813 }
814
/* register the "time-travel" option and its user-visible help text */
__setup("time-travel", setup_time_travel);
__uml_help(setup_time_travel,
"time-travel\n"
"This option just enables basic time travel mode, in which the clock/timers\n"
"inside the UML instance skip forward when there's nothing to do, rather than\n"
"waiting for real time to elapse. However, instance CPU speed is limited by\n"
"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
"clock (but quicker when there's nothing to do).\n"
"\n"
"time-travel=inf-cpu\n"
"This enables time travel mode with infinite processing power, in which there\n"
"are no wall clock timers, and any CPU processing happens - as seen from the\n"
"guest - instantly. This can be useful for accurate simulation regardless of\n"
"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
"easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
"\n"
"time-travel=ext:[ID:]/path/to/socket\n"
"This enables time travel mode similar to =inf-cpu, except the system will\n"
"use the given socket to coordinate with a central scheduler, in order to\n"
"have more than one system simultaneously be on simulated time. The virtio\n"
"driver code in UML knows about this so you can also simulate networks and\n"
"devices using it, assuming the device has the right capabilities.\n"
"The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
838
839 int setup_time_travel_start(char *str)
840 {
841         int err;
842
843         err = kstrtoull(str, 0, &time_travel_start);
844         if (err)
845                 return err;
846
847         time_travel_start_set = 1;
848         return 1;
849 }
850
/* register the "time-travel-start" option and its help text */
__setup("time-travel-start", setup_time_travel_start);
__uml_help(setup_time_travel_start,
"time-travel-start=<seconds>\n"
"Configure the UML instance's wall clock to start at this value rather than\n"
"the host's wall clock at the time of UML boot.\n");
856 #endif