drivers/hv/hv_util.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/reboot.h>
#include <linux/hyperv.h>
#include <linux/clockchips.h>
#include <linux/ptp_clock_kernel.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

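/* Protocol versions below are encoded as (major << 16) | minor. */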
#define SD_MAJOR        3
#define SD_MINOR        0
#define SD_MINOR_1      1
#define SD_MINOR_2      2
#define SD_VERSION_3_1  (SD_MAJOR << 16 | SD_MINOR_1)
#define SD_VERSION_3_2  (SD_MAJOR << 16 | SD_MINOR_2)
#define SD_VERSION      (SD_MAJOR << 16 | SD_MINOR)

#define SD_MAJOR_1      1
#define SD_VERSION_1    (SD_MAJOR_1 << 16 | SD_MINOR)

#define TS_MAJOR        4
#define TS_MINOR        0
#define TS_VERSION      (TS_MAJOR << 16 | TS_MINOR)

#define TS_MAJOR_1      1
#define TS_VERSION_1    (TS_MAJOR_1 << 16 | TS_MINOR)

#define TS_MAJOR_3      3
#define TS_VERSION_3    (TS_MAJOR_3 << 16 | TS_MINOR)

#define HB_MAJOR        3
#define HB_MINOR        0
#define HB_VERSION      (HB_MAJOR << 16 | HB_MINOR)

#define HB_MAJOR_1      1
#define HB_VERSION_1    (HB_MAJOR_1 << 16 | HB_MINOR)

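/* Protocol versions negotiated with the host for each util service. */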
static int sd_srv_version;
static int ts_srv_version;
static int hb_srv_version;

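/*
 * Version tables offered to the host during negotiation, listed from the
 * newest supported version to the oldest.
 */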
#define SD_VER_COUNT 4
static const int sd_versions[] = {
        SD_VERSION_3_2,
        SD_VERSION_3_1,
        SD_VERSION,
        SD_VERSION_1
};

#define TS_VER_COUNT 3
static const int ts_versions[] = {
        TS_VERSION,
        TS_VERSION_3,
        TS_VERSION_1
};

#define HB_VER_COUNT 2
static const int hb_versions[] = {
        HB_VERSION,
        HB_VERSION_1
};

#define FW_VER_COUNT 2
static const int fw_versions[] = {
        UTIL_FW_VERSION,
        UTIL_WS2K8_FW_VERSION
};

/*
 * Send the "hibernate" udev event in a thread context.
 */
struct hibernate_work_context {
        struct work_struct work;
        struct hv_device *dev;
};

static struct hibernate_work_context hibernate_context;
static bool hibernation_supported;

static void send_hibernate_uevent(struct work_struct *work)
{
        char *uevent_env[2] = { "EVENT=hibernate", NULL };
        struct hibernate_work_context *ctx;

        ctx = container_of(work, struct hibernate_work_context, work);

        kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env);

        pr_info("Sent hibernation uevent\n");
}

static int hv_shutdown_init(struct hv_util_service *srv)
{
        struct vmbus_channel *channel = srv->channel;

        INIT_WORK(&hibernate_context.work, send_hibernate_uevent);
        hibernate_context.dev = channel->device_obj;

        hibernation_supported = hv_is_hibernation_supported();

        return 0;
}

static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
        .util_cb = shutdown_onchannelcallback,
        .util_init = hv_shutdown_init,
};

static int hv_timesync_init(struct hv_util_service *srv);
static int hv_timesync_pre_suspend(void);
static void hv_timesync_deinit(void);

static void timesync_onchannelcallback(void *context);
static struct hv_util_service util_timesynch = {
        .util_cb = timesync_onchannelcallback,
        .util_init = hv_timesync_init,
        .util_pre_suspend = hv_timesync_pre_suspend,
        .util_deinit = hv_timesync_deinit,
};

static void heartbeat_onchannelcallback(void *context);
static struct hv_util_service util_heartbeat = {
        .util_cb = heartbeat_onchannelcallback,
};

static struct hv_util_service util_kvp = {
        .util_cb = hv_kvp_onchannelcallback,
        .util_init = hv_kvp_init,
        .util_pre_suspend = hv_kvp_pre_suspend,
        .util_pre_resume = hv_kvp_pre_resume,
        .util_deinit = hv_kvp_deinit,
};

static struct hv_util_service util_vss = {
        .util_cb = hv_vss_onchannelcallback,
        .util_init = hv_vss_init,
        .util_pre_suspend = hv_vss_pre_suspend,
        .util_pre_resume = hv_vss_pre_resume,
        .util_deinit = hv_vss_deinit,
};

static struct hv_util_service util_fcopy = {
        .util_cb = hv_fcopy_onchannelcallback,
        .util_init = hv_fcopy_init,
        .util_pre_suspend = hv_fcopy_pre_suspend,
        .util_pre_resume = hv_fcopy_pre_resume,
        .util_deinit = hv_fcopy_deinit,
};

static void perform_shutdown(struct work_struct *dummy)
{
        orderly_poweroff(true);
}

static void perform_restart(struct work_struct *dummy)
{
        orderly_reboot();
}

/*
 * Perform the shutdown operation in a thread context.
 */
static DECLARE_WORK(shutdown_work, perform_shutdown);

/*
 * Perform the restart operation in a thread context.
 */
static DECLARE_WORK(restart_work, perform_restart);

static void shutdown_onchannelcallback(void *context)
{
        struct vmbus_channel *channel = context;
        struct work_struct *work = NULL;
        u32 recvlen;
        u64 requestid;
        u8  *shut_txf_buf = util_shutdown.recv_buffer;

        struct shutdown_msg_data *shutdown_msg;

        struct icmsg_hdr *icmsghdrp;

        if (vmbus_recvpacket(channel, shut_txf_buf, HV_HYP_PAGE_SIZE, &recvlen, &requestid)) {
                pr_err_ratelimited("Shutdown request received. Could not read into shut txf buf\n");
                return;
        }

        if (!recvlen)
                return;

        /* Ensure recvlen is big enough to read header data */
        if (recvlen < ICMSG_HDR) {
                pr_err_ratelimited("Shutdown request received. Packet length too small: %d\n",
                                   recvlen);
                return;
        }

        icmsghdrp = (struct icmsg_hdr *)&shut_txf_buf[sizeof(struct vmbuspipe_hdr)];

        if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
                if (vmbus_prep_negotiate_resp(icmsghdrp,
                                shut_txf_buf, recvlen,
                                fw_versions, FW_VER_COUNT,
                                sd_versions, SD_VER_COUNT,
                                NULL, &sd_srv_version)) {
                        pr_info("Shutdown IC version %d.%d\n",
                                sd_srv_version >> 16,
                                sd_srv_version & 0xFFFF);
                }
        } else if (icmsghdrp->icmsgtype == ICMSGTYPE_SHUTDOWN) {
                /* Ensure recvlen is big enough to contain shutdown_msg_data struct */
                if (recvlen < ICMSG_HDR + sizeof(struct shutdown_msg_data)) {
                        pr_err_ratelimited("Invalid shutdown msg data. Packet length too small: %u\n",
                                           recvlen);
                        return;
                }

                shutdown_msg = (struct shutdown_msg_data *)&shut_txf_buf[ICMSG_HDR];

                /*
                 * shutdown_msg->flags can be 0 (shut down), 2 (reboot),
                 * or 4 (hibernate). Any of these values may be bitwise-ORed
                 * with 1, which means the request should be performed by
                 * force. Linux always performs the request by force.
                 */
                switch (shutdown_msg->flags) {
                case 0:
                case 1:
                        icmsghdrp->status = HV_S_OK;
                        work = &shutdown_work;
                        pr_info("Shutdown request received - graceful shutdown initiated\n");
                        break;
                case 2:
                case 3:
                        icmsghdrp->status = HV_S_OK;
                        work = &restart_work;
                        pr_info("Restart request received - graceful restart initiated\n");
                        break;
                case 4:
                case 5:
                        pr_info("Hibernation request received\n");
                        icmsghdrp->status = hibernation_supported ?
                                HV_S_OK : HV_E_FAIL;
                        if (hibernation_supported)
                                work = &hibernate_context.work;
                        break;
                default:
                        icmsghdrp->status = HV_E_FAIL;
                        pr_info("Shutdown request received - Invalid request\n");
                        break;
                }
        } else {
                icmsghdrp->status = HV_E_FAIL;
                pr_err_ratelimited("Shutdown request received. Invalid msg type: %d\n",
                                   icmsghdrp->icmsgtype);
        }

        icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
                | ICMSGHDRFLAG_RESPONSE;

        vmbus_sendpacket(channel, shut_txf_buf,
                         recvlen, requestid,
                         VM_PKT_DATA_INBAND, 0);

        if (work)
                schedule_work(work);
}

/*
 * Set the host time in a process context.
 */
static struct work_struct adj_time_work;

/*
 * The last time sample received from the host. The PTP device responds to
 * requests using this data and the current partition-wide time reference
 * count.
 */
static struct {
        u64                             host_time;
        u64                             ref_time;
        spinlock_t                      lock;
} host_ts;

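/*
 * Convert a Hyper-V reference time value (100ns units since the Windows
 * epoch, 1601-01-01) into nanoseconds since the Unix epoch.
 */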
static inline u64 reftime_to_ns(u64 reftime)
{
        return (reftime - WLTIMEDELTA) * 100;
}

/*
 * Hard-coded threshold for the host timesync delay: 600 seconds
 */
static const u64 HOST_TIMESYNC_DELAY_THRESH = 600 * (u64)NSEC_PER_SEC;

static int hv_get_adj_host_time(struct timespec64 *ts)
{
        u64 newtime, reftime, timediff_adj;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&host_ts.lock, flags);
        reftime = hv_read_reference_counter();

        /*
         * We need to let the caller know that the last update from the
         * host is older than the maximum allowable threshold. clock_gettime()
         * and the PTP ioctl do not have a documented error that we could
         * return for this specific case. Use ESTALE to report this.
         */
        timediff_adj = reftime - host_ts.ref_time;
        if (timediff_adj * 100 > HOST_TIMESYNC_DELAY_THRESH) {
                pr_warn_once("TIMESYNC IC: Stale time stamp, %llu nsecs old\n",
                             (timediff_adj * 100));
                ret = -ESTALE;
        }

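        /*
         * Estimate the current host time: the last host sample plus the
         * reference time that has elapsed since that sample was taken.
         */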
        newtime = host_ts.host_time + timediff_adj;
        *ts = ns_to_timespec64(reftime_to_ns(newtime));
        spin_unlock_irqrestore(&host_ts.lock, flags);

        return ret;
}

static void hv_set_host_time(struct work_struct *work)
{
        struct timespec64 ts;

        if (!hv_get_adj_host_time(&ts))
                do_settimeofday64(&ts);
}

/*
 * Synchronize time with the host after reboot, restore, etc.
 *
 * The ICTIMESYNCFLAG_SYNC flag bit indicates reboot or restore events of the
 * VM. After reboot the flag ICTIMESYNCFLAG_SYNC is included in the first time
 * message after the timesync channel is opened. Since the hv_utils module is
 * loaded after hv_vmbus, the first message is usually missed. This bit is
 * considered a hard request to discipline the clock.
 *
 * The ICTIMESYNCFLAG_SAMPLE bit indicates a time sample from the host. This is
 * typically used as a hint to the guest. The guest is under no obligation
 * to discipline the clock.
 */
static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
{
        unsigned long flags;
        u64 cur_reftime;

        /*
         * Save the adjusted time sample from the host and the snapshot
         * of the current system time.
         */
        spin_lock_irqsave(&host_ts.lock, flags);

        cur_reftime = hv_read_reference_counter();
        host_ts.host_time = hosttime;
        host_ts.ref_time = cur_reftime;

        /*
         * TimeSync v4 messages contain the reference time (the guest's
         * Hyper-V clocksource reading when the time sample was generated),
         * so we can improve the precision by adding the delta between now
         * and the time of generation. For older protocols we set
         * reftime == cur_reftime on call.
         */
        host_ts.host_time += (cur_reftime - reftime);

        spin_unlock_irqrestore(&host_ts.lock, flags);

        /* Schedule work to do do_settimeofday64() */
        if (adj_flags & ICTIMESYNCFLAG_SYNC)
                schedule_work(&adj_time_work);
}

/*
 * Time Sync Channel message handler.
 */
static void timesync_onchannelcallback(void *context)
{
        struct vmbus_channel *channel = context;
        u32 recvlen;
        u64 requestid;
        struct icmsg_hdr *icmsghdrp;
        struct ictimesync_data *timedatap;
        struct ictimesync_ref_data *refdata;
        u8 *time_txf_buf = util_timesynch.recv_buffer;

        /*
         * Drain the ring buffer and use the last packet to update
         * host_ts
         */
        while (1) {
                int ret = vmbus_recvpacket(channel, time_txf_buf,
                                           HV_HYP_PAGE_SIZE, &recvlen,
                                           &requestid);
                if (ret) {
                        pr_err_ratelimited("TimeSync IC pkt recv failed (Err: %d)\n",
                                           ret);
                        break;
                }

                if (!recvlen)
                        break;

                /* Ensure recvlen is big enough to read header data */
                if (recvlen < ICMSG_HDR) {
                        pr_err_ratelimited("Timesync request received. Packet length too small: %d\n",
                                           recvlen);
                        break;
                }

                icmsghdrp = (struct icmsg_hdr *)&time_txf_buf[
                                sizeof(struct vmbuspipe_hdr)];

                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
                        if (vmbus_prep_negotiate_resp(icmsghdrp,
                                                time_txf_buf, recvlen,
                                                fw_versions, FW_VER_COUNT,
                                                ts_versions, TS_VER_COUNT,
                                                NULL, &ts_srv_version)) {
                                pr_info("TimeSync IC version %d.%d\n",
                                        ts_srv_version >> 16,
                                        ts_srv_version & 0xFFFF);
                        }
                } else if (icmsghdrp->icmsgtype == ICMSGTYPE_TIMESYNC) {
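                        /*
                         * TimeSync v4+ samples carry the reference time at
                         * which the host generated the sample; older versions
                         * do not, so the current reference time is used
                         * instead.
                         */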
                        if (ts_srv_version > TS_VERSION_3) {
                                /* Ensure recvlen is big enough to read ictimesync_ref_data */
                                if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_ref_data)) {
                                        pr_err_ratelimited("Invalid ictimesync ref data. Length too small: %u\n",
                                                           recvlen);
                                        break;
                                }
                                refdata = (struct ictimesync_ref_data *)&time_txf_buf[ICMSG_HDR];

                                adj_guesttime(refdata->parenttime,
                                                refdata->vmreferencetime,
                                                refdata->flags);
                        } else {
                                /* Ensure recvlen is big enough to read ictimesync_data */
                                if (recvlen < ICMSG_HDR + sizeof(struct ictimesync_data)) {
                                        pr_err_ratelimited("Invalid ictimesync data. Length too small: %u\n",
                                                           recvlen);
                                        break;
                                }
                                timedatap = (struct ictimesync_data *)&time_txf_buf[ICMSG_HDR];

                                adj_guesttime(timedatap->parenttime,
                                              hv_read_reference_counter(),
                                              timedatap->flags);
                        }
                } else {
                        icmsghdrp->status = HV_E_FAIL;
                        pr_err_ratelimited("Timesync request received. Invalid msg type: %d\n",
                                           icmsghdrp->icmsgtype);
                }

                icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
                        | ICMSGHDRFLAG_RESPONSE;

                vmbus_sendpacket(channel, time_txf_buf,
                                 recvlen, requestid,
                                 VM_PKT_DATA_INBAND, 0);
        }
}

/*
 * Heartbeat functionality.
 * Every two seconds, Hyper-V sends us a heartbeat request message.
 * We respond to this message, and Hyper-V knows we are alive.
 */
static void heartbeat_onchannelcallback(void *context)
{
        struct vmbus_channel *channel = context;
        u32 recvlen;
        u64 requestid;
        struct icmsg_hdr *icmsghdrp;
        struct heartbeat_msg_data *heartbeat_msg;
        u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;

        while (1) {

                if (vmbus_recvpacket(channel, hbeat_txf_buf, HV_HYP_PAGE_SIZE,
                                     &recvlen, &requestid)) {
                        pr_err_ratelimited("Heartbeat request received. Could not read into hbeat txf buf\n");
                        return;
                }

                if (!recvlen)
                        break;

                /* Ensure recvlen is big enough to read header data */
                if (recvlen < ICMSG_HDR) {
                        pr_err_ratelimited("Heartbeat request received. Packet length too small: %d\n",
                                           recvlen);
                        break;
                }

                icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
                                sizeof(struct vmbuspipe_hdr)];

                if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
                        if (vmbus_prep_negotiate_resp(icmsghdrp,
                                        hbeat_txf_buf, recvlen,
                                        fw_versions, FW_VER_COUNT,
                                        hb_versions, HB_VER_COUNT,
                                        NULL, &hb_srv_version)) {

                                pr_info("Heartbeat IC version %d.%d\n",
                                        hb_srv_version >> 16,
                                        hb_srv_version & 0xFFFF);
                        }
                } else if (icmsghdrp->icmsgtype == ICMSGTYPE_HEARTBEAT) {
                        /*
                         * Ensure recvlen is big enough to read seq_num. Reserved area is not
                         * included in the check as the host may not fill it up entirely
                         */
                        if (recvlen < ICMSG_HDR + sizeof(u64)) {
                                pr_err_ratelimited("Invalid heartbeat msg data. Length too small: %u\n",
                                                   recvlen);
                                break;
                        }
                        heartbeat_msg = (struct heartbeat_msg_data *)&hbeat_txf_buf[ICMSG_HDR];

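                        /*
                         * Bump the sequence number and send the message back
                         * below; this is the guest's "still alive" reply to
                         * the host's heartbeat request.
                         */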
                        heartbeat_msg->seq_num += 1;
                } else {
                        icmsghdrp->status = HV_E_FAIL;
                        pr_err_ratelimited("Heartbeat request received. Invalid msg type: %d\n",
                                           icmsghdrp->icmsgtype);
                }

                icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
                        | ICMSGHDRFLAG_RESPONSE;

                vmbus_sendpacket(channel, hbeat_txf_buf,
                                 recvlen, requestid,
                                 VM_PKT_DATA_INBAND, 0);
        }
}

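/* Send/receive ring buffer sizes used when opening the util channels. */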
#define HV_UTIL_RING_SEND_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)
#define HV_UTIL_RING_RECV_SIZE VMBUS_RING_SIZE(3 * HV_HYP_PAGE_SIZE)

static int util_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
{
        struct hv_util_service *srv =
                (struct hv_util_service *)dev_id->driver_data;
        int ret;

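        /* Buffer used to receive messages from the host on this channel. */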
        srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
        if (!srv->recv_buffer)
                return -ENOMEM;
        srv->channel = dev->channel;
        if (srv->util_init) {
                ret = srv->util_init(srv);
                if (ret) {
                        ret = -ENODEV;
                        goto error1;
                }
        }

        /*
         * The set of services managed by the util driver are not performance
         * critical and do not need batched reading. Furthermore, some services
         * such as KVP can only handle one message from the host at a time.
         * Turn off batched reading for all util drivers before we open the
         * channel.
         */
        set_channel_read_mode(dev->channel, HV_CALL_DIRECT);

        hv_set_drvdata(dev, srv);

        ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
                         HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
                         dev->channel);
        if (ret)
                goto error;

        return 0;

error:
        if (srv->util_deinit)
                srv->util_deinit();
error1:
        kfree(srv->recv_buffer);
        return ret;
}

static int util_remove(struct hv_device *dev)
{
        struct hv_util_service *srv = hv_get_drvdata(dev);

        if (srv->util_deinit)
                srv->util_deinit();
        vmbus_close(dev->channel);
        kfree(srv->recv_buffer);

        return 0;
}

/*
 * When we're in util_suspend(), all the userspace processes have been frozen
 * (refer to hibernate() -> freeze_processes()). Userspace is thawed only
 * after the whole resume procedure, including util_resume(), finishes.
 */
static int util_suspend(struct hv_device *dev)
{
        struct hv_util_service *srv = hv_get_drvdata(dev);
        int ret = 0;

        if (srv->util_pre_suspend) {
                ret = srv->util_pre_suspend();
                if (ret)
                        return ret;
        }

        vmbus_close(dev->channel);

        return 0;
}

static int util_resume(struct hv_device *dev)
{
        struct hv_util_service *srv = hv_get_drvdata(dev);
        int ret = 0;

        if (srv->util_pre_resume) {
                ret = srv->util_pre_resume();
                if (ret)
                        return ret;
        }

        ret = vmbus_open(dev->channel, HV_UTIL_RING_SEND_SIZE,
                         HV_UTIL_RING_RECV_SIZE, NULL, 0, srv->util_cb,
                         dev->channel);
        return ret;
}

static const struct hv_vmbus_device_id id_table[] = {
        /* Shutdown guid */
        { HV_SHUTDOWN_GUID,
          .driver_data = (unsigned long)&util_shutdown
        },
        /* Time synch guid */
        { HV_TS_GUID,
          .driver_data = (unsigned long)&util_timesynch
        },
        /* Heartbeat guid */
        { HV_HEART_BEAT_GUID,
          .driver_data = (unsigned long)&util_heartbeat
        },
        /* KVP guid */
        { HV_KVP_GUID,
          .driver_data = (unsigned long)&util_kvp
        },
        /* VSS GUID */
        { HV_VSS_GUID,
          .driver_data = (unsigned long)&util_vss
        },
        /* File copy GUID */
        { HV_FCOPY_GUID,
          .driver_data = (unsigned long)&util_fcopy
        },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver util_drv = {
        .name = "hv_utils",
        .id_table = id_table,
        .probe = util_probe,
        .remove = util_remove,
        .suspend = util_suspend,
        .resume = util_resume,
        .driver = {
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
};

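/*
 * PTP clock backed by the Hyper-V TimeSync samples. The clock is read-only
 * from the guest side: only gettime64 is implemented, and the adjust/set
 * operations below return -EOPNOTSUPP.
 */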
static int hv_ptp_enable(struct ptp_clock_info *info,
                         struct ptp_clock_request *request, int on)
{
        return -EOPNOTSUPP;
}

static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
{
        return -EOPNOTSUPP;
}

static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
        return -EOPNOTSUPP;
}

static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
        return -EOPNOTSUPP;
}

static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
        return hv_get_adj_host_time(ts);
}

static struct ptp_clock_info ptp_hyperv_info = {
        .name           = "hyperv",
        .enable         = hv_ptp_enable,
        .adjtime        = hv_ptp_adjtime,
        .adjfreq        = hv_ptp_adjfreq,
        .gettime64      = hv_ptp_gettime,
        .settime64      = hv_ptp_settime,
        .owner          = THIS_MODULE,
};

static struct ptp_clock *hv_ptp_clock;

static int hv_timesync_init(struct hv_util_service *srv)
{
        /* TimeSync requires Hyper-V clocksource. */
        if (!hv_read_reference_counter)
                return -ENODEV;

        spin_lock_init(&host_ts.lock);

        INIT_WORK(&adj_time_work, hv_set_host_time);

        /*
         * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
         * disabled but the driver is still useful without the PTP device
         * as it still handles the ICTIMESYNCFLAG_SYNC case.
         */
        hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
        if (IS_ERR_OR_NULL(hv_ptp_clock)) {
                pr_err("cannot register PTP clock: %ld\n",
                       PTR_ERR(hv_ptp_clock));
                hv_ptp_clock = NULL;
        }

        return 0;
}

static void hv_timesync_cancel_work(void)
{
        cancel_work_sync(&adj_time_work);
}

static int hv_timesync_pre_suspend(void)
{
        hv_timesync_cancel_work();
        return 0;
}

static void hv_timesync_deinit(void)
{
        if (hv_ptp_clock)
                ptp_clock_unregister(hv_ptp_clock);

        hv_timesync_cancel_work();
}

static int __init init_hyperv_utils(void)
{
        pr_info("Registering HyperV Utility Driver\n");

        return vmbus_driver_register(&util_drv);
}

static void exit_hyperv_utils(void)
{
        pr_info("De-Registered HyperV Utility Driver\n");

        vmbus_driver_unregister(&util_drv);
}

module_init(init_hyperv_utils);
module_exit(exit_hyperv_utils);

MODULE_DESCRIPTION("Hyper-V Utilities");
MODULE_LICENSE("GPL");