Drivers: hv: vmbus: Add timeout to vmbus_wait_for_unload
drivers/hv/channel_mgmt.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2009, Microsoft Corporation.
4  *
5  * Authors:
6  *   Haiyang Zhang <haiyangz@microsoft.com>
7  *   Hank Janssen  <hjanssen@microsoft.com>
8  */
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/kernel.h>
12 #include <linux/interrupt.h>
13 #include <linux/sched.h>
14 #include <linux/wait.h>
15 #include <linux/mm.h>
16 #include <linux/slab.h>
17 #include <linux/list.h>
18 #include <linux/module.h>
19 #include <linux/completion.h>
20 #include <linux/delay.h>
21 #include <linux/cpu.h>
22 #include <linux/hyperv.h>
23 #include <asm/mshyperv.h>
24
25 #include "hyperv_vmbus.h"
26
27 static void init_vp_index(struct vmbus_channel *channel);
28
29 const struct vmbus_device vmbus_devs[] = {
30         /* IDE */
31         { .dev_type = HV_IDE,
32           HV_IDE_GUID,
33           .perf_device = true,
34         },
35
36         /* SCSI */
37         { .dev_type = HV_SCSI,
38           HV_SCSI_GUID,
39           .perf_device = true,
40         },
41
42         /* Fibre Channel */
43         { .dev_type = HV_FC,
44           HV_SYNTHFC_GUID,
45           .perf_device = true,
46         },
47
48         /* Synthetic NIC */
49         { .dev_type = HV_NIC,
50           HV_NIC_GUID,
51           .perf_device = true,
52         },
53
54         /* Network Direct */
55         { .dev_type = HV_ND,
56           HV_ND_GUID,
57           .perf_device = true,
58         },
59
60         /* PCIE */
61         { .dev_type = HV_PCIE,
62           HV_PCIE_GUID,
63           .perf_device = false,
64         },
65
66         /* Synthetic Frame Buffer */
67         { .dev_type = HV_FB,
68           HV_SYNTHVID_GUID,
69           .perf_device = false,
70         },
71
72         /* Synthetic Keyboard */
73         { .dev_type = HV_KBD,
74           HV_KBD_GUID,
75           .perf_device = false,
76         },
77
78         /* Synthetic Mouse */
79         { .dev_type = HV_MOUSE,
80           HV_MOUSE_GUID,
81           .perf_device = false,
82         },
83
84         /* KVP */
85         { .dev_type = HV_KVP,
86           HV_KVP_GUID,
87           .perf_device = false,
88         },
89
90         /* Time Synch */
91         { .dev_type = HV_TS,
92           HV_TS_GUID,
93           .perf_device = false,
94         },
95
96         /* Heartbeat */
97         { .dev_type = HV_HB,
98           HV_HEART_BEAT_GUID,
99           .perf_device = false,
100         },
101
102         /* Shutdown */
103         { .dev_type = HV_SHUTDOWN,
104           HV_SHUTDOWN_GUID,
105           .perf_device = false,
106         },
107
108         /* File copy */
109         { .dev_type = HV_FCOPY,
110           HV_FCOPY_GUID,
111           .perf_device = false,
112         },
113
114         /* Backup */
115         { .dev_type = HV_BACKUP,
116           HV_VSS_GUID,
117           .perf_device = false,
118         },
119
120         /* Dynamic Memory */
121         { .dev_type = HV_DM,
122           HV_DM_GUID,
123           .perf_device = false,
124         },
125
126         /* Unknown GUID */
127         { .dev_type = HV_UNKNOWN,
128           .perf_device = false,
129         },
130 };
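/*
 * Note: hv_get_dev_type() below indexes this array with the HV_* device
 * type values (HV_IDE .. HV_UNKNOWN), so the entries must stay in enum
 * order, with the HV_UNKNOWN catch-all entry last.
 */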
131
132 static const struct {
133         guid_t guid;
134 } vmbus_unsupported_devs[] = {
135         { HV_AVMA1_GUID },
136         { HV_AVMA2_GUID },
137         { HV_RDV_GUID   },
138 };
139
140 /*
141  * The rescinded channel may be blocked waiting for a response from the host;
142  * take care of that.
143  */
144 static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
145 {
146         struct vmbus_channel_msginfo *msginfo;
147         unsigned long flags;
148
149
150         spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
151         channel->rescind = true;
152         list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
153                                 msglistentry) {
154
155                 if (msginfo->waiting_channel == channel) {
156                         complete(&msginfo->waitevent);
157                         break;
158                 }
159         }
160         spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
161 }
162
163 static bool is_unsupported_vmbus_devs(const guid_t *guid)
164 {
165         int i;
166
167         for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
168                 if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
169                         return true;
170         return false;
171 }
172
173 static u16 hv_get_dev_type(const struct vmbus_channel *channel)
174 {
175         const guid_t *guid = &channel->offermsg.offer.if_type;
176         u16 i;
177
178         if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
179                 return HV_UNKNOWN;
180
181         for (i = HV_IDE; i < HV_UNKNOWN; i++) {
182                 if (guid_equal(guid, &vmbus_devs[i].guid))
183                         return i;
184         }
185         pr_info("Unknown GUID: %pUl\n", guid);
186         return i;
187 }
188
189 /**
190  * vmbus_prep_negotiate_resp() - Create default response for Negotiate message
191  * @icmsghdrp: Pointer to msg header structure
192  * @buf: Raw buffer channel data
193  * @fw_version: The framework versions we can support.
194  * @fw_vercnt: The size of @fw_version.
195  * @srv_version: The service versions we can support.
196  * @srv_vercnt: The size of @srv_version.
197  * @nego_fw_version: The selected framework version.
198  * @nego_srv_version: The selected service version.
199  *
200  * Note: Versions are given in decreasing order.
201  *
202  * Set up and fill in default negotiate response message.
203  * Mainly used by Hyper-V drivers.
204  */
205 bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
206                                 u8 *buf, const int *fw_version, int fw_vercnt,
207                                 const int *srv_version, int srv_vercnt,
208                                 int *nego_fw_version, int *nego_srv_version)
209 {
210         int icframe_major, icframe_minor;
211         int icmsg_major, icmsg_minor;
212         int fw_major, fw_minor;
213         int srv_major, srv_minor;
214         int i, j;
215         bool found_match = false;
216         struct icmsg_negotiate *negop;
217
218         icmsghdrp->icmsgsize = 0x10;
219         negop = (struct icmsg_negotiate *)&buf[
220                 sizeof(struct vmbuspipe_hdr) +
221                 sizeof(struct icmsg_hdr)];
222
223         icframe_major = negop->icframe_vercnt;
224         icframe_minor = 0;
225
226         icmsg_major = negop->icmsg_vercnt;
227         icmsg_minor = 0;
228
229         /*
230          * Select the framework version number we will
231          * support.
232          */
233
234         for (i = 0; i < fw_vercnt; i++) {
235                 fw_major = (fw_version[i] >> 16);
236                 fw_minor = (fw_version[i] & 0xFFFF);
237
238                 for (j = 0; j < negop->icframe_vercnt; j++) {
239                         if ((negop->icversion_data[j].major == fw_major) &&
240                             (negop->icversion_data[j].minor == fw_minor)) {
241                                 icframe_major = negop->icversion_data[j].major;
242                                 icframe_minor = negop->icversion_data[j].minor;
243                                 found_match = true;
244                                 break;
245                         }
246                 }
247
248                 if (found_match)
249                         break;
250         }
251
252         if (!found_match)
253                 goto fw_error;
254
255         found_match = false;
256
257         for (i = 0; i < srv_vercnt; i++) {
258                 srv_major = (srv_version[i] >> 16);
259                 srv_minor = (srv_version[i] & 0xFFFF);
260
261                 for (j = negop->icframe_vercnt;
262                         (j < negop->icframe_vercnt + negop->icmsg_vercnt);
263                         j++) {
264
265                         if ((negop->icversion_data[j].major == srv_major) &&
266                                 (negop->icversion_data[j].minor == srv_minor)) {
267
268                                 icmsg_major = negop->icversion_data[j].major;
269                                 icmsg_minor = negop->icversion_data[j].minor;
270                                 found_match = true;
271                                 break;
272                         }
273                 }
274
275                 if (found_match)
276                         break;
277         }
278
279         /*
280          * Respond with the framework and service
281          * version numbers we can support.
282          */
283
284 fw_error:
285         if (!found_match) {
286                 negop->icframe_vercnt = 0;
287                 negop->icmsg_vercnt = 0;
288         } else {
289                 negop->icframe_vercnt = 1;
290                 negop->icmsg_vercnt = 1;
291         }
292
293         if (nego_fw_version)
294                 *nego_fw_version = (icframe_major << 16) | icframe_minor;
295
296         if (nego_srv_version)
297                 *nego_srv_version = (icmsg_major << 16) | icmsg_minor;
298
299         negop->icversion_data[0].major = icframe_major;
300         negop->icversion_data[0].minor = icframe_minor;
301         negop->icversion_data[1].major = icmsg_major;
302         negop->icversion_data[1].minor = icmsg_minor;
303         return found_match;
304 }
305
306 EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
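/*
 * Usage sketch, for illustration only (not part of the original file):
 * roughly how an hv_utils-style driver calls vmbus_prep_negotiate_resp()
 * from its channel callback.  All "example_*" names, the version numbers
 * and the buffer handling below are assumptions.
 */
#if 0
static u8 example_recv_buffer[PAGE_SIZE];
static const int example_fw_versions[] = { (3 << 16) | 0 };  /* framework 3.0 */
static const int example_srv_versions[] = { (1 << 16) | 0 }; /* service 1.0 */

static void example_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u8 *buf = example_recv_buffer;
	struct icmsg_hdr *icmsghdrp;
	u32 recvlen;
	u64 requestid;
	int nego_srv;

	if (vmbus_recvpacket(channel, buf, PAGE_SIZE, &recvlen, &requestid))
		return;
	if (!recvlen)
		return;

	icmsghdrp = (struct icmsg_hdr *)&buf[sizeof(struct vmbuspipe_hdr)];
	if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE)
		vmbus_prep_negotiate_resp(icmsghdrp, buf,
					  example_fw_versions, 1,
					  example_srv_versions, 1,
					  NULL, &nego_srv);

	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
	vmbus_sendpacket(channel, buf, recvlen, requestid,
			 VM_PKT_DATA_INBAND, 0);
}
#endif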
307
308 /*
309  * alloc_channel - Allocate and initialize a vmbus channel object
310  */
311 static struct vmbus_channel *alloc_channel(void)
312 {
313         struct vmbus_channel *channel;
314
315         channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
316         if (!channel)
317                 return NULL;
318
319         spin_lock_init(&channel->sched_lock);
320         spin_lock_init(&channel->lock);
321         init_completion(&channel->rescind_event);
322
323         INIT_LIST_HEAD(&channel->sc_list);
324
325         tasklet_init(&channel->callback_event,
326                      vmbus_on_event, (unsigned long)channel);
327
328         hv_ringbuffer_pre_init(channel);
329
330         return channel;
331 }
332
333 /*
334  * free_channel - Release the resources used by the vmbus channel object
335  */
336 static void free_channel(struct vmbus_channel *channel)
337 {
338         tasklet_kill(&channel->callback_event);
339         vmbus_remove_channel_attr_group(channel);
340
341         kobject_put(&channel->kobj);
342 }
343
344 void vmbus_channel_map_relid(struct vmbus_channel *channel)
345 {
346         if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
347                 return;
348         /*
349          * The mapping of the channel's relid is visible from the CPUs that
350          * execute vmbus_chan_sched() by the time that vmbus_chan_sched() will
351          * execute:
352          *
353          *  (a) In the "normal (i.e., not resuming from hibernation)" path,
354          *      the full barrier in smp_store_mb() guarantees that the store
355          *      is propagated to all CPUs before the add_channel_work work
356          *      is queued.  In turn, add_channel_work is queued before the
357          *      channel's ring buffer is allocated/initialized and the
358          *      OPENCHANNEL message for the channel is sent in vmbus_open().
359          *      Hyper-V won't start sending the interrupts for the channel
360          *      before the OPENCHANNEL message is acked.  The memory barrier
361          *      in vmbus_chan_sched() -> sync_test_and_clear_bit() ensures
362          *      that vmbus_chan_sched() must find the channel's relid in
363          *      recv_int_page before retrieving the channel pointer from the
364          *      array of channels.
365          *
366          *  (b) In the "resuming from hibernation" path, the smp_store_mb()
367          *      guarantees that the store is propagated to all CPUs before
368          *      the VMBus connection is marked as ready for the resume event
369          *      (cf. check_ready_for_resume_event()).  The interrupt handler
370          *      of the VMBus driver and vmbus_chan_sched() can not run before
371          *      vmbus_bus_resume() has completed execution (cf. resume_noirq).
372          */
373         smp_store_mb(
374                 vmbus_connection.channels[channel->offermsg.child_relid],
375                 channel);
376 }
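/*
 * Reader-side sketch, for illustration only: a simplified assumption of
 * how vmbus_chan_sched() consumes the mapping published above (the real
 * code lives in drivers/hv/vmbus_drv.c).  "example_relid_lookup" is a
 * hypothetical name.
 */
#if 0
static struct vmbus_channel *example_relid_lookup(u32 relid)
{
	if (relid >= MAX_CHANNEL_RELIDS)
		return NULL;
	/*
	 * Pairs with the smp_store_mb() in vmbus_channel_map_relid(): once
	 * the relid is observed in recv_int_page, the pointer store below
	 * is guaranteed to be visible as well.
	 */
	return READ_ONCE(vmbus_connection.channels[relid]);
}
#endif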
377
378 void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
379 {
380         if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
381                 return;
382         WRITE_ONCE(
383                 vmbus_connection.channels[channel->offermsg.child_relid],
384                 NULL);
385 }
386
387 static void vmbus_release_relid(u32 relid)
388 {
389         struct vmbus_channel_relid_released msg;
390         int ret;
391
392         memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
393         msg.child_relid = relid;
394         msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
395         ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
396                              true);
397
398         trace_vmbus_release_relid(&msg, ret);
399 }
400
401 void hv_process_channel_removal(struct vmbus_channel *channel)
402 {
403         unsigned long flags;
404
405         lockdep_assert_held(&vmbus_connection.channel_mutex);
406         BUG_ON(!channel->rescind);
407
408         /*
409          * hv_process_channel_removal() could find INVALID_RELID only for
410          * hv_sock channels.  See the inline comments in vmbus_onoffer().
411          */
412         WARN_ON(channel->offermsg.child_relid == INVALID_RELID &&
413                 !is_hvsock_channel(channel));
414
415         /*
416          * Upon suspend, an in-use hv_sock channel is removed from the array of
417          * channels and the relid is invalidated.  After hibernation, when the
418          * user-space application destroys the channel, it's unnecessary and
419          * unsafe to remove the channel from the array of channels.  See also
420          * the inline comments before the call of vmbus_release_relid() below.
421          */
422         if (channel->offermsg.child_relid != INVALID_RELID)
423                 vmbus_channel_unmap_relid(channel);
424
425         if (channel->primary_channel == NULL) {
426                 list_del(&channel->listentry);
427         } else {
428                 struct vmbus_channel *primary_channel = channel->primary_channel;
429                 spin_lock_irqsave(&primary_channel->lock, flags);
430                 list_del(&channel->sc_list);
431                 spin_unlock_irqrestore(&primary_channel->lock, flags);
432         }
433
434         /*
435          * If this is a "perf" channel, update the hv_numa_map[] masks so that
436          * init_vp_index() can (re-)use the CPU.
437          */
438         if (hv_is_perf_channel(channel))
439                 hv_clear_alloced_cpu(channel->target_cpu);
440
441         /*
442          * Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
443          * the relid is invalidated; after hibernation, when the user-space app
444          * destroys the channel, the relid is INVALID_RELID, and in this case
445          * it's unnecessary and unsafe to release the old relid, since the same
446          * relid can refer to a completely different channel now.
447          */
448         if (channel->offermsg.child_relid != INVALID_RELID)
449                 vmbus_release_relid(channel->offermsg.child_relid);
450
451         free_channel(channel);
452 }
453
454 void vmbus_free_channels(void)
455 {
456         struct vmbus_channel *channel, *tmp;
457
458         list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
459                 listentry) {
460                 /* hv_process_channel_removal() needs this */
461                 channel->rescind = true;
462
463                 vmbus_device_unregister(channel->device_obj);
464         }
465 }
466
467 /* Note: the function can run concurrently for primary/sub channels. */
468 static void vmbus_add_channel_work(struct work_struct *work)
469 {
470         struct vmbus_channel *newchannel =
471                 container_of(work, struct vmbus_channel, add_channel_work);
472         struct vmbus_channel *primary_channel = newchannel->primary_channel;
473         unsigned long flags;
474         int ret;
475
476         /*
477          * This state is used to indicate a successful open
478          * so that when we do close the channel normally, we
479          * can cleanup properly.
480          */
481         newchannel->state = CHANNEL_OPEN_STATE;
482
483         if (primary_channel != NULL) {
484                 /* newchannel is a sub-channel. */
485                 struct hv_device *dev = primary_channel->device_obj;
486
487                 if (vmbus_add_channel_kobj(dev, newchannel))
488                         goto err_deq_chan;
489
490                 if (primary_channel->sc_creation_callback != NULL)
491                         primary_channel->sc_creation_callback(newchannel);
492
493                 newchannel->probe_done = true;
494                 return;
495         }
496
497         /*
498          * Start the process of binding the primary channel to the driver
499          */
500         newchannel->device_obj = vmbus_device_create(
501                 &newchannel->offermsg.offer.if_type,
502                 &newchannel->offermsg.offer.if_instance,
503                 newchannel);
504         if (!newchannel->device_obj)
505                 goto err_deq_chan;
506
507         newchannel->device_obj->device_id = newchannel->device_id;
508         /*
509          * Add the new device to the bus. This will kick off device-driver
510          * binding which eventually invokes the device driver's AddDevice()
511          * method.
512          */
513         ret = vmbus_device_register(newchannel->device_obj);
514
515         if (ret != 0) {
516                 pr_err("unable to add child device object (relid %d)\n",
517                         newchannel->offermsg.child_relid);
518                 kfree(newchannel->device_obj);
519                 goto err_deq_chan;
520         }
521
522         newchannel->probe_done = true;
523         return;
524
525 err_deq_chan:
526         mutex_lock(&vmbus_connection.channel_mutex);
527
528         /*
529          * We need to set the flag, otherwise
530          * vmbus_onoffer_rescind() could block forever waiting on probe_done.
531          */
532         newchannel->probe_done = true;
533
534         if (primary_channel == NULL) {
535                 list_del(&newchannel->listentry);
536         } else {
537                 spin_lock_irqsave(&primary_channel->lock, flags);
538                 list_del(&newchannel->sc_list);
539                 spin_unlock_irqrestore(&primary_channel->lock, flags);
540         }
541
542         /* vmbus_process_offer() has mapped the channel. */
543         vmbus_channel_unmap_relid(newchannel);
544
545         mutex_unlock(&vmbus_connection.channel_mutex);
546
547         vmbus_release_relid(newchannel->offermsg.child_relid);
548
549         free_channel(newchannel);
550 }
551
552 /*
553  * vmbus_process_offer - Process the offer by creating a channel/device
554  * associated with this offer
555  */
556 static void vmbus_process_offer(struct vmbus_channel *newchannel)
557 {
558         struct vmbus_channel *channel;
559         struct workqueue_struct *wq;
560         unsigned long flags;
561         bool fnew = true;
562
563         /*
564          * Synchronize vmbus_process_offer() and CPU hotplugging:
565          *
566          * CPU1                         CPU2
567          *
568          * [vmbus_process_offer()]      [Hot removal of the CPU]
569          *
570          * CPU_READ_LOCK                CPUS_WRITE_LOCK
571          * LOAD cpu_online_mask         SEARCH chn_list
572          * STORE target_cpu             LOAD target_cpu
573          * INSERT chn_list              STORE cpu_online_mask
574          * CPUS_READ_UNLOCK             CPUS_WRITE_UNLOCK
575          *
576          * Forbids: CPU1's LOAD from *not* seeing CPU2's STORE &&
577          *              CPU2's SEARCH from *not* seeing CPU1's INSERT
578          *
579          * Forbids: CPU2's SEARCH from seeing CPU1's INSERT &&
580          *              CPU2's LOAD from *not* seeing CPU1's STORE
581          */
582         cpus_read_lock();
583
584         /*
585          * Serializes the modifications of the chn_list list as well as
586          * the accesses to next_numa_node_id in init_vp_index().
587          */
588         mutex_lock(&vmbus_connection.channel_mutex);
589
590         init_vp_index(newchannel);
591
592         /* Remember the channels that should be cleaned up upon suspend. */
593         if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
594                 atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);
595
596         /*
597          * Now that we have acquired the channel_mutex,
598          * we can release the potentially racing rescind thread.
599          */
600         atomic_dec(&vmbus_connection.offer_in_progress);
601
602         list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
603                 if (guid_equal(&channel->offermsg.offer.if_type,
604                                &newchannel->offermsg.offer.if_type) &&
605                     guid_equal(&channel->offermsg.offer.if_instance,
606                                &newchannel->offermsg.offer.if_instance)) {
607                         fnew = false;
608                         break;
609                 }
610         }
611
612         if (fnew)
613                 list_add_tail(&newchannel->listentry,
614                               &vmbus_connection.chn_list);
615         else {
616                 /*
617                  * Check to see if this is a valid sub-channel.
618                  */
619                 if (newchannel->offermsg.offer.sub_channel_index == 0) {
620                         mutex_unlock(&vmbus_connection.channel_mutex);
621                         /*
622                          * Don't call free_channel(), because newchannel->kobj
623                          * is not initialized yet.
624                          */
625                         kfree(newchannel);
626                         WARN_ON_ONCE(1);
627                         return;
628                 }
629                 /*
630                  * Process the sub-channel.
631                  */
632                 newchannel->primary_channel = channel;
633                 spin_lock_irqsave(&channel->lock, flags);
634                 list_add_tail(&newchannel->sc_list, &channel->sc_list);
635                 spin_unlock_irqrestore(&channel->lock, flags);
636         }
637
638         vmbus_channel_map_relid(newchannel);
639
640         mutex_unlock(&vmbus_connection.channel_mutex);
641         cpus_read_unlock();
642
643         /*
644          * vmbus_process_offer() mustn't call channel->sc_creation_callback()
645          * directly for sub-channels, because sc_creation_callback() ->
646          * vmbus_open() may never get the host's response to the
647          * OPEN_CHANNEL message (the host may rescind a channel at any time,
648          * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
649          * may not wake up the vmbus_open() as it's blocked due to a non-zero
650          * vmbus_connection.offer_in_progress, and finally we have a deadlock.
651          *
652          * The above is also true for primary channels, if the related device
653          * drivers use sync probing mode by default.
654          *
655          * And, usually the handling of primary channels and sub-channels can
656          * depend on each other, so we should offload them to different
657          * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
658          * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
659          * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock
660          * and waits for all the sub-channels to appear, but the latter
661          * can't get the rtnl_lock and this blocks the handling of
662          * sub-channels.
663          */
664         INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
665         wq = fnew ? vmbus_connection.handle_primary_chan_wq :
666                     vmbus_connection.handle_sub_chan_wq;
667         queue_work(wq, &newchannel->add_channel_work);
668 }
669
670 /*
671  * We use this state to statically distribute the channel interrupt load.
672  */
673 static int next_numa_node_id;
674
675 /*
676  * Starting with Win8, we can statically distribute the incoming
677  * channel interrupt load by binding a channel to VCPU.
678  *
679  * For pre-win8 hosts or non-performance critical channels we assign the
680  * VMBUS_CONNECT_CPU.
681  *
682  * Starting with win8, performance critical channels will be distributed
683  * evenly among all the available NUMA nodes.  Once the node is assigned,
684  * we will assign the CPU based on a simple round robin scheme.
685  */
686 static void init_vp_index(struct vmbus_channel *channel)
687 {
688         bool perf_chn = hv_is_perf_channel(channel);
689         cpumask_var_t available_mask;
690         struct cpumask *alloced_mask;
691         u32 target_cpu;
692         int numa_node;
693
694         if ((vmbus_proto_version == VERSION_WS2008) ||
695             (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
696             !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
697                 /*
698                  * Prior to win8, all channel interrupts are
699                  * delivered on VMBUS_CONNECT_CPU.
700                  * Also if the channel is not a performance critical
701                  * channel, bind it to VMBUS_CONNECT_CPU.
702                  * In case alloc_cpumask_var() fails, bind it to
703                  * VMBUS_CONNECT_CPU.
704                  */
705                 channel->numa_node = cpu_to_node(VMBUS_CONNECT_CPU);
706                 channel->target_cpu = VMBUS_CONNECT_CPU;
707                 channel->target_vp =
708                         hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
709                 if (perf_chn)
710                         hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
711                 return;
712         }
713
714         while (true) {
715                 numa_node = next_numa_node_id++;
716                 if (numa_node == nr_node_ids) {
717                         next_numa_node_id = 0;
718                         continue;
719                 }
720                 if (cpumask_empty(cpumask_of_node(numa_node)))
721                         continue;
722                 break;
723         }
724         channel->numa_node = numa_node;
725         alloced_mask = &hv_context.hv_numa_map[numa_node];
726
727         if (cpumask_weight(alloced_mask) ==
728             cpumask_weight(cpumask_of_node(numa_node))) {
729                 /*
730                  * We have cycled through all the CPUs in the node;
731                  * reset the alloced map.
732                  */
733                 cpumask_clear(alloced_mask);
734         }
735
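        /*
         * alloced_mask is always a subset of this node's cpumask, so the
         * XOR below is effectively a set difference: the node's CPUs that
         * have not yet been bound to a channel.
         */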
736         cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));
737
738         target_cpu = cpumask_first(available_mask);
739         cpumask_set_cpu(target_cpu, alloced_mask);
740
741         channel->target_cpu = target_cpu;
742         channel->target_vp = hv_cpu_number_to_vp_number(target_cpu);
743
744         free_cpumask_var(available_mask);
745 }
746
747 static void vmbus_wait_for_unload(void)
748 {
749         int cpu;
750         void *page_addr;
751         struct hv_message *msg;
752         struct vmbus_channel_message_header *hdr;
753         u32 message_type, i;
754
755         /*
756          * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
757          * used for initial contact or to CPU0 depending on host version. When
758          * we're crashing on a different CPU let's hope that IRQ handler on
759          * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
760          * functional and vmbus_unload_response() will complete
761          * vmbus_connection.unload_event. If not, the last thing we can do is
762          * read message pages for all CPUs directly.
763          *
764          * Wait no more than 10 seconds so that the panic path can't get
765          * hung forever in case the response message isn't seen.
766          */
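        /* 1000 iterations x mdelay(10) bounds the busy-wait at ~10 seconds. */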
767         for (i = 0; i < 1000; i++) {
768                 if (completion_done(&vmbus_connection.unload_event))
769                         break;
770
771                 for_each_online_cpu(cpu) {
772                         struct hv_per_cpu_context *hv_cpu
773                                 = per_cpu_ptr(hv_context.cpu_context, cpu);
774
775                         page_addr = hv_cpu->synic_message_page;
776                         msg = (struct hv_message *)page_addr
777                                 + VMBUS_MESSAGE_SINT;
778
779                         message_type = READ_ONCE(msg->header.message_type);
780                         if (message_type == HVMSG_NONE)
781                                 continue;
782
783                         hdr = (struct vmbus_channel_message_header *)
784                                 msg->u.payload;
785
786                         if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
787                                 complete(&vmbus_connection.unload_event);
788
789                         vmbus_signal_eom(msg, message_type);
790                 }
791
792                 mdelay(10);
793         }
794
795         /*
796          * We're crashing and either got the UNLOAD_RESPONSE or timed out
797          * waiting for it; clean up any maybe-pending messages on all CPUs
798          * to be able to receive new messages after we reconnect.
799          */
800         for_each_online_cpu(cpu) {
801                 struct hv_per_cpu_context *hv_cpu
802                         = per_cpu_ptr(hv_context.cpu_context, cpu);
803
804                 page_addr = hv_cpu->synic_message_page;
805                 msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
806                 msg->header.message_type = HVMSG_NONE;
807         }
808 }
809
810 /*
811  * vmbus_unload_response - Handler for the unload response.
812  */
813 static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
814 {
815         /*
816          * This is a global event; just wakeup the waiting thread.
817          * Once we successfully unload, we can cleanup the monitor state.
818          */
819         complete(&vmbus_connection.unload_event);
820 }
821
822 void vmbus_initiate_unload(bool crash)
823 {
824         struct vmbus_channel_message_header hdr;
825
826         if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
827                 return;
828
829         /* Pre-Win2012R2 hosts don't support reconnect */
830         if (vmbus_proto_version < VERSION_WIN8_1)
831                 return;
832
833         init_completion(&vmbus_connection.unload_event);
834         memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
835         hdr.msgtype = CHANNELMSG_UNLOAD;
836         vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
837                        !crash);
838
839         /*
840          * vmbus_initiate_unload() is also called on crash, and the crash may
841          * happen in an interrupt context, where scheduling is impossible.
842          */
843         if (!crash)
844                 wait_for_completion(&vmbus_connection.unload_event);
845         else
846                 vmbus_wait_for_unload();
847 }
848
849 static void check_ready_for_resume_event(void)
850 {
851         /*
852          * If all the old primary channels have been fixed up, then it's safe
853          * to resume.
854          */
855         if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
856                 complete(&vmbus_connection.ready_for_resume_event);
857 }
858
859 static void vmbus_setup_channel_state(struct vmbus_channel *channel,
860                                       struct vmbus_channel_offer_channel *offer)
861 {
862         /*
863          * Setup state for signalling the host.
864          */
865         channel->sig_event = VMBUS_EVENT_CONNECTION_ID;
866
867         if (vmbus_proto_version != VERSION_WS2008) {
868                 channel->is_dedicated_interrupt =
869                                 (offer->is_dedicated_interrupt != 0);
870                 channel->sig_event = offer->connection_id;
871         }
872
873         memcpy(&channel->offermsg, offer,
874                sizeof(struct vmbus_channel_offer_channel));
875         channel->monitor_grp = (u8)offer->monitorid / 32;
876         channel->monitor_bit = (u8)offer->monitorid % 32;
877         channel->device_id = hv_get_dev_type(channel);
878 }
879
880 /*
881  * find_primary_channel_by_offer - Get the channel object given the new offer.
882  * This is only used in the resume path of hibernation.
883  */
884 static struct vmbus_channel *
885 find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
886 {
887         struct vmbus_channel *channel = NULL, *iter;
888         const guid_t *inst1, *inst2;
889
890         /* Ignore sub-channel offers. */
891         if (offer->offer.sub_channel_index != 0)
892                 return NULL;
893
894         mutex_lock(&vmbus_connection.channel_mutex);
895
896         list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
897                 inst1 = &iter->offermsg.offer.if_instance;
898                 inst2 = &offer->offer.if_instance;
899
900                 if (guid_equal(inst1, inst2)) {
901                         channel = iter;
902                         break;
903                 }
904         }
905
906         mutex_unlock(&vmbus_connection.channel_mutex);
907
908         return channel;
909 }
910
911 /*
912  * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
913  *
914  */
915 static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
916 {
917         struct vmbus_channel_offer_channel *offer;
918         struct vmbus_channel *oldchannel, *newchannel;
919         size_t offer_sz;
920
921         offer = (struct vmbus_channel_offer_channel *)hdr;
922
923         trace_vmbus_onoffer(offer);
924
925         oldchannel = find_primary_channel_by_offer(offer);
926
927         if (oldchannel != NULL) {
928                 /*
929                  * We're resuming from hibernation: all the sub-channel and
930                  * hv_sock channels we had before the hibernation should have
931                  * been cleaned up, and now we must be seeing a re-offered
932                  * primary channel that we had before the hibernation.
933                  */
934
935                 /*
936                  * { Initially: channel relid = INVALID_RELID,
937                  *              channels[valid_relid] = NULL }
938                  *
939                  * CPU1                                 CPU2
940                  *
941                  * [vmbus_onoffer()]                    [vmbus_device_release()]
942                  *
943                  * LOCK channel_mutex                   LOCK channel_mutex
944                  * STORE channel relid = valid_relid    LOAD r1 = channel relid
945                  * MAP_RELID channel                    if (r1 != INVALID_RELID)
946                  * UNLOCK channel_mutex                   UNMAP_RELID channel
947                  *                                      UNLOCK channel_mutex
948                  *
949                  * Forbids: r1 == valid_relid &&
950                  *              channels[valid_relid] == channel
951                  *
952                  * Note.  r1 can be INVALID_RELID only for an hv_sock channel.
953                  * None of the hv_sock channels which were present before the
954                  * suspend are re-offered upon the resume.  See the WARN_ON()
955                  * in hv_process_channel_removal().
956                  */
957                 mutex_lock(&vmbus_connection.channel_mutex);
958
959                 atomic_dec(&vmbus_connection.offer_in_progress);
960
961                 WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
962                 /* Fix up the relid. */
963                 oldchannel->offermsg.child_relid = offer->child_relid;
964
965                 offer_sz = sizeof(*offer);
966                 if (memcmp(offer, &oldchannel->offermsg, offer_sz) != 0) {
967                         /*
968                          * This is not an error, since the host can also change
969                          * the other field(s) of the offer, e.g. on WS RS5
970                          * (Build 17763), the offer->connection_id of the
971                          * Mellanox VF vmbus device can change when the host
972                          * reoffers the device upon resume.
973                          */
974                         pr_debug("vmbus offer changed: relid=%d\n",
975                                  offer->child_relid);
976
977                         print_hex_dump_debug("Old vmbus offer: ",
978                                              DUMP_PREFIX_OFFSET, 16, 4,
979                                              &oldchannel->offermsg, offer_sz,
980                                              false);
981                         print_hex_dump_debug("New vmbus offer: ",
982                                              DUMP_PREFIX_OFFSET, 16, 4,
983                                              offer, offer_sz, false);
984
985                         /* Fix up the old channel. */
986                         vmbus_setup_channel_state(oldchannel, offer);
987                 }
988
989                 /* Add the channel back to the array of channels. */
990                 vmbus_channel_map_relid(oldchannel);
991                 check_ready_for_resume_event();
992
993                 mutex_unlock(&vmbus_connection.channel_mutex);
994                 return;
995         }
996
997         /* Allocate the channel object and save this offer. */
998         newchannel = alloc_channel();
999         if (!newchannel) {
1000                 vmbus_release_relid(offer->child_relid);
1001                 atomic_dec(&vmbus_connection.offer_in_progress);
1002                 pr_err("Unable to allocate channel object\n");
1003                 return;
1004         }
1005
1006         vmbus_setup_channel_state(newchannel, offer);
1007
1008         vmbus_process_offer(newchannel);
1009 }
1010
1011 static void check_ready_for_suspend_event(void)
1012 {
1013         /*
1014          * If all the sub-channels or hv_sock channels have been cleaned up,
1015          * then it's safe to suspend.
1016          */
1017         if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
1018                 complete(&vmbus_connection.ready_for_suspend_event);
1019 }
1020
1021 /*
1022  * vmbus_onoffer_rescind - Rescind offer handler.
1023  *
1024  * We queue a work item to process this offer synchronously
1025  */
1026 static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
1027 {
1028         struct vmbus_channel_rescind_offer *rescind;
1029         struct vmbus_channel *channel;
1030         struct device *dev;
1031         bool clean_up_chan_for_suspend;
1032
1033         rescind = (struct vmbus_channel_rescind_offer *)hdr;
1034
1035         trace_vmbus_onoffer_rescind(rescind);
1036
1037         /*
1038          * The offer msg and the corresponding rescind msg
1039          * from the host are guaranteed to be ordered -
1040          * offer comes in first and then the rescind.
1041          * Since we process these events in work elements,
1042          * and with preemption, we may end up processing
1043          * the events out of order.  We rely on the synchronization
1044          * provided by offer_in_progress and by channel_mutex for
1045          * ordering these events:
1046          *
1047          * { Initially: offer_in_progress = 1 }
1048          *
1049          * CPU1                         CPU2
1050          *
1051          * [vmbus_onoffer()]            [vmbus_onoffer_rescind()]
1052          *
1053          * LOCK channel_mutex           WAIT_ON offer_in_progress == 0
1054          * DECREMENT offer_in_progress  LOCK channel_mutex
1055          * STORE channels[]             LOAD channels[]
1056          * UNLOCK channel_mutex         UNLOCK channel_mutex
1057          *
1058          * Forbids: CPU2's LOAD from *not* seeing CPU1's STORE
1059          */
1060
1061         while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
1062                 /*
1063                  * Wait until the processing of any in-flight channel
1064                  * offer has completed.
1065                  */
1066                 msleep(1);
1067         }
1068
1069         mutex_lock(&vmbus_connection.channel_mutex);
1070         channel = relid2channel(rescind->child_relid);
1071         mutex_unlock(&vmbus_connection.channel_mutex);
1072
1073         if (channel == NULL) {
1074                 /*
1075                  * We failed in processing the offer message;
1076                  * we would have cleaned up the relid in that
1077                  * failure path.
1078                  */
1079                 return;
1080         }
1081
1082         clean_up_chan_for_suspend = is_hvsock_channel(channel) ||
1083                                     is_sub_channel(channel);
1084         /*
1085          * Before setting channel->rescind in vmbus_rescind_cleanup(), we
1086          * should make sure the channel callback is not running any more.
1087          */
1088         vmbus_reset_channel_cb(channel);
1089
1090         /*
1091          * Now wait for offer handling to complete.
1092          */
1093         vmbus_rescind_cleanup(channel);
1094         while (READ_ONCE(channel->probe_done) == false) {
1095                 /*
1096                  * Wait until the offer handling of this channel has
1097                  * completed, i.e. until probe_done has been set.
1098                  */
1099                 msleep(1);
1100         }
1101
1102         /*
1103          * At this point, the rescind handling can proceed safely.
1104          */
1105
1106         if (channel->device_obj) {
1107                 if (channel->chn_rescind_callback) {
1108                         channel->chn_rescind_callback(channel);
1109
1110                         if (clean_up_chan_for_suspend)
1111                                 check_ready_for_suspend_event();
1112
1113                         return;
1114                 }
1115                 /*
1116                  * We will have to unregister this device from the
1117                  * driver core.
1118                  */
1119                 dev = get_device(&channel->device_obj->device);
1120                 if (dev) {
1121                         vmbus_device_unregister(channel->device_obj);
1122                         put_device(dev);
1123                 }
1124         }
1125         if (channel->primary_channel != NULL) {
1126                 /*
1127                  * Sub-channel is being rescinded. Following is the channel
1128                  * close sequence when initiated from the driver (refer to
1129                  * vmbus_close() for details):
1130                  * 1. Close all sub-channels first
1131                  * 2. Then close the primary channel.
1132                  */
1133                 mutex_lock(&vmbus_connection.channel_mutex);
1134                 if (channel->state == CHANNEL_OPEN_STATE) {
1135                         /*
1136                          * The channel is not currently opened (its state
1137                          * is CHANNEL_OPEN_STATE); safe to clean it up now.
1138                          */
1139                         hv_process_channel_removal(channel);
1140                 } else {
1141                         complete(&channel->rescind_event);
1142                 }
1143                 mutex_unlock(&vmbus_connection.channel_mutex);
1144         }
1145
1146         /* The "channel" may have been freed. Do not access it any longer. */
1147
1148         if (clean_up_chan_for_suspend)
1149                 check_ready_for_suspend_event();
1150 }
1151
1152 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
1153 {
1154         BUG_ON(!is_hvsock_channel(channel));
1155
1156         /* We always get a rescind msg when a connection is closed. */
1157         while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
1158                 msleep(1);
1159
1160         vmbus_device_unregister(channel->device_obj);
1161 }
1162 EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
1163
1164
1165 /*
1166  * vmbus_onoffers_delivered -
1167  * This is invoked when all offers have been delivered.
1168  *
1169  * Nothing to do here.
1170  */
1171 static void vmbus_onoffers_delivered(
1172                         struct vmbus_channel_message_header *hdr)
1173 {
1174 }
1175
1176 /*
1177  * vmbus_onopen_result - Open result handler.
1178  *
1179  * This is invoked when we received a response to our channel open request.
1180  * Find the matching request, copy the response and signal the requesting
1181  * thread.
1182  */
1183 static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
1184 {
1185         struct vmbus_channel_open_result *result;
1186         struct vmbus_channel_msginfo *msginfo;
1187         struct vmbus_channel_message_header *requestheader;
1188         struct vmbus_channel_open_channel *openmsg;
1189         unsigned long flags;
1190
1191         result = (struct vmbus_channel_open_result *)hdr;
1192
1193         trace_vmbus_onopen_result(result);
1194
1195         /*
1196          * Find the open msg, copy the result and signal/unblock the wait event
1197          */
1198         spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1199
1200         list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1201                                 msglistentry) {
1202                 requestheader =
1203                         (struct vmbus_channel_message_header *)msginfo->msg;
1204
1205                 if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
1206                         openmsg =
1207                         (struct vmbus_channel_open_channel *)msginfo->msg;
1208                         if (openmsg->child_relid == result->child_relid &&
1209                             openmsg->openid == result->openid) {
1210                                 memcpy(&msginfo->response.open_result,
1211                                        result,
1212                                        sizeof(
1213                                         struct vmbus_channel_open_result));
1214                                 complete(&msginfo->waitevent);
1215                                 break;
1216                         }
1217                 }
1218         }
1219         spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1220 }
1221
1222 /*
1223  * vmbus_ongpadl_created - GPADL created handler.
1224  *
1225  * This is invoked when we received a response to our gpadl create request.
1226  * Find the matching request, copy the response and signal the requesting
1227  * thread.
1228  */
1229 static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
1230 {
1231         struct vmbus_channel_gpadl_created *gpadlcreated;
1232         struct vmbus_channel_msginfo *msginfo;
1233         struct vmbus_channel_message_header *requestheader;
1234         struct vmbus_channel_gpadl_header *gpadlheader;
1235         unsigned long flags;
1236
1237         gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
1238
1239         trace_vmbus_ongpadl_created(gpadlcreated);
1240
1241         /*
1242          * Find the establish msg, copy the result and signal/unblock the wait
1243          * event
1244          */
1245         spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1246
1247         list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1248                                 msglistentry) {
1249                 requestheader =
1250                         (struct vmbus_channel_message_header *)msginfo->msg;
1251
1252                 if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
1253                         gpadlheader =
1254                         (struct vmbus_channel_gpadl_header *)requestheader;
1255
1256                         if ((gpadlcreated->child_relid ==
1257                              gpadlheader->child_relid) &&
1258                             (gpadlcreated->gpadl == gpadlheader->gpadl)) {
1259                                 memcpy(&msginfo->response.gpadl_created,
1260                                        gpadlcreated,
1261                                        sizeof(
1262                                         struct vmbus_channel_gpadl_created));
1263                                 complete(&msginfo->waitevent);
1264                                 break;
1265                         }
1266                 }
1267         }
1268         spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1269 }
1270
1271 /*
1272  * vmbus_ongpadl_torndown - GPADL torndown handler.
1273  *
1274  * This is invoked when we received a response to our gpadl teardown request.
1275  * Find the matching request, copy the response and signal the requesting
1276  * thread.
1277  */
1278 static void vmbus_ongpadl_torndown(
1279                         struct vmbus_channel_message_header *hdr)
1280 {
1281         struct vmbus_channel_gpadl_torndown *gpadl_torndown;
1282         struct vmbus_channel_msginfo *msginfo;
1283         struct vmbus_channel_message_header *requestheader;
1284         struct vmbus_channel_gpadl_teardown *gpadl_teardown;
1285         unsigned long flags;
1286
1287         gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
1288
1289         trace_vmbus_ongpadl_torndown(gpadl_torndown);
1290
1291         /*
1292          * Find the open msg, copy the result and signal/unblock the wait event
1293          */
1294         spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1295
1296         list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1297                                 msglistentry) {
1298                 requestheader =
1299                         (struct vmbus_channel_message_header *)msginfo->msg;
1300
1301                 if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
1302                         gpadl_teardown =
1303                         (struct vmbus_channel_gpadl_teardown *)requestheader;
1304
1305                         if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
1306                                 memcpy(&msginfo->response.gpadl_torndown,
1307                                        gpadl_torndown,
1308                                        sizeof(
1309                                         struct vmbus_channel_gpadl_torndown));
1310                                 complete(&msginfo->waitevent);
1311                                 break;
1312                         }
1313                 }
1314         }
1315         spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1316 }
1317
1318 /*
1319  * vmbus_onversion_response - Version response handler
1320  *
1321  * This is invoked when we received a response to our initiate contact request.
1322  * Find the matching request, copy the response and signal the requesting
1323  * thread.
1324  */
1325 static void vmbus_onversion_response(
1326                 struct vmbus_channel_message_header *hdr)
1327 {
1328         struct vmbus_channel_msginfo *msginfo;
1329         struct vmbus_channel_message_header *requestheader;
1330         struct vmbus_channel_version_response *version_response;
1331         unsigned long flags;
1332
1333         version_response = (struct vmbus_channel_version_response *)hdr;
1334
1335         trace_vmbus_onversion_response(version_response);
1336
1337         spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1338
1339         list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1340                                 msglistentry) {
1341                 requestheader =
1342                         (struct vmbus_channel_message_header *)msginfo->msg;
1343
1344                 if (requestheader->msgtype ==
1345                     CHANNELMSG_INITIATE_CONTACT) {
1346                         memcpy(&msginfo->response.version_response,
1347                               version_response,
1348                               sizeof(struct vmbus_channel_version_response));
1349                         complete(&msginfo->waitevent);
1350                 }
1351         }
1352         spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1353 }
1354
1355 /* Channel message dispatch table */
1356 const struct vmbus_channel_message_table_entry
1357 channel_message_table[CHANNELMSG_COUNT] = {
1358         { CHANNELMSG_INVALID,                   0, NULL, 0},
1359         { CHANNELMSG_OFFERCHANNEL,              0, vmbus_onoffer,
1360                 sizeof(struct vmbus_channel_offer_channel)},
1361         { CHANNELMSG_RESCIND_CHANNELOFFER,      0, vmbus_onoffer_rescind,
1362                 sizeof(struct vmbus_channel_rescind_offer) },
1363         { CHANNELMSG_REQUESTOFFERS,             0, NULL, 0},
1364         { CHANNELMSG_ALLOFFERS_DELIVERED,       1, vmbus_onoffers_delivered, 0},
1365         { CHANNELMSG_OPENCHANNEL,               0, NULL, 0},
1366         { CHANNELMSG_OPENCHANNEL_RESULT,        1, vmbus_onopen_result,
1367                 sizeof(struct vmbus_channel_open_result)},
1368         { CHANNELMSG_CLOSECHANNEL,              0, NULL, 0},
1369         { CHANNELMSG_GPADL_HEADER,              0, NULL, 0},
1370         { CHANNELMSG_GPADL_BODY,                0, NULL, 0},
1371         { CHANNELMSG_GPADL_CREATED,             1, vmbus_ongpadl_created,
1372                 sizeof(struct vmbus_channel_gpadl_created)},
1373         { CHANNELMSG_GPADL_TEARDOWN,            0, NULL, 0},
1374         { CHANNELMSG_GPADL_TORNDOWN,            1, vmbus_ongpadl_torndown,
1375                 sizeof(struct vmbus_channel_gpadl_torndown) },
1376         { CHANNELMSG_RELID_RELEASED,            0, NULL, 0},
1377         { CHANNELMSG_INITIATE_CONTACT,          0, NULL, 0},
1378         { CHANNELMSG_VERSION_RESPONSE,          1, vmbus_onversion_response,
1379                 sizeof(struct vmbus_channel_version_response)},
1380         { CHANNELMSG_UNLOAD,                    0, NULL, 0},
1381         { CHANNELMSG_UNLOAD_RESPONSE,           1, vmbus_unload_response, 0},
1382         { CHANNELMSG_18,                        0, NULL, 0},
1383         { CHANNELMSG_19,                        0, NULL, 0},
1384         { CHANNELMSG_20,                        0, NULL, 0},
1385         { CHANNELMSG_TL_CONNECT_REQUEST,        0, NULL, 0},
1386         { CHANNELMSG_MODIFYCHANNEL,             0, NULL, 0},
1387         { CHANNELMSG_TL_CONNECT_RESULT,         0, NULL, 0},
1388 };
1389
1390 /*
1391  * vmbus_onmessage - Handler for channel protocol messages.
1392  *
1393  * This is invoked in the vmbus worker thread context.
1394  */
1395 void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
1396 {
1397         trace_vmbus_on_message(hdr);
1398
1399         /*
1400          * vmbus_on_msg_dpc() makes sure the hdr->msgtype here cannot go
1401          * out of bounds and that the message_handler pointer is not NULL.
1402          */
1403         channel_message_table[hdr->msgtype].message_handler(hdr);
1404 }
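/*
 * Sketch, for illustration only: a simplified assumption of the checks
 * that vmbus_on_msg_dpc() (drivers/hv/vmbus_drv.c) performs before it
 * hands a message to vmbus_onmessage().  "example_msg_ok" is hypothetical.
 */
#if 0
static bool example_msg_ok(const struct hv_message *msg)
{
	const struct vmbus_channel_message_header *hdr =
		(const struct vmbus_channel_message_header *)msg->u.payload;
	const struct vmbus_channel_message_table_entry *entry;

	if (hdr->msgtype >= CHANNELMSG_COUNT)
		return false;
	entry = &channel_message_table[hdr->msgtype];
	if (entry->message_handler == NULL)
		return false;
	/* The payload must be at least as large as the handler expects. */
	return msg->header.payload_size >= entry->min_payload_len;
}
#endif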
1405
1406 /*
1407  * vmbus_request_offers - Send a request to get all our pending offers.
1408  */
1409 int vmbus_request_offers(void)
1410 {
1411         struct vmbus_channel_message_header *msg;
1412         struct vmbus_channel_msginfo *msginfo;
1413         int ret;
1414
1415         msginfo = kmalloc(sizeof(*msginfo) +
1416                           sizeof(struct vmbus_channel_message_header),
1417                           GFP_KERNEL);
1418         if (!msginfo)
1419                 return -ENOMEM;
1420
1421         msg = (struct vmbus_channel_message_header *)msginfo->msg;
1422
1423         msg->msgtype = CHANNELMSG_REQUESTOFFERS;
1424
1425         ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
1426                              true);
1427
1428         trace_vmbus_request_offers(ret);
1429
1430         if (ret != 0) {
1431                 pr_err("Unable to request offers - %d\n", ret);
1432
1433                 goto cleanup;
1434         }
1435
1436 cleanup:
1437         kfree(msginfo);
1438
1439         return ret;
1440 }
1441
1442 static void invoke_sc_cb(struct vmbus_channel *primary_channel)
1443 {
1444         struct list_head *cur, *tmp;
1445         struct vmbus_channel *cur_channel;
1446
1447         if (primary_channel->sc_creation_callback == NULL)
1448                 return;
1449
1450         list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
1451                 cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
1452
1453                 primary_channel->sc_creation_callback(cur_channel);
1454         }
1455 }
1456
1457 void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
1458                                 void (*sc_cr_cb)(struct vmbus_channel *new_sc))
1459 {
1460         primary_channel->sc_creation_callback = sc_cr_cb;
1461 }
1462 EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
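/*
 * Usage sketch, for illustration only: how a netvsc-style driver might
 * register a sub-channel creation callback from its probe path.  The
 * "example_*" names, the ring-buffer sizes and the two-argument probe
 * signature are assumptions.
 */
#if 0
static void example_chan_cb(void *context)
{
	/* Drain the ring buffer of the (sub-)channel here. */
}

static void example_sc_create(struct vmbus_channel *new_sc)
{
	/* Runs from the sub-channel workqueue; see vmbus_add_channel_work(). */
	vmbus_open(new_sc, 16 * PAGE_SIZE, 16 * PAGE_SIZE, NULL, 0,
		   example_chan_cb, new_sc);
}

static int example_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *id)
{
	vmbus_set_sc_create_callback(dev->channel, example_sc_create);
	/* ... open the primary channel, request sub-channels, etc. ... */
	return 0;
}
#endif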
1463
1464 bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
1465 {
1466         bool ret;
1467
1468         ret = !list_empty(&primary->sc_list);
1469
1470         if (ret) {
1471                 /*
1472                  * Invoke the callback on sub-channel creation.
1473                  * This will present a uniform interface to the
1474                  * clients.
1475                  */
1476                 invoke_sc_cb(primary);
1477         }
1478
1479         return ret;
1480 }
1481 EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
1482
1483 void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
1484                 void (*chn_rescind_cb)(struct vmbus_channel *))
1485 {
1486         channel->chn_rescind_callback = chn_rescind_cb;
1487 }
1488 EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);