/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_VMBUS_H
#define _HYPERV_VMBUS_H

#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>

/*
 * Timeout (in seconds) for services such as KVP and fcopy.
 */
#define HV_UTIL_TIMEOUT 30

/*
 * Timeout (in seconds) for the guest-host handshake for services.
 */
#define HV_UTIL_NEGO_TIMEOUT 55

/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
        u64 as_uint64;
        struct {
                u32 pending;
                u32 armed;
        };
};

struct hv_monitor_parameter {
        union hv_connection_id connectionid;
        u16 flagnumber;
        u16 rsvdz;
};

union hv_monitor_trigger_state {
        u32 asu32;

        struct {
                u32 group_enable:4;
                u32 rsvdz:28;
        };
};

/* struct hv_monitor_page Layout */
/* ------------------------------------------------------ */
/* | 0   | TriggerState (4 bytes) | Rsvd1 (4 bytes)     | */
/* | 8   | TriggerGroup[0]                               | */
/* | 10  | TriggerGroup[1]                               | */
/* | 18  | TriggerGroup[2]                               | */
/* | 20  | TriggerGroup[3]                               | */
/* | 28  | Rsvd2[0]                                      | */
/* | 30  | Rsvd2[1]                                      | */
/* | 38  | Rsvd2[2]                                      | */
/* | 40  | NextCheckTime[0][0]    | NextCheckTime[0][1] | */
/* | ...                                                 | */
/* | 240 | Latency[0][0..3]                              | */
/* | 340 | Rsvz3[0]                                      | */
/* | 440 | Parameter[0][0]                               | */
/* | 448 | Parameter[0][1]                               | */
/* | ...                                                 | */
/* | 840 | Rsvd4[0]                                      | */
/* ------------------------------------------------------ */
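/*
 * Note: the offsets in the table above are hexadecimal byte offsets into
 * the monitor page shared with the host.
 */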
struct hv_monitor_page {
        union hv_monitor_trigger_state trigger_state;
        u32 rsvdz1;

        union hv_monitor_trigger_group trigger_group[4];
        u64 rsvdz2[3];

        s32 next_checktime[4][32];

        u16 latency[4][32];
        u64 rsvdz3[32];

        struct hv_monitor_parameter parameter[4][32];

        u8 rsvdz4[1984];
} __packed;

#define HV_HYPERCALL_PARAM_ALIGN sizeof(u64)

/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
        union hv_connection_id connectionid;
        u32 reserved;
        u32 message_type;
        u32 payload_size;
        u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};

enum {
        VMBUS_MESSAGE_CONNECTION_ID = 1,
        VMBUS_MESSAGE_CONNECTION_ID_4 = 4,
        VMBUS_MESSAGE_PORT_ID = 1,
        VMBUS_EVENT_CONNECTION_ID = 2,
        VMBUS_EVENT_PORT_ID = 2,
        VMBUS_MONITOR_CONNECTION_ID = 3,
        VMBUS_MONITOR_PORT_ID = 3,
        VMBUS_MESSAGE_SINT = 2,
};

/*
 * Per-cpu state for channel handling.
 */
struct hv_per_cpu_context {
        void *synic_message_page;
        void *synic_event_page;

        /*
         * Starting with win8, we can take channel interrupts on any CPU;
         * we will manage the tasklet that handles event messages on a
         * per-CPU basis.
         */
        struct tasklet_struct msg_dpc;
};

struct hv_context {
        /*
         * We only support running on top of Hyper-V, so at this point
         * this really can only contain the Hyper-V ID.
         */
        u64 guestid;

        struct hv_per_cpu_context __percpu *cpu_context;

        /*
         * To manage allocations in a NUMA node.
         * Array indexed by numa node ID.
         */
        struct cpumask *hv_numa_map;
};

extern struct hv_context hv_context;

extern int hv_init(void);

extern int hv_post_message(union hv_connection_id connection_id,
                           enum hv_message_type message_type,
                           void *payload, size_t payload_size);

extern int hv_synic_alloc(void);

extern void hv_synic_free(void);

extern void hv_synic_enable_regs(unsigned int cpu);
extern int hv_synic_init(unsigned int cpu);

extern void hv_synic_disable_regs(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);

void hv_ringbuffer_pre_init(struct vmbus_channel *channel);

int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       struct page *pages, u32 pagecnt, u32 max_pkt_size);

void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

int hv_ringbuffer_write(struct vmbus_channel *channel,
                        const struct kvec *kv_list, u32 kv_count,
                        u64 requestid, u64 *trans_id);

int hv_ringbuffer_read(struct vmbus_channel *channel,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool raw);

/*
 * The maximum number of channels (16384) is determined by the size of the
 * interrupt page, which is HV_HYP_PAGE_SIZE. Half of HV_HYP_PAGE_SIZE is
 * used to send endpoint interrupts, and the other half is used to receive
 * endpoint interrupts.
 */
#define MAX_NUM_CHANNELS ((HV_HYP_PAGE_SIZE >> 1) << 3)
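
/*
 * Worked example (assuming the typical HV_HYP_PAGE_SIZE of 4096 bytes):
 * each channel is represented by one bit in its half of the interrupt
 * page, so (4096 >> 1) bytes * 8 bits/byte = 16384 channels.
 */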

/* The value here must be a multiple of 32 */
#define MAX_NUM_CHANNELS_SUPPORTED 256

#define MAX_CHANNEL_RELIDS \
        max(MAX_NUM_CHANNELS_SUPPORTED, HV_EVENT_FLAGS_COUNT)
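
/*
 * Note: on the currently supported architectures HV_EVENT_FLAGS_COUNT is
 * 2048, so MAX_CHANNEL_RELIDS normally evaluates to 2048; it sizes
 * relid-indexed state such as vmbus_connection.channels[].
 */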

enum vmbus_connect_state {
        DISCONNECTED,
        CONNECTING,
        CONNECTED,
        DISCONNECTING
};

#define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT

/*
 * The CPU that Hyper-V will interrupt for VMBUS messages, such as
 * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
 */
#define VMBUS_CONNECT_CPU 0

struct vmbus_connection {
        u32 msg_conn_id;

        atomic_t offer_in_progress;

        enum vmbus_connect_state conn_state;

        atomic_t next_gpadl_handle;

        struct completion unload_event;
        /*
         * Represents channel interrupts. Each bit position represents a
         * channel. When a channel sends an interrupt via VMBUS, it finds its
         * bit in the sendInterruptPage, sets it and calls Hv to generate a
         * port event. The other end receives the port event and parses the
         * recvInterruptPage to see which bit is set.
         */
        void *int_page;
        void *send_int_page;
        void *recv_int_page;

        /*
         * Two pages: the first page is for parent->child notification and
         * the second is for child->parent notification.
         */
        struct hv_monitor_page *monitor_pages[2];
        struct list_head chn_msg_list;
        spinlock_t channelmsg_lock;

        /* List of channels */
        struct list_head chn_list;
        struct mutex channel_mutex;

        /* Array of channels */
        struct vmbus_channel **channels;

        /*
         * An offer message is handled first on the work_queue, and then
         * is further handled on handle_primary_chan_wq or
         * handle_sub_chan_wq.
         */
        struct workqueue_struct *work_queue;
        struct workqueue_struct *handle_primary_chan_wq;
        struct workqueue_struct *handle_sub_chan_wq;
        struct workqueue_struct *rescind_work_queue;

        /*
         * On suspension of the vmbus, the accumulated offer messages
         * must be dropped.
         */
        bool ignore_any_offer_msg;

        /*
         * The number of sub-channels and hv_sock channels that should be
         * cleaned up upon suspend: sub-channels will be re-created upon
         * resume, and hv_sock channels should not survive suspend.
         */
        atomic_t nr_chan_close_on_suspend;
        /*
         * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
         * drop to zero.
         */
        struct completion ready_for_suspend_event;

        /*
         * The number of primary channels that should be "fixed up"
         * upon resume: these channels are re-offered upon resume, and some
         * fields of the channel offers (i.e. child_relid and connection_id)
         * can change, so the old offermsg must be fixed up, before the resume
         * callbacks of the VSC drivers start to further touch the channels.
         */
        atomic_t nr_chan_fixup_on_resume;
        /*
         * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
         * drop to zero.
         */
        struct completion ready_for_resume_event;
};

struct vmbus_msginfo {
        /* Bookkeeping stuff */
        struct list_head msglist_entry;

        /* The message itself */
        unsigned char msg[];
};

extern struct vmbus_connection vmbus_connection;

int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);
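
/*
 * Set the bit for @relid in the send interrupt page; this is how the other
 * end learns which channel has pending data (see the interrupt page comment
 * in struct vmbus_connection above).
 */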
static inline void vmbus_send_interrupt(u32 relid)
{
        sync_set_bit(relid, vmbus_connection.send_int_page);
}

enum vmbus_message_handler_type {
        /* The related handler can sleep. */
        VMHT_BLOCKING = 0,

        /* The related handler must NOT sleep. */
        VMHT_NON_BLOCKING = 1,
};

struct vmbus_channel_message_table_entry {
        enum vmbus_channel_message_type message_type;
        enum vmbus_message_handler_type handler_type;
        void (*message_handler)(struct vmbus_channel_message_header *msg);
        u32 min_payload_len;
};

extern const struct vmbus_channel_message_table_entry
        channel_message_table[CHANNELMSG_COUNT];

/* General vmbus interface */

struct hv_device *vmbus_device_create(const guid_t *type,
                                      const guid_t *instance,
                                      struct vmbus_channel *channel);

int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
                           struct vmbus_channel *channel);

void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);

void vmbus_channel_map_relid(struct vmbus_channel *channel);
void vmbus_channel_unmap_relid(struct vmbus_channel *channel);

struct vmbus_channel *relid2channel(u32 relid);

void vmbus_free_channels(void);

/* Connection interface */

int vmbus_connect(void);
void vmbus_disconnect(void);

int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);

void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);

int hv_kvp_init(struct hv_util_service *srv);
void hv_kvp_deinit(void);
int hv_kvp_pre_suspend(void);
int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context);

int hv_vss_init(struct hv_util_service *srv);
void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context);

int hv_fcopy_init(struct hv_util_service *srv);
void hv_fcopy_deinit(void);
int hv_fcopy_pre_suspend(void);
int hv_fcopy_pre_resume(void);
void hv_fcopy_onchannelcallback(void *context);

void vmbus_initiate_unload(bool crash);

static inline void hv_poll_channel(struct vmbus_channel *channel,
                                   void (*cb)(void *))
{
        if (!channel)
                return;
        cb(channel);
}

enum hvutil_device_state {
        HVUTIL_DEVICE_INIT = 0,  /* driver is loaded, waiting for userspace */
        HVUTIL_READY,            /* userspace is registered */
        HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
        HVUTIL_USERSPACE_REQ,    /* request to userspace was sent */
        HVUTIL_USERSPACE_RECV,   /* reply from userspace was received */
        HVUTIL_DEVICE_DYING,     /* driver unload is in progress */
};

extern const struct vmbus_device vmbus_devs[];

static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
{
        return vmbus_devs[channel->device_id].perf_device;
}

static inline bool hv_is_allocated_cpu(unsigned int cpu)
{
        struct vmbus_channel *channel, *sc;

        lockdep_assert_held(&vmbus_connection.channel_mutex);

        /*
         * List additions/deletions as well as updates of the target CPUs are
         * protected by channel_mutex.
         */
        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
                if (!hv_is_perf_channel(channel))
                        continue;
                if (channel->target_cpu == cpu)
                        return true;
                list_for_each_entry(sc, &channel->sc_list, sc_list) {
                        if (sc->target_cpu == cpu)
                                return true;
                }
        }

        return false;
}

static inline void hv_set_allocated_cpu(unsigned int cpu)
{
        cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_clear_allocated_cpu(unsigned int cpu)
{
        if (hv_is_allocated_cpu(cpu))
                return;
        cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_update_allocated_cpus(unsigned int old_cpu,
                                            unsigned int new_cpu)
{
        hv_set_allocated_cpu(new_cpu);
        hv_clear_allocated_cpu(old_cpu);
}
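
/*
 * Note on the helpers above: when a channel is rebound from old_cpu to
 * new_cpu, the new CPU is marked in its NUMA node mask first, and the old
 * CPU is then released; hv_clear_allocated_cpu() leaves the bit set if any
 * other perf channel (or sub-channel) still targets that CPU.
 */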

#ifdef CONFIG_HYPERV_TESTING

int hv_debug_add_dev_dir(struct hv_device *dev);
void hv_debug_rm_dev_dir(struct hv_device *dev);
void hv_debug_rm_all_dir(void);
int hv_debug_init(void);
void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type);

#else /* CONFIG_HYPERV_TESTING */

static inline void hv_debug_rm_dev_dir(struct hv_device *dev) {};
static inline void hv_debug_rm_all_dir(void) {};
static inline void hv_debug_delay_test(struct vmbus_channel *channel,
                                       enum delay delay_type) {};

static inline int hv_debug_init(void)
{
        return -1;
}

static inline int hv_debug_add_dev_dir(struct hv_device *dev)
{
        return -1;
}

#endif /* CONFIG_HYPERV_TESTING */

#endif /* _HYPERV_VMBUS_H */