// SPDX-License-Identifier: GPL-2.0
/*
 * VMware Balloon driver.
 *
 * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
 *
 * This is the VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests. The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/balloon_compaction.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
static bool __read_mostly vmwballoon_shrinker_enable;
module_param(vmwballoon_shrinker_enable, bool, 0444);
MODULE_PARM_DESC(vmwballoon_shrinker_enable,
	"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");
/* Delay in seconds after shrink before inflation. */
#define VMBALLOON_SHRINK_DELAY		(5)

/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16

/* Magic number for the balloon mount-point */
#define BALLOON_VMW_MAGIC		0x0ba11007
/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */
enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
	VMW_BALLOON_64_BIT_TARGET		= (1 << 5)
};
#define VMW_BALLOON_CAPABILITIES_COMMON	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS \
					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)

#define VMW_BALLOON_2M_ORDER		(PMD_SHIFT - PAGE_SHIFT)

/*
 * 64-bit targets are only supported in 64-bit builds.
 */
#ifdef CONFIG_64BIT
#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_CAPABILITIES_COMMON \
					| VMW_BALLOON_64_BIT_TARGET)
#else
#define VMW_BALLOON_CAPABILITIES	VMW_BALLOON_CAPABILITIES_COMMON
#endif
enum vmballoon_page_size_type {
	VMW_BALLOON_4K_PAGE,
	VMW_BALLOON_2M_PAGE,
	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
};

#define VMW_BALLOON_NUM_PAGE_SIZES	(VMW_BALLOON_LAST_SIZE + 1)

static const char * const vmballoon_page_size_names[] = {
	[VMW_BALLOON_4K_PAGE]			= "4k",
	[VMW_BALLOON_2M_PAGE]			= "2M"
};
enum vmballoon_op {
	VMW_BALLOON_INFLATE,
	VMW_BALLOON_DEFLATE
};

enum vmballoon_op_stat_type {
	VMW_BALLOON_OP_STAT,
	VMW_BALLOON_OP_FAIL_STAT
};

#define VMW_BALLOON_OP_STAT_TYPES	(VMW_BALLOON_OP_FAIL_STAT + 1)
/**
 * enum vmballoon_cmd_type - backdoor commands.
 *
 * Availability of the commands is as follows:
 *
 * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
 * %VMW_BALLOON_CMD_GUEST_ID are always available.
 *
 * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
 * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
 * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
 * are available.
 *
 * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
 * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
 * commands are available.
 *
 * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
 * the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is available.
 *
 * @VMW_BALLOON_CMD_START: Communicates the supported version with the
 *			hypervisor.
 * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
 * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
 * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
 *			to be deflated from the balloon.
 * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
 *			runs in the VM.
 * @VMW_BALLOON_CMD_BATCHED_LOCK: Informs the hypervisor about a batch of
 *			ballooned pages (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Informs the hypervisor about a batch of
 *			pages that are about to be deflated from the
 *			balloon (up to 512).
 * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
 *			for 2MB pages.
 * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
 *			@VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
 *			pages.
 * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set the doorbell
 *			notification that would be invoked when the
 *			balloon size changes.
 * @VMW_BALLOON_CMD_LAST: Value of the last command.
 */
enum vmballoon_cmd_type {
	VMW_BALLOON_CMD_START,
	VMW_BALLOON_CMD_GET_TARGET,
	VMW_BALLOON_CMD_LOCK,
	VMW_BALLOON_CMD_UNLOCK,
	VMW_BALLOON_CMD_GUEST_ID,
	VMW_BALLOON_CMD_BATCHED_LOCK = 6,
	VMW_BALLOON_CMD_BATCHED_UNLOCK,
	VMW_BALLOON_CMD_BATCHED_2M_LOCK,
	VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
	VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
	VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};

#define VMW_BALLOON_CMD_NUM	(VMW_BALLOON_CMD_LAST + 1)
enum vmballoon_error_codes {
	VMW_BALLOON_SUCCESS,
	VMW_BALLOON_ERROR_CMD_INVALID,
	VMW_BALLOON_ERROR_PPN_INVALID,
	VMW_BALLOON_ERROR_PPN_LOCKED,
	VMW_BALLOON_ERROR_PPN_UNLOCKED,
	VMW_BALLOON_ERROR_PPN_PINNED,
	VMW_BALLOON_ERROR_PPN_NOTNEEDED,
	VMW_BALLOON_ERROR_RESET,
	VMW_BALLOON_ERROR_BUSY
};

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)
#define VMW_BALLOON_CMD_WITH_TARGET_MASK \
	((1UL << VMW_BALLOON_CMD_GET_TARGET) | \
	 (1UL << VMW_BALLOON_CMD_LOCK) | \
	 (1UL << VMW_BALLOON_CMD_UNLOCK) | \
	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK) | \
	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK) | \
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK) | \
	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
static const char * const vmballoon_cmd_names[] = {
	[VMW_BALLOON_CMD_START]			= "start",
	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
	[VMW_BALLOON_CMD_LOCK]			= "lock",
	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
};
enum vmballoon_stat_page {
	VMW_BALLOON_PAGE_STAT_ALLOC,
	VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
	VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
	VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
	VMW_BALLOON_PAGE_STAT_FREE,
	VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
};

#define VMW_BALLOON_PAGE_STAT_NUM	(VMW_BALLOON_PAGE_STAT_LAST + 1)
enum vmballoon_stat_general {
	VMW_BALLOON_STAT_TIMER,
	VMW_BALLOON_STAT_DOORBELL,
	VMW_BALLOON_STAT_RESET,
	VMW_BALLOON_STAT_SHRINK,
	VMW_BALLOON_STAT_SHRINK_FREE,
	VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
};

#define VMW_BALLOON_STAT_NUM		(VMW_BALLOON_STAT_LAST + 1)
static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
struct vmballoon_ctl {
	struct list_head pages;
	struct list_head refused_pages;
	struct list_head prealloc_pages;
	unsigned int n_refused_pages;
	unsigned int n_pages;
	enum vmballoon_page_size_type page_size;
	enum vmballoon_op op;
};
/**
 * struct vmballoon_batch_entry - a batch entry for lock or unlock.
 *
 * @status: the status of the operation, which is written by the hypervisor.
 * @reserved: reserved for future use. Must be set to zero.
 * @pfn: the physical frame number of the page to be locked or unlocked.
 */
struct vmballoon_batch_entry {
	u64 status : 5;
	u64 reserved : PAGE_SHIFT - 5;
	u64 pfn : 52;
} __packed;
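/*
 * Layout note (a reading aid derived from the bitfields above, not part of
 * the protocol definition): with a 4KB page (PAGE_SHIFT == 12) each entry is
 * a single 64-bit word holding a 5-bit status, 7 reserved bits and a 52-bit
 * PFN, so one communication page holds PAGE_SIZE / sizeof(struct
 * vmballoon_batch_entry) = 512 entries, which is where the "up to 512" limit
 * of the batched commands comes from.
 */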
struct vmballoon {
	/**
	 * @max_page_size: maximum supported page size for ballooning.
	 *
	 * Protected by @conf_sem
	 */
	enum vmballoon_page_size_type max_page_size;

	/**
	 * @size: balloon actual size in basic page size (frames).
	 *
	 * While we currently do not support size which is bigger than 32-bit,
	 * in preparation for future support, use 64-bits.
	 */
	atomic64_t size;

	/**
	 * @target: balloon target size in basic page size (frames).
	 *
	 * We do not protect the target under the assumption that setting the
	 * value is always done through a single write. If this assumption ever
	 * breaks, we would have to use X_ONCE for accesses, and suffer the less
	 * optimized code. Although we may read a stale target value if multiple
	 * accesses happen at once, the performance impact should be minor.
	 */
	unsigned long target;

	/**
	 * @reset_required: reset flag
	 *
	 * Setting this flag may introduce races, but the code is expected to
	 * handle them gracefully. In the worst case, another operation will
	 * fail as reset did not take place. Clearing the flag is done while
	 * holding @conf_sem for write.
	 */
	bool reset_required;

	/**
	 * @capabilities: hypervisor balloon capabilities.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned long capabilities;

	/**
	 * @batch_page: pointer to communication batch page.
	 *
	 * When batching is used, batch_page points to a page, which holds up to
	 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
	 */
	struct vmballoon_batch_entry *batch_page;

	/**
	 * @batch_max_pages: maximum pages that can be locked/unlocked.
	 *
	 * Indicates the number of pages that the hypervisor can lock or unlock
	 * at once, according to whether batching is enabled. If batching is
	 * disabled, only a single page can be locked/unlocked on each
	 * operation.
	 *
	 * Protected by @conf_sem.
	 */
	unsigned int batch_max_pages;

	/**
	 * @page: page to be locked/unlocked by the hypervisor
	 *
	 * @page is only used when batching is disabled and a single page is
	 * reclaimed on each iteration.
	 *
	 * Protected by @comm_lock.
	 */
	struct page *page;

	/**
	 * @shrink_timeout: timeout until the next inflation.
	 *
	 * After a shrink event, indicates the time in jiffies after which
	 * inflation is allowed again. Can be written concurrently with reads,
	 * so must use READ_ONCE/WRITE_ONCE when accessing.
	 */
	unsigned long shrink_timeout;

	/**
	 * @stats: statistics of the balloon; allocated lazily on first read.
	 */
	struct vmballoon_stats *stats;

#ifdef CONFIG_DEBUG_FS
	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	/**
	 * @b_dev_info: balloon device information descriptor.
	 */
	struct balloon_dev_info b_dev_info;

	struct delayed_work dwork;

	/**
	 * @huge_pages - list of the inflated 2MB pages.
	 *
	 * Protected by @b_dev_info.pages_lock .
	 */
	struct list_head huge_pages;

	/**
	 * @vmci_doorbell: doorbell handle for balloon size change notifications.
	 *
	 * Protected by @conf_sem.
	 */
	struct vmci_handle vmci_doorbell;

	/**
	 * @conf_sem: semaphore to protect the configuration and the statistics.
	 */
	struct rw_semaphore conf_sem;

	/**
	 * @comm_lock: lock to protect the communication with the host.
	 *
	 * Lock ordering: @conf_sem -> @comm_lock .
	 */
	spinlock_t comm_lock;

	/**
	 * @shrinker: shrinker interface that is used to avoid over-inflation.
	 */
	struct shrinker shrinker;

	/**
	 * @shrinker_registered: whether the shrinker was registered.
	 *
	 * The shrinker interface does not handle gracefully the removal of
	 * shrinker that was not registered before. This indication allows to
	 * simplify the unregistration process.
	 */
	bool shrinker_registered;
};

static struct vmballoon balloon;
struct vmballoon_stats {
	/* timer / doorbell operations */
	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];

	/* allocation statistics for huge and small pages */
	atomic64_t
		page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];

	/* Monitor operations: total operations, and failures */
	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
};
static inline bool is_vmballoon_stats_on(void)
{
	return IS_ENABLED(CONFIG_DEBUG_FS) &&
		static_branch_unlikely(&balloon_stat_enabled);
}
static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
					  enum vmballoon_op_stat_type type)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->ops[op][type]);
}
static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
					   enum vmballoon_stat_general stat)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->general_stat[stat]);
}
static inline void vmballoon_stats_gen_add(struct vmballoon *b,
					   enum vmballoon_stat_general stat,
					   unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->general_stat[stat]);
}
static inline void vmballoon_stats_page_inc(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size)
{
	if (is_vmballoon_stats_on())
		atomic64_inc(&b->stats->page_stat[stat][size]);
}
static inline void vmballoon_stats_page_add(struct vmballoon *b,
					    enum vmballoon_stat_page stat,
					    enum vmballoon_page_size_type size,
					    unsigned int val)
{
	if (is_vmballoon_stats_on())
		atomic64_add(val, &b->stats->page_stat[stat][size]);
}
static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
		unsigned long arg2, unsigned long *result)
{
	unsigned long status, dummy1, dummy2, dummy3, local_result;

	vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);

	asm volatile ("inl %%dx" :
		"=a"(status),
		"=c"(dummy1),
		"=d"(dummy2),
		"=b"(local_result),
		"=S"(dummy3) :
		"0"(VMW_BALLOON_HV_MAGIC),
		"1"(cmd),
		"2"(VMW_BALLOON_HV_PORT),
		"3"(arg1),
		"4"(arg2) :
		"memory");

	/* update the result if needed */
	if (result)
		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
							   local_result;

	/* update target when applicable */
	if (status == VMW_BALLOON_SUCCESS &&
	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
		WRITE_ONCE(b->target, local_result);

	if (status != VMW_BALLOON_SUCCESS &&
	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
			 status);
	}

	/* mark reset required accordingly */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return status;
}
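/*
 * A reading aid rather than new functionality: per the asm constraints above,
 * a backdoor call is an "inl" on I/O port %VMW_BALLOON_HV_PORT with the magic
 * number in %eax, the command in %ecx, and the two arguments in %ebx and %esi.
 * The hypervisor returns the status in %eax and, for most commands, the
 * result (e.g., the balloon target) in %ebx.
 */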
static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
	      unsigned long arg2)
{
	unsigned long dummy;

	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}
/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 */
static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities;

	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
				 &capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		b->capabilities = capabilities;
		break;
	case VMW_BALLOON_SUCCESS:
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		break;
	default:
		return -EIO;
	}

	/*
	 * 2MB pages are only supported with batching. If batching is for some
	 * reason disabled, do not use 2MB pages, since otherwise the legacy
	 * mechanism is used with 2MB pages, causing a failure.
	 */
	b->max_page_size = VMW_BALLOON_4K_PAGE;
	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
		b->max_page_size = VMW_BALLOON_2M_PAGE;

	return 0;
}
/**
 * vmballoon_send_guest_id - communicate guest type to the host.
 *
 * @b: pointer to the balloon.
 *
 * Communicate guest type to the host so that it can adjust the ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending the "start" command and is part of the
 * standard reset sequence.
 *
 * Return: zero on success or an appropriate error code.
 */
static int vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
			       VMW_BALLOON_GUEST_ID, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
/**
 * vmballoon_page_order() - return the order of the page
 * @page_size: the size of the page.
 *
 * Return: the allocation order.
 */
static inline
unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
	return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
}
/**
 * vmballoon_page_in_frames() - returns the number of frames in a page.
 * @page_size: the size of the page.
 *
 * Return: the number of 4k frames.
 */
static inline unsigned int
vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
{
	return 1 << vmballoon_page_order(page_size);
}
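/*
 * Worked example (for orientation, assuming x86-64 defaults): with 4KB base
 * pages, VMW_BALLOON_2M_ORDER = PMD_SHIFT - PAGE_SHIFT = 21 - 12 = 9, so a
 * 2MB balloon page covers 1 << 9 = 512 basic frames, while a 4KB page is a
 * single frame.
 */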
/**
 * vmballoon_mark_page_offline() - mark a page as offline
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_offline(struct page *page,
			    enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__SetPageOffline(page + i);
}
/**
 * vmballoon_mark_page_online() - mark a page as online
 * @page: pointer for the page.
 * @page_size: the size of the page.
 */
static void
vmballoon_mark_page_online(struct page *page,
			   enum vmballoon_page_size_type page_size)
{
	int i;

	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
		__ClearPageOffline(page + i);
}
/**
 * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success, EINVAL if limit does not fit in 32-bit, as required
 * by the host-guest protocol, and EIO if an error occurred in communicating
 * with the host.
 */
static int vmballoon_send_get_target(struct vmballoon *b)
{
	unsigned long status;
	unsigned long limit;

	limit = totalram_pages();

	/* Ensure limit fits in 32-bits if 64-bit targets are not supported */
	if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
	    limit != (u32)limit)
		return -EINVAL;

	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);

	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
/**
 * vmballoon_alloc_page_list - allocates a list of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 * @req_n_pages: the number of requested pages.
 *
 * Tries to allocate @req_n_pages. Adds them to the list of balloon pages in
 * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
 *
 * Return: zero on success or error code otherwise.
 */
static int vmballoon_alloc_page_list(struct vmballoon *b,
				     struct vmballoon_ctl *ctl,
				     unsigned int req_n_pages)
{
	struct page *page;
	unsigned int i;

	for (i = 0; i < req_n_pages; i++) {
		/*
		 * First check if we happen to have pages that were allocated
		 * before. This happens when a 2MB page is rejected during
		 * inflation by the hypervisor, and then split into 4KB pages.
		 */
		if (!list_empty(&ctl->prealloc_pages)) {
			page = list_first_entry(&ctl->prealloc_pages,
						struct page, lru);
			list_del(&page->lru);
		} else {
			if (ctl->page_size == VMW_BALLOON_2M_PAGE)
				page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
					__GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
			else
				page = balloon_page_alloc();

			vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
						 ctl->page_size);
		}

		if (page) {
			vmballoon_mark_page_offline(page, ctl->page_size);
			/* Success. Add the page to the list and continue. */
			list_add(&page->lru, &ctl->pages);
			continue;
		}

		/* Allocation failed. Update statistics and stop. */
		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
					 ctl->page_size);
		break;
	}

	ctl->n_pages = i;

	return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
}
/**
 * vmballoon_handle_one_result - handle lock/unlock result for a single page.
 *
 * @b: pointer for %struct vmballoon.
 * @page: pointer for the page whose result should be handled.
 * @page_size: size of the page.
 * @status: status of the operation as provided by the hypervisor.
 */
static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
				       enum vmballoon_page_size_type page_size,
				       unsigned long status)
{
	/* On success do nothing. The page is already on the balloon list. */
	if (likely(status == VMW_BALLOON_SUCCESS))
		return 0;

	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
		 page_to_pfn(page), status,
		 vmballoon_page_size_names[page_size]);

	/* Error occurred */
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
				 page_size);

	return -EIO;
}
/**
 * vmballoon_status_page - returns the status of (un)lock operation
 *
 * @b: pointer to the balloon.
 * @idx: index for the page for which the operation is performed.
 * @p: pointer to where the page struct is returned.
 *
 * Following a lock or unlock operation, returns the status of the operation for
 * an individual page. Provides the page that the operation was performed on in
 * the @p argument.
 *
 * Returns: The status of a lock or unlock operation for an individual page.
 */
static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
					   struct page **p)
{
	if (static_branch_likely(&vmw_balloon_batching)) {
		/* batching mode */
		*p = pfn_to_page(b->batch_page[idx].pfn);
		return b->batch_page[idx].status;
	}

	/* non-batching mode */
	*p = b->page;

	/*
	 * If a failure occurs, the indication will be provided in the status
	 * of the entire operation, which is considered before the individual
	 * page status. So for non-batching mode, the indication is always of
	 * success.
	 */
	return VMW_BALLOON_SUCCESS;
}
/**
 * vmballoon_lock_op - notifies the host about inflated/deflated pages.
 * @b: pointer to the balloon.
 * @num_pages: number of inflated/deflated pages.
 * @page_size: size of the page.
 * @op: the type of operation (lock or unlock).
 *
 * Notify the host about page(s) that were ballooned (or removed from the
 * balloon) so that the host can use them without fear that the guest will
 * need them (or, on deflation, so that it stops tracking them since the VM
 * uses them again). The host may reject some pages; we need to check the
 * return value and maybe submit a different page instead. The pages that are
 * inflated/deflated are pointed to by @b->page.
 *
 * Return: result as provided by the hypervisor.
 */
static unsigned long vmballoon_lock_op(struct vmballoon *b,
				       unsigned int num_pages,
				       enum vmballoon_page_size_type page_size,
				       enum vmballoon_op op)
{
	unsigned long cmd, pfn;

	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching)) {
		if (op == VMW_BALLOON_INFLATE)
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_LOCK :
				VMW_BALLOON_CMD_BATCHED_LOCK;
		else
			cmd = page_size == VMW_BALLOON_2M_PAGE ?
				VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
				VMW_BALLOON_CMD_BATCHED_UNLOCK;

		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
	} else {
		cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
						  VMW_BALLOON_CMD_UNLOCK;
		pfn = page_to_pfn(b->page);

		/* In non-batching mode, PFNs must fit in 32-bit */
		if (unlikely(pfn != (u32)pfn))
			return VMW_BALLOON_ERROR_PPN_INVALID;
	}

	return vmballoon_cmd(b, cmd, pfn, num_pages);
}
/**
 * vmballoon_add_page - adds a page towards lock/unlock operation.
 *
 * @b: pointer to the balloon.
 * @idx: index of the page to be ballooned in this batch.
 * @p: pointer to the page that is about to be ballooned.
 *
 * Adds the page to be ballooned. Must be called while holding @comm_lock.
 */
static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
			       struct page *p)
{
	lockdep_assert_held(&b->comm_lock);

	if (static_branch_likely(&vmw_balloon_batching))
		b->batch_page[idx] = (struct vmballoon_batch_entry)
					{ .pfn = page_to_pfn(p) };
	else
		b->page = p;
}
/**
 * vmballoon_lock - lock or unlock a batch of pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 *
 * Notifies the host about the ballooned pages (after inflation or deflation,
 * according to @ctl). If the host rejects a page, it is put on the @ctl
 * refused-pages list. These refused pages are then released when moving to
 * the next size of pages.
 *
 * Note that we neither free any @page here nor put them back on the ballooned
 * pages list. Instead we queue them for later processing. We do that for
 * several reasons. First, we do not want to free the page under the lock.
 * Second, it allows us to unify the handling of lock and unlock. In the
 * inflate case, the caller will check if there are too many refused pages and
 * release them. Although it is not identical to the past behavior, it should
 * not affect performance.
 */
static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
{
	unsigned long batch_status;
	struct page *page;
	unsigned int i, num_pages;

	num_pages = ctl->n_pages;
	if (num_pages == 0)
		return 0;

	/* communication with the host is done under the communication lock */
	spin_lock(&b->comm_lock);

	i = 0;
	list_for_each_entry(page, &ctl->pages, lru)
		vmballoon_add_page(b, i++, page);

	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
					 ctl->op);

	/*
	 * Iterate over the pages in the provided list. Since we are changing
	 * @ctl->n_pages we are saving the original value in @num_pages and
	 * use this value to bound the loop.
	 */
	for (i = 0; i < num_pages; i++) {
		unsigned long status;

		status = vmballoon_status_page(b, i, &page);

		/*
		 * Failure of the whole batch overrides a single operation
		 * result.
		 */
		if (batch_status != VMW_BALLOON_SUCCESS)
			status = batch_status;

		/* Continue if no error happened */
		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
						 status))
			continue;

		/*
		 * Error happened. Move the pages to the refused list and update
		 * the page counts.
		 */
		list_move(&page->lru, &ctl->refused_pages);
		ctl->n_pages--;
		ctl->n_refused_pages++;
	}

	spin_unlock(&b->comm_lock);

	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
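/*
 * Flow summary (descriptive only, restating the function above): a lock or
 * unlock cycle stages up to @batch_max_pages PFNs via vmballoon_add_page(),
 * issues a single hypervisor command through vmballoon_lock_op(), and then
 * walks the per-page statuses, moving any page the host refused onto
 * @ctl->refused_pages for the caller to deal with.
 */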
/**
 * vmballoon_release_page_list() - Releases a page list
 *
 * @page_list: list of pages to release.
 * @n_pages: pointer to the number of pages.
 * @page_size: whether the pages in the list are 2MB (or else 4KB).
 *
 * Releases the list of pages and zeros the number of pages.
 */
static void vmballoon_release_page_list(struct list_head *page_list,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	struct page *page, *tmp;

	list_for_each_entry_safe(page, tmp, page_list, lru) {
		list_del(&page->lru);
		vmballoon_mark_page_online(page, page_size);
		__free_pages(page, vmballoon_page_order(page_size));
	}

	if (n_pages)
		*n_pages = 0;
}
/**
 * vmballoon_release_refused_pages() - release the refused pages.
 *
 * @b: pointer to the balloon.
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 *
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
					    struct vmballoon_ctl *ctl)
{
	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
				 ctl->page_size);

	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
				    ctl->page_size);
}
/**
 * vmballoon_change - retrieve the required balloon change
 *
 * @b: pointer for the balloon.
 *
 * Return: the required change for the balloon size. A positive number
 * indicates inflation, a negative number indicates a deflation.
 */
static int64_t vmballoon_change(struct vmballoon *b)
{
	int64_t size, target;

	size = atomic64_read(&b->size);
	target = READ_ONCE(b->target);

	/*
	 * We must cast first because of int sizes
	 * Otherwise we might get huge positives instead of negatives
	 */

	if (b->reset_required)
		return 0;

	/* consider a 2MB slack on deflate, unless the balloon is emptied */
	if (target < size && target != 0 &&
	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
		return 0;

	/* If an out-of-memory recently occurred, inflation is disallowed. */
	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
		return 0;

	return target - size;
}
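/*
 * Worked example (illustrative numbers only): with size = 10000 frames and
 * target = 12000 frames the function returns +2000 (inflate by 2000 frames);
 * with target = 9800 it returns 0, because the 200-frame difference is within
 * the 512-frame (2MB) deflation slack; with target = 0 it returns -10000, as
 * the slack never applies when the balloon is being emptied.
 */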
/**
 * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
 *
 * @b: pointer to balloon.
 * @pages: list of pages to enqueue.
 * @n_pages: pointer to number of pages in list. The value is zeroed.
 * @page_size: whether the pages are 2MB or 4KB pages.
 *
 * Enqueues the provided list of pages in the ballooned page list, clears the
 * list and zeroes the number of pages that was provided.
 */
static void vmballoon_enqueue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size)
{
	unsigned long flags;

	if (page_size == VMW_BALLOON_4K_PAGE) {
		balloon_page_list_enqueue(&b->b_dev_info, pages);
	} else {
		/*
		 * Keep the huge pages in a local list which is not available
		 * for the balloon compaction mechanism.
		 */
		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
		list_splice_init(pages, &b->huge_pages);
		__count_vm_events(BALLOON_INFLATE, *n_pages *
				  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	}

	*n_pages = 0;
}
/**
 * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
 *
 * @b: pointer to balloon.
 * @pages: list to which the dequeued pages are added.
 * @n_pages: pointer to number of pages in list. The value is set to the
 *	number of dequeued pages.
 * @page_size: whether the pages are 2MB or 4KB pages.
 * @n_req_pages: the number of requested pages.
 *
 * Dequeues the number of requested pages from the balloon for deflation. The
 * number of dequeued pages may be lower, if not enough pages in the requested
 * size are available.
 */
static void vmballoon_dequeue_page_list(struct vmballoon *b,
					struct list_head *pages,
					unsigned int *n_pages,
					enum vmballoon_page_size_type page_size,
					unsigned int n_req_pages)
{
	struct page *page, *tmp;
	unsigned int i = 0;
	unsigned long flags;

	/* In the case of 4k pages, use the compaction infrastructure */
	if (page_size == VMW_BALLOON_4K_PAGE) {
		*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
						     n_req_pages);
		return;
	}

	/* 2MB pages */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
		list_move(&page->lru, pages);
		if (++i == n_req_pages)
			break;
	}

	__count_vm_events(BALLOON_DEFLATE,
			  i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
	*n_pages = i;
}
/**
 * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
 *
 * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
 * due to one or few 4KB pages. These 2MB pages may keep being allocated and
 * then being refused. To prevent this case, this function splits the refused
 * pages into 4KB pages and adds them into @prealloc_pages list.
 *
 * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
 */
static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
{
	struct page *page, *tmp;
	unsigned int i, order;

	order = vmballoon_page_order(ctl->page_size);

	list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
		list_del(&page->lru);
		split_page(page, order);
		for (i = 0; i < (1 << order); i++)
			list_add(&page[i].lru, &ctl->prealloc_pages);
	}
	ctl->n_refused_pages = 0;
}
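/*
 * For orientation (the numbers follow from the definitions above): on x86-64
 * a refused 2MB page has order 9, so each split_page() call above turns one
 * refused page into 512 standalone 4KB pages on @prealloc_pages, which
 * vmballoon_alloc_page_list() will consume before asking the page allocator.
 */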
/**
 * vmballoon_inflate() - Inflate the balloon towards its target size.
 *
 * @b: pointer to the balloon.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
	int64_t to_inflate_frames;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
		.page_size = b->max_page_size,
		.op = VMW_BALLOON_INFLATE
	};

	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
		unsigned int to_inflate_pages, page_in_frames;
		int alloc_error, lock_error = 0;

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages != 0);

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_inflate_frames,
							  page_in_frames));

		/* Start by allocating */
		alloc_error = vmballoon_alloc_page_list(b, &ctl,
							to_inflate_pages);

		/* Actually lock the pages by telling the hypervisor */
		lock_error = vmballoon_lock(b, &ctl);

		/*
		 * If an error indicates that something serious went wrong,
		 * stop the inflation.
		 */
		if (lock_error)
			break;

		/* Update the balloon size */
		atomic64_add(ctl.n_pages * page_in_frames, &b->size);

		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/*
		 * If allocation failed or the number of refused pages exceeds
		 * the maximum allowed, move to the next page size.
		 */
		if (alloc_error ||
		    ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
			if (ctl.page_size == VMW_BALLOON_4K_PAGE)
				break;

			/*
			 * Split the refused pages to 4k. This will also empty
			 * the refused pages list.
			 */
			vmballoon_split_refused_pages(&ctl);
			ctl.page_size--;
		}

		cond_resched();
	}

	/*
	 * Release pages that were allocated while attempting to inflate the
	 * balloon but were refused by the host for one reason or another,
	 * and update the statistics.
	 */
	if (ctl.n_refused_pages != 0)
		vmballoon_release_refused_pages(b, &ctl);

	vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
}
/**
 * vmballoon_deflate() - Decrease the size of the balloon.
 *
 * @b: pointer to the balloon
 * @n_frames: the number of frames to deflate. If zero, automatically
 *	calculated according to the target size.
 * @coordinated: whether to coordinate with the host
 *
 * Decrease the size of the balloon allowing guest to use more memory.
 *
 * Return: The number of deflated frames (i.e., basic page size units)
 */
static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
				       bool coordinated)
{
	unsigned long deflated_frames = 0;
	unsigned long tried_frames = 0;
	struct vmballoon_ctl ctl = {
		.pages = LIST_HEAD_INIT(ctl.pages),
		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
		.page_size = VMW_BALLOON_4K_PAGE,
		.op = VMW_BALLOON_DEFLATE
	};

	/* free pages to reach target */
	while (true) {
		unsigned int to_deflate_pages, n_unlocked_frames;
		unsigned int page_in_frames;
		int64_t to_deflate_frames;
		bool deflated_all;

		page_in_frames = vmballoon_page_in_frames(ctl.page_size);

		VM_BUG_ON(!list_empty(&ctl.pages));
		VM_BUG_ON(ctl.n_pages);
		VM_BUG_ON(!list_empty(&ctl.refused_pages));
		VM_BUG_ON(ctl.n_refused_pages);

		/*
		 * If we were requested a specific number of frames, we try to
		 * deflate this number of frames. Otherwise, deflation is
		 * performed according to the target and balloon size.
		 */
		to_deflate_frames = n_frames ? n_frames - tried_frames :
					       -vmballoon_change(b);

		/* break if no work to do */
		if (to_deflate_frames <= 0)
			break;

		/*
		 * Calculate the number of frames based on current page size,
		 * but limit the deflated frames to a single chunk
		 */
		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
					 DIV_ROUND_UP_ULL(to_deflate_frames,
							  page_in_frames));

		/* First take the pages from the balloon pages. */
		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
					    ctl.page_size, to_deflate_pages);

		/*
		 * Before the pages are moved to the refused list, count their
		 * frames as frames that we tried to deflate.
		 */
		tried_frames += ctl.n_pages * page_in_frames;

		/*
		 * Unlock the pages by communicating with the hypervisor if the
		 * communication is coordinated (i.e., not pop). We ignore the
		 * return code. Instead we check whether all the pages were
		 * unlocked; if not, we will move to the next page size and
		 * will eventually try again later.
		 */
		if (coordinated)
			vmballoon_lock(b, &ctl);

		/*
		 * Check if we deflated enough. We will move to the next page
		 * size if we did not manage to do so. This calculation takes
		 * place now, as once the pages are released, the number of
		 * pages is zeroed.
		 */
		deflated_all = (ctl.n_pages == to_deflate_pages);

		/* Update local and global counters */
		n_unlocked_frames = ctl.n_pages * page_in_frames;
		atomic64_sub(n_unlocked_frames, &b->size);
		deflated_frames += n_unlocked_frames;

		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
					 ctl.page_size, ctl.n_pages);

		/* free the ballooned pages */
		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
					    ctl.page_size);

		/* Return the refused pages to the ballooned list. */
		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
					    &ctl.n_refused_pages,
					    ctl.page_size);

		/* If we failed to unlock all the pages, move to next size. */
		if (!deflated_all) {
			if (ctl.page_size == b->max_page_size)
				break;
			ctl.page_size++;
		}

		cond_resched();
	}

	return deflated_frames;
}
/**
 * vmballoon_deinit_batching - disables batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Disables batching, by deallocating the page for communication with the
 * hypervisor and disabling the static key to indicate that batching is off.
 */
static void vmballoon_deinit_batching(struct vmballoon *b)
{
	free_page((unsigned long)b->batch_page);
	b->batch_page = NULL;
	static_branch_disable(&vmw_balloon_batching);
	b->batch_max_pages = 1;
}
/**
 * vmballoon_init_batching - enable batching mode.
 *
 * @b: pointer to &struct vmballoon.
 *
 * Enables batching, by allocating a page for communication with the hypervisor
 * and enabling the static_key to use batching.
 *
 * Return: zero on success or an appropriate error-code.
 */
static int vmballoon_init_batching(struct vmballoon *b)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	b->batch_page = page_address(page);
	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);

	static_branch_enable(&vmw_balloon_batching);

	return 0;
}
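/*
 * Sizing note (arithmetic, not new behavior): with a 4KB communication page
 * and 8-byte batch entries, batch_max_pages becomes 4096 / 8 = 512, which
 * matches the per-batch limit mentioned in the command documentation above.
 */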
/*
 * Receive notification and resize balloon
 */
static void vmballoon_doorbell(void *client_data)
{
	struct vmballoon *b = client_data;

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);

	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}
/*
 * Clean up vmci doorbell
 */
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
		      VMCI_INVALID_ID, VMCI_INVALID_ID);

	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
		vmci_doorbell_destroy(b->vmci_doorbell);
		b->vmci_doorbell = VMCI_INVALID_HANDLE;
	}
}
/**
 * vmballoon_vmci_init - Initialize vmci doorbell.
 *
 * @b: pointer to the balloon.
 *
 * Return: zero on success or when wakeup command not supported. Error-code
 * otherwise.
 *
 * Initialize vmci doorbell, to get notified as soon as balloon changes.
 */
static int vmballoon_vmci_init(struct vmballoon *b)
{
	unsigned long error;

	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
		return 0;

	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
				     vmballoon_doorbell, b);

	if (error != VMCI_SUCCESS)
		goto fail;

	error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
				b->vmci_doorbell.context,
				b->vmci_doorbell.resource, NULL);

	if (error != VMW_BALLOON_SUCCESS)
		goto fail;

	return 0;
fail:
	vmballoon_vmci_cleanup(b);
	return -EIO;
}
/**
 * vmballoon_pop - Quickly release all pages allocated for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * This function is called when the host decides to "reset" the balloon for one
 * reason or another. Unlike normal "deflate" we do not (shall not) notify the
 * host of the pages being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	unsigned long size;

	while ((size = atomic64_read(&b->size)))
		vmballoon_deflate(b, size, false);
}
/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	int error;

	down_write(&b->conf_sem);

	vmballoon_vmci_cleanup(b);

	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		goto unlock;

	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		if (vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			goto unlock;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		vmballoon_deinit_batching(b);
	}

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
	b->reset_required = false;

	error = vmballoon_vmci_init(b);
	if (error)
		pr_err("failed to initialize vmci doorbell\n");

	if (vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");

unlock:
	up_write(&b->conf_sem);
}
/**
 * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
 *
 * @work: pointer to the &work_struct which is provided by the workqueue.
 *
 * Resets the protocol if needed, gets the new size and adjusts the balloon as
 * needed. Repeats every 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	int64_t change = 0;

	if (b->reset_required)
		vmballoon_reset(b);

	down_read(&b->conf_sem);

	/*
	 * Update the stats while holding the semaphore to ensure that
	 * @stats_enabled is consistent with whether the stats are actually
	 * enabled.
	 */
	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);

	if (!vmballoon_send_get_target(b))
		change = vmballoon_change(b);

	if (change != 0) {
		pr_debug("%s - size: %llu, target %lu\n", __func__,
			 atomic64_read(&b->size), READ_ONCE(b->target));

		if (change > 0)
			vmballoon_inflate(b);
		else  /* (change < 0) */
			vmballoon_deflate(b, 0, true);
	}

	up_read(&b->conf_sem);

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}
/**
 * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
 * @shrinker: pointer to the balloon shrinker.
 * @sc: page reclaim information.
 *
 * Returns: number of pages that were freed during deflation.
 */
static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
					     struct shrink_control *sc)
{
	struct vmballoon *b = &balloon;
	unsigned long deflated_frames;

	pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size));

	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);

	/*
	 * If the lock is also contended for read, we cannot easily reclaim, so
	 * we bail out.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return 0;

	deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);

	vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
				deflated_frames);

	/*
	 * Delay future inflation for some time to mitigate the situations in
	 * which the balloon continuously grows and shrinks. Use WRITE_ONCE()
	 * since the access is asynchronous.
	 */
	WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);

	up_read(&b->conf_sem);

	return deflated_frames;
}
/**
 * vmballoon_shrinker_count() - return the number of ballooned pages.
 * @shrinker: pointer to the balloon shrinker.
 * @sc: page reclaim information.
 *
 * Returns: number of 4k pages that are allocated for the balloon and can
 * therefore be reclaimed under pressure.
 */
static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
					      struct shrink_control *sc)
{
	struct vmballoon *b = &balloon;

	return atomic64_read(&b->size);
}
static void vmballoon_unregister_shrinker(struct vmballoon *b)
{
	if (b->shrinker_registered)
		unregister_shrinker(&b->shrinker);
	b->shrinker_registered = false;
}
static int vmballoon_register_shrinker(struct vmballoon *b)
{
	int r;

	/* Do nothing if the shrinker is not enabled */
	if (!vmwballoon_shrinker_enable)
		return 0;

	b->shrinker.scan_objects = vmballoon_shrinker_scan;
	b->shrinker.count_objects = vmballoon_shrinker_count;
	b->shrinker.seeks = DEFAULT_SEEKS;

	r = register_shrinker(&b->shrinker);

	if (r == 0)
		b->shrinker_registered = true;

	return r;
}
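/*
 * Usage note (an inference from the 0444 module-parameter permissions above,
 * not a statement from the original author): since vmwballoon_shrinker_enable
 * is read-only at runtime, the shrinker is opted into at load time, e.g. with
 * "modprobe vmw_balloon vmwballoon_shrinker_enable=1", or with the boot
 * parameter "vmw_balloon.vmwballoon_shrinker_enable=1" when built in.
 */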
#ifdef CONFIG_DEBUG_FS

static const char * const vmballoon_stat_page_names[] = {
	[VMW_BALLOON_PAGE_STAT_ALLOC]		= "alloc",
	[VMW_BALLOON_PAGE_STAT_ALLOC_FAIL]	= "allocFail",
	[VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC]	= "errAlloc",
	[VMW_BALLOON_PAGE_STAT_REFUSED_FREE]	= "errFree",
	[VMW_BALLOON_PAGE_STAT_FREE]		= "free"
};

static const char * const vmballoon_stat_names[] = {
	[VMW_BALLOON_STAT_TIMER]		= "timer",
	[VMW_BALLOON_STAT_DOORBELL]		= "doorbell",
	[VMW_BALLOON_STAT_RESET]		= "reset",
	[VMW_BALLOON_STAT_SHRINK]		= "shrink",
	[VMW_BALLOON_STAT_SHRINK_FREE]		= "shrinkFree"
};
static int vmballoon_enable_stats(struct vmballoon *b)
{
	int r = 0;

	down_write(&b->conf_sem);

	/* did we somehow race with another reader which enabled stats? */
	if (b->stats)
		goto out;

	b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);

	if (!b->stats) {
		/* allocation failed */
		r = -ENOMEM;
		goto out;
	}
	static_key_enable(&balloon_stat_enabled.key);
out:
	up_write(&b->conf_sem);
	return r;
}
/**
 * vmballoon_debug_show - shows statistics of balloon operations.
 * @f: pointer to the &struct seq_file.
 * @offset: ignored.
 *
 * Provides the statistics that can be accessed in vmmemctl in the debugfs.
 * To avoid the overhead (mainly memory) of collecting the statistics, we only
 * collect statistics after the first time the counters are read.
 *
 * Return: zero on success or an error code.
 */
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	int i, j;

	/* enables stats if they are disabled */
	if (!b->stats) {
		int r = vmballoon_enable_stats(b);

		if (r)
			return r;
	}

	/* format capabilities info */
	seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
		   VMW_BALLOON_CAPABILITIES);
	seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
	seq_printf(f, "%-22s: %16s\n", "is resetting",
		   b->reset_required ? "y" : "n");

	/* format size info */
	seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
	seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));

	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
		if (vmballoon_cmd_names[i] == NULL)
			continue;

		seq_printf(f, "%-22s: %16llu (%llu failed)\n",
			   vmballoon_cmd_names[i],
			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
	}

	for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
		seq_printf(f, "%-22s: %16llu\n",
			   vmballoon_stat_names[i],
			   atomic64_read(&b->stats->general_stat[i]));

	for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
		for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
			seq_printf(f, "%-18s(%s): %16llu\n",
				   vmballoon_stat_page_names[i],
				   vmballoon_page_size_names[j],
				   atomic64_read(&b->stats->page_stat[i][j]));
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
static void __init vmballoon_debugfs_init(struct vmballoon *b)
{
	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
}
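/*
 * Reading note: the statistics are exposed as a single top-level debugfs
 * file, typically /sys/kernel/debug/vmmemctl (given the NULL parent above),
 * and the first read of that file is what flips balloon_stat_enabled on.
 */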
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	static_key_disable(&balloon_stat_enabled.key);
	debugfs_remove(b->dbg_entry);
	kfree(b->stats);
	b->stats = NULL;
}

#else

static inline void vmballoon_debugfs_init(struct vmballoon *b)
{
}

static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */
#ifdef CONFIG_BALLOON_COMPACTION

static struct dentry *vmballoon_mount(struct file_system_type *fs_type,
				      int flags, const char *dev_name,
				      void *data)
{
	static const struct dentry_operations ops = {
		.d_dname = simple_dname,
	};

	return mount_pseudo(fs_type, "balloon-vmware:", NULL, &ops,
			    BALLOON_VMW_MAGIC);
}

static struct file_system_type vmballoon_fs = {
	.name		= "balloon-vmware",
	.mount		= vmballoon_mount,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *vmballoon_mnt;
/**
 * vmballoon_migratepage() - migrates a balloon page.
 * @b_dev_info: balloon device information descriptor.
 * @newpage: the page to which @page should be migrated.
 * @page: a ballooned page that should be migrated.
 * @mode: migration mode, ignored.
 *
 * This function is really open-coded, but that is according to the interface
 * that balloon_compaction provides.
 *
 * Return: zero on success, -EAGAIN when migration cannot be performed
 *	   momentarily, and -EBUSY if migration failed and should be retried
 *	   with that specific page.
 */
static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
				 struct page *newpage, struct page *page,
				 enum migrate_mode mode)
{
	unsigned long status, flags;
	struct vmballoon *b;
	int ret;

	b = container_of(b_dev_info, struct vmballoon, b_dev_info);

	/*
	 * If the semaphore is taken, there is ongoing configuration change
	 * (i.e., balloon reset), so try again.
	 */
	if (!down_read_trylock(&b->conf_sem))
		return -EAGAIN;

	spin_lock(&b->comm_lock);
	/*
	 * We must start by deflating and not inflating, as otherwise the
	 * hypervisor may tell us that it has enough memory and the new page is
	 * not needed. Since the old page is isolated, we cannot use the list
	 * interface to unlock it, as the LRU field is used for isolation.
	 * Instead, we use the native interface directly.
	 */
	vmballoon_add_page(b, 0, page);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_DEFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &page);

	/*
	 * If a failure happened, let the migration mechanism know that it
	 * should not retry.
	 */
	if (status != VMW_BALLOON_SUCCESS) {
		spin_unlock(&b->comm_lock);
		ret = -EBUSY;
		goto out_unlock;
	}

	/*
	 * The page is isolated, so it is safe to delete it without holding
	 * @pages_lock . We keep holding @comm_lock since we will need it in a
	 * second.
	 */
	balloon_page_delete(page);

	put_page(page);

	/* Inflate */
	vmballoon_add_page(b, 0, newpage);
	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
				   VMW_BALLOON_INFLATE);

	if (status == VMW_BALLOON_SUCCESS)
		status = vmballoon_status_page(b, 0, &newpage);

	spin_unlock(&b->comm_lock);

	if (status != VMW_BALLOON_SUCCESS) {
		/*
		 * A failure happened. While we can deflate the page we just
		 * inflated, this deflation can also encounter an error. Instead
		 * we will decrease the size of the balloon to reflect the
		 * change and report failure.
		 */
		atomic64_dec(&b->size);
		ret = -EBUSY;
	} else {
		/*
		 * Success. Take a reference for the page, and we will add it to
		 * the list after acquiring the lock.
		 */
		get_page(newpage);
		ret = MIGRATEPAGE_SUCCESS;
	}

	/* Update the balloon list under the @pages_lock */
	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);

	/*
	 * On inflation success, we already took a reference for the @newpage.
	 * If we succeeded, just insert it to the list and update the statistics
	 * under the lock.
	 */
	if (ret == MIGRATEPAGE_SUCCESS) {
		balloon_page_insert(&b->b_dev_info, newpage);
		__count_vm_event(BALLOON_MIGRATE);
	}

	/*
	 * We deflated successfully, so regardless of the inflation success, we
	 * need to reduce the number of isolated_pages.
	 */
	b->b_dev_info.isolated_pages--;
	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);

out_unlock:
	up_read(&b->conf_sem);
	return ret;
}
/**
 * vmballoon_compaction_deinit() - removes compaction related data.
 *
 * @b: pointer to the balloon.
 */
static void vmballoon_compaction_deinit(struct vmballoon *b)
{
	if (!IS_ERR(b->b_dev_info.inode))
		iput(b->b_dev_info.inode);

	b->b_dev_info.inode = NULL;
	kern_unmount(vmballoon_mnt);
	vmballoon_mnt = NULL;
}
/**
 * vmballoon_compaction_init() - initializes compaction for the balloon.
 *
 * @b: pointer to the balloon.
 *
 * If during the initialization a failure occurred, this function does not
 * perform cleanup. The caller must call vmballoon_compaction_deinit() in this
 * case.
 *
 * Return: zero on success or error code on failure.
 */
static __init int vmballoon_compaction_init(struct vmballoon *b)
{
	vmballoon_mnt = kern_mount(&vmballoon_fs);
	if (IS_ERR(vmballoon_mnt))
		return PTR_ERR(vmballoon_mnt);

	b->b_dev_info.migratepage = vmballoon_migratepage;
	b->b_dev_info.inode = alloc_anon_inode(vmballoon_mnt->mnt_sb);

	if (IS_ERR(b->b_dev_info.inode))
		return PTR_ERR(b->b_dev_info.inode);

	b->b_dev_info.inode->i_mapping->a_ops = &balloon_aops;

	return 0;
}
#else /* CONFIG_BALLOON_COMPACTION */

static void vmballoon_compaction_deinit(struct vmballoon *b)
{
}

static int vmballoon_compaction_init(struct vmballoon *b)
{
	return 0;
}

#endif /* CONFIG_BALLOON_COMPACTION */
static int __init vmballoon_init(void)
{
	int error;

	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper_type != X86_HYPER_VMWARE)
		return -ENODEV;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_register_shrinker(&balloon);
	if (error)
		goto fail;

	/*
	 * Initialization of compaction must be done after the call to
	 * balloon_devinfo_init() .
	 */
	balloon_devinfo_init(&balloon.b_dev_info);
	error = vmballoon_compaction_init(&balloon);
	if (error)
		goto fail;

	INIT_LIST_HEAD(&balloon.huge_pages);
	spin_lock_init(&balloon.comm_lock);
	init_rwsem(&balloon.conf_sem);
	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
	balloon.batch_page = NULL;
	balloon.page = NULL;
	balloon.reset_required = true;

	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	vmballoon_debugfs_init(&balloon);

	return 0;
fail:
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_compaction_deinit(&balloon);
	return error;
}
/*
 * Using late_initcall() instead of module_init() allows the balloon to use the
 * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
 * VMCI is probed only after the balloon is initialized. If the balloon is used
 * as a module, late_initcall() is equivalent to module_init().
 */
late_initcall(vmballoon_init);
static void __exit vmballoon_exit(void)
{
	vmballoon_unregister_shrinker(&balloon);
	vmballoon_vmci_cleanup(&balloon);
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);

	/* Only once we popped the balloon can compaction be deinited. */
	vmballoon_compaction_deinit(&balloon);
}
module_exit(vmballoon_exit);