1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Virtio-mem device driver.
5 * Copyright Red Hat, Inc. 2020
7 * Author(s): David Hildenbrand <david@redhat.com>
10 #include <linux/virtio.h>
11 #include <linux/virtio_mem.h>
12 #include <linux/workqueue.h>
13 #include <linux/slab.h>
14 #include <linux/module.h>
16 #include <linux/memory_hotplug.h>
17 #include <linux/memory.h>
18 #include <linux/hrtimer.h>
19 #include <linux/crash_dump.h>
20 #include <linux/mutex.h>
21 #include <linux/bitmap.h>
22 #include <linux/lockdep.h>
24 #include <acpi/acpi_numa.h>
26 static bool unplug_online = true;
27 module_param(unplug_online, bool, 0644);
28 MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");
30 static bool force_bbm;
31 module_param(force_bbm, bool, 0444);
32 MODULE_PARM_DESC(force_bbm,
33 "Force Big Block Mode. Default is 0 (auto-selection)");
35 static unsigned long bbm_block_size;
36 module_param(bbm_block_size, ulong, 0444);
37 MODULE_PARM_DESC(bbm_block_size,
38 "Big Block size in bytes. Default is 0 (auto-detection).");
40 static bool bbm_safe_unplug = true;
41 module_param(bbm_safe_unplug, bool, 0444);
42 MODULE_PARM_DESC(bbm_safe_unplug,
43 "Use a safe unplug mechanism in BBM, avoiding long/endless loops");
46 * virtio-mem currently supports the following modes of operation:
48 * * Sub Block Mode (SBM): A Linux memory block spans 2..X subblocks (SB). The
49 * size of a Sub Block (SB) is determined based on the device block size, the
50 * pageblock size, and the maximum allocation granularity of the buddy.
51 * Subblocks within a Linux memory block might either be plugged or unplugged.
52 * Memory is added to/removed from Linux MM in Linux memory block granularity.
54 * * Big Block Mode (BBM): A Big Block (BB) spans 1..X Linux memory blocks.
55 * Memory is added to/removed from Linux MM in Big Block granularity.
57 * The mode is determined automatically based on the Linux memory block size
58 * and the device block size.
60 * User space / core MM (auto onlining) is responsible for onlining added
61 * Linux memory blocks - and for selecting a zone. Linux Memory Blocks are
62 * always onlined separately, and all memory within a Linux memory block is
63 * onlined to the same zone - virtio-mem relies on this behavior.
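/*
 * Illustrative sketch of the mode auto-selection described above (simplified;
 * the real decision is made during device initialization and can be
 * overridden via force_bbm). Roughly, with sb_size being the subblock size
 * derived from the device block size:
 *
 *	in_sbm = !force_bbm && sb_size < memory_block_size_bytes();
 *
 * E.g., with 128 MiB Linux memory blocks and a 2 MiB device block size, SBM
 * is used; with a 1 GiB device block size, BBM with 1 GiB big blocks is used.
 */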
67 * State of a Linux memory block in SBM.
69 enum virtio_mem_sbm_mb_state {
70 /* Unplugged, not added to Linux. Can be reused later. */
71 VIRTIO_MEM_SBM_MB_UNUSED = 0,
72 /* (Partially) plugged, not added to Linux. Error on add_memory(). */
73 VIRTIO_MEM_SBM_MB_PLUGGED,
74 /* Fully plugged, fully added to Linux, offline. */
75 VIRTIO_MEM_SBM_MB_OFFLINE,
76 /* Partially plugged, fully added to Linux, offline. */
77 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
78 /* Fully plugged, fully added to Linux, onlined to a kernel zone. */
79 VIRTIO_MEM_SBM_MB_KERNEL,
80 /* Partially plugged, fully added to Linux, onlined to a kernel zone. */
81 VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
82 /* Fully plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
83 VIRTIO_MEM_SBM_MB_MOVABLE,
84 /* Partially plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
85 VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
86 VIRTIO_MEM_SBM_MB_COUNT
90 * State of a Big Block (BB) in BBM, covering 1..X Linux memory blocks.
92 enum virtio_mem_bbm_bb_state {
93 /* Unplugged, not added to Linux. Can be reused later. */
94 VIRTIO_MEM_BBM_BB_UNUSED = 0,
95 /* Plugged, not added to Linux. Error on add_memory(). */
96 VIRTIO_MEM_BBM_BB_PLUGGED,
97 /* Plugged and added to Linux. */
98 VIRTIO_MEM_BBM_BB_ADDED,
99 /* All online parts are fake-offline, ready to remove. */
100 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE,
101 VIRTIO_MEM_BBM_BB_COUNT
105 struct virtio_device *vdev;
107 /* We might first have to unplug all memory when starting up. */
108 bool unplug_all_required;
110 /* Workqueue that processes the plug/unplug requests. */
111 struct work_struct wq;
113 atomic_t config_changed;
115 /* Virtqueue for guest->host requests. */
116 struct virtqueue *vq;
118 /* Wait for a host response to a guest request. */
119 wait_queue_head_t host_resp;
121 /* Space for one guest request and the host response. */
122 struct virtio_mem_req req;
123 struct virtio_mem_resp resp;
125 /* The current size of the device. */
126 uint64_t plugged_size;
127 /* The requested size of the device. */
128 uint64_t requested_size;
130 /* The device block size (for communicating with the device). */
131 uint64_t device_block_size;
132 /* The determined node id for all memory of the device. */
134 /* Physical start address of the memory region. */
136 /* Maximum region size in bytes. */
137 uint64_t region_size;
139 /* The parent resource for all memory added via this device. */
140 struct resource *parent_resource;
142 * Copy of "System RAM (virtio_mem)" to be used for
143 * add_memory_driver_managed().
145 const char *resource_name;
146 /* Memory group identification. */
150 * We don't want to add too much memory if it's not getting onlined,
151 * to avoid running OOM. Besides this threshold, we allow having at
152 * least two offline blocks at a time (whichever is bigger).
154 #define VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD (1024 * 1024 * 1024)
155 atomic64_t offline_size;
156 uint64_t offline_threshold;
158 /* If set, the driver is in SBM, otherwise in BBM. */
163 /* Id of the first memory block of this device. */
164 unsigned long first_mb_id;
165 /* Id of the last usable memory block of this device. */
166 unsigned long last_usable_mb_id;
167 /* Id of the next memory block to prepare when needed. */
168 unsigned long next_mb_id;
170 /* The subblock size. */
172 /* The number of subblocks per Linux memory block. */
175 /* Summary of all memory block states. */
176 unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];
179 * One byte state per memory block. Allocated via
180 * vmalloc(). Resized (alloc+copy+free) on demand.
182 * With 128 MiB memory blocks, we have states for 512
183 * GiB of memory in one 4 KiB page.
188 * Bitmap: one bit per subblock. Allocated similar to
191 * A set bit means the corresponding subblock is
192 * plugged, otherwise it's unplugged.
194 * With 4 MiB subblocks, we manage 128 GiB of memory
197 unsigned long *sb_states;
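/*
 * Worked numbers for the sizes mentioned above (illustrative): one 4 KiB
 * page holds 4096 one-byte states, and 4096 * 128 MiB = 512 GiB; used as a
 * bitmap, the same page holds 32768 bits, and 32768 * 4 MiB = 128 GiB.
 */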
201 /* Id of the first big block of this device. */
202 unsigned long first_bb_id;
203 /* Id of the last usable big block of this device. */
204 unsigned long last_usable_bb_id;
205 /* Id of the next big block to prepare when needed. */
206 unsigned long next_bb_id;
208 /* Summary of all big block states. */
209 unsigned long bb_count[VIRTIO_MEM_BBM_BB_COUNT];
211 /* One byte state per big block. See sbm.mb_states. */
214 /* The block size used for plugging/adding/removing. */
220 * Mutex that protects the sbm.mb_count, sbm.mb_states,
221 * sbm.sb_states, bbm.bb_count, and bbm.bb_states
223 * When this lock is held the pointers can't change, ONLINE and
224 * OFFLINE blocks can't change the state and no subblocks will get
227 * In kdump mode, used to serialize requests, last_block_addr and
228 * last_block_plugged.
230 struct mutex hotplug_mutex;
233 /* An error occurred we cannot handle - stop processing requests. */
236 /* Cached value of is_kdump_kernel() when the device was probed. */
239 /* The driver is being removed. */
240 spinlock_t removal_lock;
243 /* Timer for retrying to plug/unplug memory. */
244 struct hrtimer retry_timer;
245 unsigned int retry_timer_ms;
246 #define VIRTIO_MEM_RETRY_TIMER_MIN_MS 50000
247 #define VIRTIO_MEM_RETRY_TIMER_MAX_MS 300000
249 /* Memory notifier (online/offline events). */
250 struct notifier_block memory_notifier;
252 #ifdef CONFIG_PROC_VMCORE
253 /* vmcore callback for /proc/vmcore handling in kdump mode */
254 struct vmcore_cb vmcore_cb;
255 uint64_t last_block_addr;
256 bool last_block_plugged;
257 #endif /* CONFIG_PROC_VMCORE */
259 /* Next device in the list of virtio-mem devices. */
260 struct list_head next;
264 * We have to share a single online_page callback among all virtio-mem
265 * devices. We use RCU to iterate the list in the callback.
267 static DEFINE_MUTEX(virtio_mem_mutex);
268 static LIST_HEAD(virtio_mem_devices);
270 static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
271 static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
272 unsigned long nr_pages);
273 static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
274 unsigned long nr_pages);
275 static void virtio_mem_retry(struct virtio_mem *vm);
276 static int virtio_mem_create_resource(struct virtio_mem *vm);
277 static void virtio_mem_delete_resource(struct virtio_mem *vm);
280 * Register a virtio-mem device so it will be considered for the online_page callback.
283 static int register_virtio_mem_device(struct virtio_mem *vm)
287 /* First device registers the callback. */
288 mutex_lock(&virtio_mem_mutex);
289 if (list_empty(&virtio_mem_devices))
290 rc = set_online_page_callback(&virtio_mem_online_page_cb);
292 list_add_rcu(&vm->next, &virtio_mem_devices);
293 mutex_unlock(&virtio_mem_mutex);
299 * Unregister a virtio-mem device so it will no longer be considered for the
300 * online_page callback.
302 static void unregister_virtio_mem_device(struct virtio_mem *vm)
304 /* Last device unregisters the callback. */
305 mutex_lock(&virtio_mem_mutex);
306 list_del_rcu(&vm->next);
307 if (list_empty(&virtio_mem_devices))
308 restore_online_page_callback(&virtio_mem_online_page_cb);
309 mutex_unlock(&virtio_mem_mutex);
315 * Calculate the memory block id of a given address.
317 static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
319 return addr / memory_block_size_bytes();
323 * Calculate the physical start address of a given memory block id.
325 static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
327 return mb_id * memory_block_size_bytes();
331 * Calculate the big block id of a given address.
333 static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
336 return addr / vm->bbm.bb_size;
340 * Calculate the physical start address of a given big block id.
342 static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
345 return bb_id * vm->bbm.bb_size;
349 * Calculate the subblock id of a given address.
351 static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
354 const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
355 const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);
357 return (addr - mb_addr) / vm->sbm.sb_size;
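/*
 * Worked example for the conversions above (illustrative, assuming 128 MiB
 * Linux memory blocks and 4 MiB subblocks): addr = 260 MiB ->
 * mb_id = 260 / 128 = 2, mb_addr = 256 MiB, sb_id = (260 - 256) / 4 = 1.
 */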
361 * Set the state of a big block, taking care of the state counter.
363 static void virtio_mem_bbm_set_bb_state(struct virtio_mem *vm,
365 enum virtio_mem_bbm_bb_state state)
367 const unsigned long idx = bb_id - vm->bbm.first_bb_id;
368 enum virtio_mem_bbm_bb_state old_state;
370 old_state = vm->bbm.bb_states[idx];
371 vm->bbm.bb_states[idx] = state;
373 BUG_ON(vm->bbm.bb_count[old_state] == 0);
374 vm->bbm.bb_count[old_state]--;
375 vm->bbm.bb_count[state]++;
379 * Get the state of a big block.
381 static enum virtio_mem_bbm_bb_state virtio_mem_bbm_get_bb_state(struct virtio_mem *vm,
384 return vm->bbm.bb_states[bb_id - vm->bbm.first_bb_id];
388 * Prepare the big block state array for the next big block.
390 static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm)
392 unsigned long old_bytes = vm->bbm.next_bb_id - vm->bbm.first_bb_id;
393 unsigned long new_bytes = old_bytes + 1;
394 int old_pages = PFN_UP(old_bytes);
395 int new_pages = PFN_UP(new_bytes);
398 if (vm->bbm.bb_states && old_pages == new_pages)
401 new_array = vzalloc(new_pages * PAGE_SIZE);
405 mutex_lock(&vm->hotplug_mutex);
406 if (vm->bbm.bb_states)
407 memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE);
408 vfree(vm->bbm.bb_states);
409 vm->bbm.bb_states = new_array;
410 mutex_unlock(&vm->hotplug_mutex);
415 #define virtio_mem_bbm_for_each_bb(_vm, _bb_id, _state) \
416 for (_bb_id = _vm->bbm.first_bb_id; \
417 _bb_id < vm->bbm.next_bb_id && _vm->bbm.bb_count[_state]; \
419 if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
421 #define virtio_mem_bbm_for_each_bb_rev(_vm, _bb_id, _state) \
422 for (_bb_id = _vm->bbm.next_bb_id - 1; \
423 _bb_id >= _vm->bbm.first_bb_id && _vm->bbm.bb_count[_state]; \
425 if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
428 * Set the state of a memory block, taking care of the state counter.
430 static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm,
431 unsigned long mb_id, uint8_t state)
433 const unsigned long idx = mb_id - vm->sbm.first_mb_id;
436 old_state = vm->sbm.mb_states[idx];
437 vm->sbm.mb_states[idx] = state;
439 BUG_ON(vm->sbm.mb_count[old_state] == 0);
440 vm->sbm.mb_count[old_state]--;
441 vm->sbm.mb_count[state]++;
445 * Get the state of a memory block.
447 static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm,
450 const unsigned long idx = mb_id - vm->sbm.first_mb_id;
452 return vm->sbm.mb_states[idx];
456 * Prepare the state array for the next memory block.
458 static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
460 int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
461 int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
464 if (vm->sbm.mb_states && old_pages == new_pages)
467 new_array = vzalloc(new_pages * PAGE_SIZE);
471 mutex_lock(&vm->hotplug_mutex);
472 if (vm->sbm.mb_states)
473 memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
474 vfree(vm->sbm.mb_states);
475 vm->sbm.mb_states = new_array;
476 mutex_unlock(&vm->hotplug_mutex);
481 #define virtio_mem_sbm_for_each_mb(_vm, _mb_id, _state) \
482 for (_mb_id = _vm->sbm.first_mb_id; \
483 _mb_id < _vm->sbm.next_mb_id && _vm->sbm.mb_count[_state]; \
485 if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
487 #define virtio_mem_sbm_for_each_mb_rev(_vm, _mb_id, _state) \
488 for (_mb_id = _vm->sbm.next_mb_id - 1; \
489 _mb_id >= _vm->sbm.first_mb_id && _vm->sbm.mb_count[_state]; \
491 if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
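/*
 * Usage sketch for the iteration helpers above (illustrative only):
 *
 *	unsigned long mb_id;
 *
 *	virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_OFFLINE)
 *		pr_debug("fully plugged offline mb: %lu\n", mb_id);
 *
 * The _rev variants walk the same blocks from the highest id downwards.
 */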
494 * Calculate the bit number in the subblock bitmap for the given subblock
495 * inside the given memory block.
497 static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm,
498 unsigned long mb_id, int sb_id)
500 return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id;
504 * Mark all selected subblocks plugged.
506 * Will not modify the state of the memory block.
508 static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
509 unsigned long mb_id, int sb_id,
512 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
514 __bitmap_set(vm->sbm.sb_states, bit, count);
518 * Mark all selected subblocks unplugged.
520 * Will not modify the state of the memory block.
522 static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
523 unsigned long mb_id, int sb_id,
526 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
528 __bitmap_clear(vm->sbm.sb_states, bit, count);
532 * Test if all selected subblocks are plugged.
534 static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
535 unsigned long mb_id, int sb_id,
538 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
541 return test_bit(bit, vm->sbm.sb_states);
543 /* TODO: Helper similar to bitmap_set() */
544 return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >=
549 * Test if all selected subblocks are unplugged.
551 static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
552 unsigned long mb_id, int sb_id,
555 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
557 /* TODO: Helper similar to bitmap_set() */
558 return find_next_bit(vm->sbm.sb_states, bit + count, bit) >=
563 * Find the first unplugged subblock. Returns vm->sbm.sbs_per_mb in case there is none.
566 static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
569 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0);
571 return find_next_zero_bit(vm->sbm.sb_states,
572 bit + vm->sbm.sbs_per_mb, bit) - bit;
576 * Prepare the subblock bitmap for the next memory block.
578 static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
580 const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id;
581 const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb;
582 const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb;
583 int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
584 int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
585 unsigned long *new_bitmap, *old_bitmap;
587 if (vm->sbm.sb_states && old_pages == new_pages)
590 new_bitmap = vzalloc(new_pages * PAGE_SIZE);
594 mutex_lock(&vm->hotplug_mutex);
596 memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
598 old_bitmap = vm->sbm.sb_states;
599 vm->sbm.sb_states = new_bitmap;
600 mutex_unlock(&vm->hotplug_mutex);
607 * Test if we could add memory without creating too much offline memory -
608 * to avoid running OOM if memory is onlined in a deferred fashion.
610 static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size)
612 if (WARN_ON_ONCE(size > vm->offline_threshold))
615 return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold;
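/*
 * Example (illustrative): with a 1 GiB offline_threshold and 768 MiB
 * currently offline, adding a 128 MiB memory block is fine (896 MiB <= 1 GiB),
 * while adding a 512 MiB big block would be deferred (1280 MiB > 1 GiB).
 */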
619 * Try adding memory to Linux. Will usually only fail if out of memory.
621 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
624 * Will not modify the state of memory blocks in virtio-mem.
626 static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
632 * When force-unloading the driver and we still have memory added to
633 * Linux, the resource name has to stay.
635 if (!vm->resource_name) {
636 vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
638 if (!vm->resource_name)
642 dev_dbg(&vm->vdev->dev, "adding memory: 0x%llx - 0x%llx\n", addr,
644 /* Memory might get onlined immediately. */
645 atomic64_add(size, &vm->offline_size);
646 rc = add_memory_driver_managed(vm->mgid, addr, size, vm->resource_name,
647 MHP_MERGE_RESOURCE | MHP_NID_IS_MGID);
649 atomic64_sub(size, &vm->offline_size);
650 dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
652 * TODO: Linux MM does not properly clean up yet in all cases
653 * where adding of memory failed - especially on -ENOMEM.
660 * See virtio_mem_add_memory(): Try adding a single Linux memory block.
662 static int virtio_mem_sbm_add_mb(struct virtio_mem *vm, unsigned long mb_id)
664 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
665 const uint64_t size = memory_block_size_bytes();
667 return virtio_mem_add_memory(vm, addr, size);
671 * See virtio_mem_add_memory(): Try adding a big block.
673 static int virtio_mem_bbm_add_bb(struct virtio_mem *vm, unsigned long bb_id)
675 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
676 const uint64_t size = vm->bbm.bb_size;
678 return virtio_mem_add_memory(vm, addr, size);
682 * Try removing memory from Linux. Will only fail if memory blocks aren't
685 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
688 * Will not modify the state of memory blocks in virtio-mem.
690 static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
695 dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
697 rc = remove_memory(addr, size);
699 atomic64_sub(size, &vm->offline_size);
701 * We might have freed up memory we can now unplug, retry
702 * immediately instead of waiting.
704 virtio_mem_retry(vm);
706 dev_dbg(&vm->vdev->dev, "removing memory failed: %d\n", rc);
712 * See virtio_mem_remove_memory(): Try removing a single Linux memory block.
714 static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id)
716 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
717 const uint64_t size = memory_block_size_bytes();
719 return virtio_mem_remove_memory(vm, addr, size);
723 * Try offlining and removing memory from Linux.
725 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
728 * Will not modify the state of memory blocks in virtio-mem.
730 static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
736 dev_dbg(&vm->vdev->dev,
737 "offlining and removing memory: 0x%llx - 0x%llx\n", addr,
740 rc = offline_and_remove_memory(addr, size);
742 atomic64_sub(size, &vm->offline_size);
744 * We might have freed up memory we can now unplug, retry
745 * immediately instead of waiting.
747 virtio_mem_retry(vm);
749 dev_dbg(&vm->vdev->dev,
750 "offlining and removing memory failed: %d\n", rc);
756 * See virtio_mem_offline_and_remove_memory(): Try offlining and removing
757 * a single Linux memory block.
759 static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
762 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
763 const uint64_t size = memory_block_size_bytes();
765 return virtio_mem_offline_and_remove_memory(vm, addr, size);
769 * See virtio_mem_offline_and_remove_memory(): Try to offline and remove
770 * all Linux memory blocks covered by the big block.
772 static int virtio_mem_bbm_offline_and_remove_bb(struct virtio_mem *vm,
775 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
776 const uint64_t size = vm->bbm.bb_size;
778 return virtio_mem_offline_and_remove_memory(vm, addr, size);
782 * Trigger the workqueue so the device can perform its magic.
784 static void virtio_mem_retry(struct virtio_mem *vm)
788 spin_lock_irqsave(&vm->removal_lock, flags);
790 queue_work(system_freezable_wq, &vm->wq);
791 spin_unlock_irqrestore(&vm->removal_lock, flags);
794 static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
796 int node = NUMA_NO_NODE;
798 #if defined(CONFIG_ACPI_NUMA)
799 if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
800 node = pxm_to_node(node_id);
806 * Test if a virtio-mem device overlaps with the given range. Can be called
807 * from (notifier) callbacks lockless.
809 static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start,
812 return start < vm->addr + vm->region_size && vm->addr < start + size;
816 * Test if a virtio-mem device contains a given range. Can be called from
817 * (notifier) callbacks lockless.
819 static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start,
822 return start >= vm->addr && start + size <= vm->addr + vm->region_size;
825 static int virtio_mem_sbm_notify_going_online(struct virtio_mem *vm,
828 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
829 case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
830 case VIRTIO_MEM_SBM_MB_OFFLINE:
835 dev_warn_ratelimited(&vm->vdev->dev,
836 "memory block onlining denied\n");
840 static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
843 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
844 case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
845 case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
846 virtio_mem_sbm_set_mb_state(vm, mb_id,
847 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
849 case VIRTIO_MEM_SBM_MB_KERNEL:
850 case VIRTIO_MEM_SBM_MB_MOVABLE:
851 virtio_mem_sbm_set_mb_state(vm, mb_id,
852 VIRTIO_MEM_SBM_MB_OFFLINE);
860 static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
862 unsigned long start_pfn)
864 const bool is_movable = page_zonenum(pfn_to_page(start_pfn)) ==
868 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
869 case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
870 new_state = VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL;
872 new_state = VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL;
874 case VIRTIO_MEM_SBM_MB_OFFLINE:
875 new_state = VIRTIO_MEM_SBM_MB_KERNEL;
877 new_state = VIRTIO_MEM_SBM_MB_MOVABLE;
883 virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
886 static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm,
889 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
893 for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
894 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
896 pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
897 sb_id * vm->sbm.sb_size);
898 virtio_mem_fake_offline_going_offline(pfn, nr_pages);
902 static void virtio_mem_sbm_notify_cancel_offline(struct virtio_mem *vm,
905 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
909 for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
910 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
912 pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
913 sb_id * vm->sbm.sb_size);
914 virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
918 static void virtio_mem_bbm_notify_going_offline(struct virtio_mem *vm,
921 unsigned long nr_pages)
924 * When marked as "fake-offline", all online memory of this device block
925 * is allocated by us. Otherwise, we don't have any memory allocated.
927 if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
928 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
930 virtio_mem_fake_offline_going_offline(pfn, nr_pages);
933 static void virtio_mem_bbm_notify_cancel_offline(struct virtio_mem *vm,
936 unsigned long nr_pages)
938 if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
939 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
941 virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
945 * This callback will either be called synchronously from add_memory() or
946 * asynchronously (e.g., triggered via user space). We have to be careful
947 * with locking when calling add_memory().
949 static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
950 unsigned long action, void *arg)
952 struct virtio_mem *vm = container_of(nb, struct virtio_mem,
954 struct memory_notify *mhp = arg;
955 const unsigned long start = PFN_PHYS(mhp->start_pfn);
956 const unsigned long size = PFN_PHYS(mhp->nr_pages);
960 if (!virtio_mem_overlaps_range(vm, start, size))
964 id = virtio_mem_phys_to_mb_id(start);
966 * In SBM, we add memory in separate memory blocks - we expect
967 * it to be onlined/offlined in the same granularity. Bail out
968 * if this ever changes.
970 if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
971 !IS_ALIGNED(start, memory_block_size_bytes())))
974 id = virtio_mem_phys_to_bb_id(vm, start);
976 * In BBM, we only care about onlining/offlining happening
977 * within a single big block, we don't care about the
978 * actual granularity as we don't track individual Linux
981 if (WARN_ON_ONCE(id != virtio_mem_phys_to_bb_id(vm, start + size - 1)))
986 * Avoid circular locking lockdep warnings. We lock the mutex
987 * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
988 * blocking_notifier_call_chain() has its own lock, which gets unlocked
989 * between both notifier calls and will bail out. False positive.
994 case MEM_GOING_OFFLINE:
995 mutex_lock(&vm->hotplug_mutex);
997 rc = notifier_from_errno(-EBUSY);
998 mutex_unlock(&vm->hotplug_mutex);
1001 vm->hotplug_active = true;
1003 virtio_mem_sbm_notify_going_offline(vm, id);
1005 virtio_mem_bbm_notify_going_offline(vm, id,
1009 case MEM_GOING_ONLINE:
1010 mutex_lock(&vm->hotplug_mutex);
1012 rc = notifier_from_errno(-EBUSY);
1013 mutex_unlock(&vm->hotplug_mutex);
1016 vm->hotplug_active = true;
1018 rc = virtio_mem_sbm_notify_going_online(vm, id);
1022 virtio_mem_sbm_notify_offline(vm, id);
1024 atomic64_add(size, &vm->offline_size);
1026 * Trigger the workqueue. Now that we have some offline memory,
1027 * maybe we can handle pending unplug requests.
1030 virtio_mem_retry(vm);
1032 vm->hotplug_active = false;
1033 mutex_unlock(&vm->hotplug_mutex);
1037 virtio_mem_sbm_notify_online(vm, id, mhp->start_pfn);
1039 atomic64_sub(size, &vm->offline_size);
1041 * Start adding more memory once we've onlined half of our
1042 * threshold. Don't trigger if it's possibly due to our action
1043 * (e.g., us adding memory which gets onlined immediately from
1046 if (!atomic_read(&vm->wq_active) &&
1047 virtio_mem_could_add_memory(vm, vm->offline_threshold / 2))
1048 virtio_mem_retry(vm);
1050 vm->hotplug_active = false;
1051 mutex_unlock(&vm->hotplug_mutex);
1053 case MEM_CANCEL_OFFLINE:
1054 if (!vm->hotplug_active)
1057 virtio_mem_sbm_notify_cancel_offline(vm, id);
1059 virtio_mem_bbm_notify_cancel_offline(vm, id,
1062 vm->hotplug_active = false;
1063 mutex_unlock(&vm->hotplug_mutex);
1065 case MEM_CANCEL_ONLINE:
1066 if (!vm->hotplug_active)
1068 vm->hotplug_active = false;
1069 mutex_unlock(&vm->hotplug_mutex);
1081 * Set a range of pages PG_offline. Remember pages that were never onlined
1082 * (via generic_online_page()) using PageDirty().
1084 static void virtio_mem_set_fake_offline(unsigned long pfn,
1085 unsigned long nr_pages, bool onlined)
1087 page_offline_begin();
1088 for (; nr_pages--; pfn++) {
1089 struct page *page = pfn_to_page(pfn);
1091 __SetPageOffline(page);
1094 /* FIXME: remove after cleanups */
1095 ClearPageReserved(page);
1102 * Clear PG_offline from a range of pages. If the pages were never onlined
1103 * (via generic_online_page()), clear PageDirty().
1105 static void virtio_mem_clear_fake_offline(unsigned long pfn,
1106 unsigned long nr_pages, bool onlined)
1108 for (; nr_pages--; pfn++) {
1109 struct page *page = pfn_to_page(pfn);
1111 __ClearPageOffline(page);
1113 ClearPageDirty(page);
1118 * Release a range of fake-offline pages to the buddy, effectively
1119 * fake-onlining them.
1121 static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
1123 const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES;
1127 * We are always called at least with MAX_ORDER_NR_PAGES
1128 * granularity/alignment (e.g., the way subblocks work). All pages
1129 * inside such a block are alike.
1131 for (i = 0; i < nr_pages; i += max_nr_pages) {
1132 struct page *page = pfn_to_page(pfn + i);
1135 * If the page is PageDirty(), it was kept fake-offline when
1136 * onlining the memory block. Otherwise, it was allocated
1137 * using alloc_contig_range(). All pages in a subblock are
1140 if (PageDirty(page)) {
1141 virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
1143 generic_online_page(page, MAX_ORDER - 1);
1145 virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
1147 free_contig_range(pfn + i, max_nr_pages);
1148 adjust_managed_page_count(page, max_nr_pages);
1154 * Try to allocate a range, marking pages fake-offline, effectively
1155 * fake-offlining them.
1157 static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
1159 const bool is_movable = page_zonenum(pfn_to_page(pfn)) ==
1161 int rc, retry_count;
1164 * TODO: We want an alloc_contig_range() mode that tries to allocate
1165 * harder (e.g., dealing with temporarily pinned pages, PCP), especially
1166 * with ZONE_MOVABLE. So for now, retry a couple of times with
1167 * ZONE_MOVABLE before giving up - because that zone is supposed to give
1170 for (retry_count = 0; retry_count < 5; retry_count++) {
1171 rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
1174 /* whoops, out of memory */
1176 else if (rc && !is_movable)
1181 virtio_mem_set_fake_offline(pfn, nr_pages, true);
1182 adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
1190 * Handle fake-offline pages when memory is going offline - such that the
1191 * pages can be skipped by mm-core when offlining.
1193 static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
1194 unsigned long nr_pages)
1200 * Drop our reference to the pages so the memory can get offlined
1201 * and add the unplugged pages to the managed page counters (so
1202 * offlining code can correctly subtract them again).
1204 adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
1205 /* Drop our reference to the pages so the memory can get offlined. */
1206 for (i = 0; i < nr_pages; i++) {
1207 page = pfn_to_page(pfn + i);
1208 if (WARN_ON(!page_ref_dec_and_test(page)))
1209 dump_page(page, "fake-offline page referenced");
1214 * Handle fake-offline pages when memory offlining is canceled - to undo
1215 * what we did in virtio_mem_fake_offline_going_offline().
1217 static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
1218 unsigned long nr_pages)
1223 * Get the reference we dropped when going offline and subtract the
1224 * unplugged pages from the managed page counters.
1226 adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
1227 for (i = 0; i < nr_pages; i++)
1228 page_ref_inc(pfn_to_page(pfn + i));
1231 static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
1233 const unsigned long addr = page_to_phys(page);
1234 unsigned long id, sb_id;
1235 struct virtio_mem *vm;
1239 list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
1240 if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
1245 * We exploit here that subblocks have at least
1246 * MAX_ORDER_NR_PAGES size/alignment - so we cannot
1247 * cross subblocks within one call.
1249 id = virtio_mem_phys_to_mb_id(addr);
1250 sb_id = virtio_mem_phys_to_sb_id(vm, addr);
1251 do_online = virtio_mem_sbm_test_sb_plugged(vm, id,
1255 * If the whole block is marked fake offline, keep
1256 * everything that way.
1258 id = virtio_mem_phys_to_bb_id(vm, addr);
1259 do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
1260 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
1264 * virtio_mem_set_fake_offline() might sleep; we don't need
1265 * the device anymore. See virtio_mem_remove() for how races
1266 * between memory onlining and device removal are handled.
1271 generic_online_page(page, order);
1273 virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
1279 /* not virtio-mem memory, but e.g., a DIMM. online it */
1280 generic_online_page(page, order);
1283 static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
1284 const struct virtio_mem_req *req)
1286 struct scatterlist *sgs[2], sg_req, sg_resp;
1290 /* don't use the request residing on the stack (vaddr) */
1293 /* out: buffer for request */
1294 sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
1297 /* in: buffer for response */
1298 sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
1301 rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
1305 virtqueue_kick(vm->vq);
1307 /* wait for a response */
1308 wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));
1310 return virtio16_to_cpu(vm->vdev, vm->resp.type);
1313 static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
1316 const uint64_t nb_vm_blocks = size / vm->device_block_size;
1317 const struct virtio_mem_req req = {
1318 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
1319 .u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
1320 .u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
1324 if (atomic_read(&vm->config_changed))
1327 dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
1330 switch (virtio_mem_send_request(vm, &req)) {
1331 case VIRTIO_MEM_RESP_ACK:
1332 vm->plugged_size += size;
1334 case VIRTIO_MEM_RESP_NACK:
1337 case VIRTIO_MEM_RESP_BUSY:
1340 case VIRTIO_MEM_RESP_ERROR:
1347 dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc);
1351 static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
1354 const uint64_t nb_vm_blocks = size / vm->device_block_size;
1355 const struct virtio_mem_req req = {
1356 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
1357 .u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
1358 .u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
1362 if (atomic_read(&vm->config_changed))
1365 dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
1368 switch (virtio_mem_send_request(vm, &req)) {
1369 case VIRTIO_MEM_RESP_ACK:
1370 vm->plugged_size -= size;
1372 case VIRTIO_MEM_RESP_BUSY:
1375 case VIRTIO_MEM_RESP_ERROR:
1382 dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc);
1386 static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
1388 const struct virtio_mem_req req = {
1389 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
1393 dev_dbg(&vm->vdev->dev, "unplugging all memory");
1395 switch (virtio_mem_send_request(vm, &req)) {
1396 case VIRTIO_MEM_RESP_ACK:
1397 vm->unplug_all_required = false;
1398 vm->plugged_size = 0;
1399 /* usable region might have shrunk */
1400 atomic_set(&vm->config_changed, 1);
1402 case VIRTIO_MEM_RESP_BUSY:
1409 dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc);
1414 * Plug selected subblocks. Updates the plugged state, but not the state
1415 * of the memory block.
1417 static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
1418 int sb_id, int count)
1420 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
1421 sb_id * vm->sbm.sb_size;
1422 const uint64_t size = count * vm->sbm.sb_size;
1425 rc = virtio_mem_send_plug_request(vm, addr, size);
1427 virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
1432 * Unplug selected subblocks. Updates the plugged state, but not the state
1433 * of the memory block.
1435 static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
1436 int sb_id, int count)
1438 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
1439 sb_id * vm->sbm.sb_size;
1440 const uint64_t size = count * vm->sbm.sb_size;
1443 rc = virtio_mem_send_unplug_request(vm, addr, size);
1445 virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
1450 * Request to unplug a big block.
1452 * Will not modify the state of the big block.
1454 static int virtio_mem_bbm_unplug_bb(struct virtio_mem *vm, unsigned long bb_id)
1456 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
1457 const uint64_t size = vm->bbm.bb_size;
1459 return virtio_mem_send_unplug_request(vm, addr, size);
1463 * Request to plug a big block.
1465 * Will not modify the state of the big block.
1467 static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id)
1469 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
1470 const uint64_t size = vm->bbm.bb_size;
1472 return virtio_mem_send_plug_request(vm, addr, size);
1476 * Unplug the desired number of plugged subblocks of an offline or not-added
1477 * memory block. Will fail if any subblock cannot get unplugged (instead of skipping it).
1480 * Will not modify the state of the memory block.
1482 * Note: can fail after some subblocks were unplugged.
1484 static int virtio_mem_sbm_unplug_any_sb_raw(struct virtio_mem *vm,
1485 unsigned long mb_id, uint64_t *nb_sb)
1490 sb_id = vm->sbm.sbs_per_mb - 1;
1492 /* Find the next candidate subblock */
1493 while (sb_id >= 0 &&
1494 virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
1498 /* Try to unplug multiple subblocks at a time */
1500 while (count < *nb_sb && sb_id > 0 &&
1501 virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
1506 rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
1517 * Unplug all plugged subblocks of an offline or not-added memory block.
1519 * Will not modify the state of the memory block.
1521 * Note: can fail after some subblocks were unplugged.
1523 static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
1525 uint64_t nb_sb = vm->sbm.sbs_per_mb;
1527 return virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, &nb_sb);
1531 * Prepare tracking data for the next memory block.
1533 static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
1534 unsigned long *mb_id)
1538 if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id)
1541 /* Resize the state array if required. */
1542 rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm);
1546 /* Resize the subblock bitmap if required. */
1547 rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
1551 vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++;
1552 *mb_id = vm->sbm.next_mb_id++;
1557 * Try to plug the desired number of subblocks and add the memory block to Linux.
1560 * Will modify the state of the memory block.
1562 static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
1563 unsigned long mb_id, uint64_t *nb_sb)
1565 const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
1568 if (WARN_ON_ONCE(!count))
1572 * Plug the requested number of subblocks before adding it to Linux,
1573 * so that onlining will directly online all plugged subblocks.
1575 rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count);
1580 * Mark the block properly offline before adding it to Linux,
1581 * so the memory notifiers will find the block in the right state.
1583 if (count == vm->sbm.sbs_per_mb)
1584 virtio_mem_sbm_set_mb_state(vm, mb_id,
1585 VIRTIO_MEM_SBM_MB_OFFLINE);
1587 virtio_mem_sbm_set_mb_state(vm, mb_id,
1588 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
1590 /* Add the memory block to Linux - if that fails, try to unplug. */
1591 rc = virtio_mem_sbm_add_mb(vm, mb_id);
1593 int new_state = VIRTIO_MEM_SBM_MB_UNUSED;
1595 if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count))
1596 new_state = VIRTIO_MEM_SBM_MB_PLUGGED;
1597 virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
1606 * Try to plug the desired number of subblocks of a memory block that
1607 * is already added to Linux.
1609 * Will modify the state of the memory block.
1611 * Note: Can fail after some subblocks were successfully plugged.
1613 static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
1614 unsigned long mb_id, uint64_t *nb_sb)
1616 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
1617 unsigned long pfn, nr_pages;
1621 if (WARN_ON_ONCE(!*nb_sb))
1625 sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
1626 if (sb_id >= vm->sbm.sbs_per_mb)
1629 while (count < *nb_sb &&
1630 sb_id + count < vm->sbm.sbs_per_mb &&
1631 !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
1634 rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count);
1638 if (old_state == VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL)
1641 /* fake-online the pages if the memory block is online */
1642 pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
1643 sb_id * vm->sbm.sb_size);
1644 nr_pages = PFN_DOWN(count * vm->sbm.sb_size);
1645 virtio_mem_fake_online(pfn, nr_pages);
1648 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
1649 virtio_mem_sbm_set_mb_state(vm, mb_id, old_state - 1);
1654 static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
1656 const int mb_states[] = {
1657 VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
1658 VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
1659 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
1661 uint64_t nb_sb = diff / vm->sbm.sb_size;
1662 unsigned long mb_id;
1668 /* Don't race with onlining/offlining */
1669 mutex_lock(&vm->hotplug_mutex);
1671 for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
1672 virtio_mem_sbm_for_each_mb(vm, mb_id, mb_states[i]) {
1673 rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb);
1681 * We won't be working on online/offline memory blocks from this point,
1682 * so we can't race with memory onlining/offlining. Drop the mutex.
1684 mutex_unlock(&vm->hotplug_mutex);
1686 /* Try to plug and add unused blocks */
1687 virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
1688 if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
1691 rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
1697 /* Try to prepare, plug and add new blocks */
1699 if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
1702 rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id);
1705 rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
1713 mutex_unlock(&vm->hotplug_mutex);
1718 * Plug a big block and add it to Linux.
1720 * Will modify the state of the big block.
1722 static int virtio_mem_bbm_plug_and_add_bb(struct virtio_mem *vm,
1723 unsigned long bb_id)
1727 if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
1728 VIRTIO_MEM_BBM_BB_UNUSED))
1731 rc = virtio_mem_bbm_plug_bb(vm, bb_id);
1734 virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
1736 rc = virtio_mem_bbm_add_bb(vm, bb_id);
1738 if (!virtio_mem_bbm_unplug_bb(vm, bb_id))
1739 virtio_mem_bbm_set_bb_state(vm, bb_id,
1740 VIRTIO_MEM_BBM_BB_UNUSED);
1742 /* Retry from the main loop. */
1743 virtio_mem_bbm_set_bb_state(vm, bb_id,
1744 VIRTIO_MEM_BBM_BB_PLUGGED);
1751 * Prepare tracking data for the next big block.
1753 static int virtio_mem_bbm_prepare_next_bb(struct virtio_mem *vm,
1754 unsigned long *bb_id)
1758 if (vm->bbm.next_bb_id > vm->bbm.last_usable_bb_id)
1761 /* Resize the big block state array if required. */
1762 rc = virtio_mem_bbm_bb_states_prepare_next_bb(vm);
1766 vm->bbm.bb_count[VIRTIO_MEM_BBM_BB_UNUSED]++;
1767 *bb_id = vm->bbm.next_bb_id;
1768 vm->bbm.next_bb_id++;
1772 static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
1774 uint64_t nb_bb = diff / vm->bbm.bb_size;
1775 unsigned long bb_id;
1781 /* Try to plug and add unused big blocks */
1782 virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_UNUSED) {
1783 if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
1786 rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
1794 /* Try to prepare, plug and add new big blocks */
1796 if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
1799 rc = virtio_mem_bbm_prepare_next_bb(vm, &bb_id);
1802 rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
1814 * Try to plug the requested amount of memory.
1816 static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
1819 return virtio_mem_sbm_plug_request(vm, diff);
1820 return virtio_mem_bbm_plug_request(vm, diff);
1824 * Unplug the desired number of plugged subblocks of an offline memory block.
1825 * Will fail if any subblock cannot get unplugged (instead of skipping it).
1827 * Will modify the state of the memory block. Might temporarily drop the
1830 * Note: Can fail after some subblocks were successfully unplugged.
1832 static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
1833 unsigned long mb_id,
1838 rc = virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, nb_sb);
1840 /* some subblocks might have been unplugged even on failure */
1841 if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
1842 virtio_mem_sbm_set_mb_state(vm, mb_id,
1843 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
1847 if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
1849 * Remove the block from Linux - this should never fail.
1850 * Hinder the block from getting onlined by marking it
1851 * unplugged. Temporarily drop the mutex, so
1852 * any pending GOING_ONLINE requests can be serviced/rejected.
1854 virtio_mem_sbm_set_mb_state(vm, mb_id,
1855 VIRTIO_MEM_SBM_MB_UNUSED);
1857 mutex_unlock(&vm->hotplug_mutex);
1858 rc = virtio_mem_sbm_remove_mb(vm, mb_id);
1860 mutex_lock(&vm->hotplug_mutex);
1866 * Unplug the given plugged subblocks of an online memory block.
1868 * Will modify the state of the memory block.
1870 static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
1871 unsigned long mb_id, int sb_id,
1874 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
1875 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
1876 unsigned long start_pfn;
1879 start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
1880 sb_id * vm->sbm.sb_size);
1882 rc = virtio_mem_fake_offline(start_pfn, nr_pages);
1886 /* Try to unplug the allocated memory */
1887 rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
1889 /* Return the memory to the buddy. */
1890 virtio_mem_fake_online(start_pfn, nr_pages);
1894 switch (old_state) {
1895 case VIRTIO_MEM_SBM_MB_KERNEL:
1896 virtio_mem_sbm_set_mb_state(vm, mb_id,
1897 VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL);
1899 case VIRTIO_MEM_SBM_MB_MOVABLE:
1900 virtio_mem_sbm_set_mb_state(vm, mb_id,
1901 VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL);
1909 * Unplug the desired number of plugged subblocks of an online memory block.
1910 * Will skip subblocks that are busy.
1912 * Will modify the state of the memory block. Might temporarily drop the
1915 * Note: Can fail after some subblocks were successfully unplugged. Can
1916 * return 0 even if subblocks were busy and could not get unplugged.
1918 static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
1919 unsigned long mb_id,
1924 /* If possible, try to unplug the complete block in one shot. */
1925 if (*nb_sb >= vm->sbm.sbs_per_mb &&
1926 virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
1927 rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0,
1928 vm->sbm.sbs_per_mb);
1930 *nb_sb -= vm->sbm.sbs_per_mb;
1932 } else if (rc != -EBUSY)
1936 /* Fallback to single subblocks. */
1937 for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
1938 /* Find the next candidate subblock */
1939 while (sb_id >= 0 &&
1940 !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
1945 rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1);
1955 * Once all subblocks of a memory block were unplugged, offline and
1956 * remove it. This will usually not fail, as no memory is in use
1957 * anymore - however some other notifiers might NACK the request.
1959 if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
1960 mutex_unlock(&vm->hotplug_mutex);
1961 rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
1962 mutex_lock(&vm->hotplug_mutex);
1964 virtio_mem_sbm_set_mb_state(vm, mb_id,
1965 VIRTIO_MEM_SBM_MB_UNUSED);
1972 * Unplug the desired number of plugged subblocks of a memory block that is
1973 * already added to Linux. Will skip subblocks of online memory blocks that are
1974 * busy (by the OS). Will fail if any subblock that's not busy cannot get
1977 * Will modify the state of the memory block. Might temporarily drop the
1980 * Note: Can fail after some subblocks were successfully unplugged. Can
1981 * return 0 even if subblocks were busy and could not get unplugged.
1983 static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
1984 unsigned long mb_id,
1987 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
1989 switch (old_state) {
1990 case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
1991 case VIRTIO_MEM_SBM_MB_KERNEL:
1992 case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
1993 case VIRTIO_MEM_SBM_MB_MOVABLE:
1994 return virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, nb_sb);
1995 case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
1996 case VIRTIO_MEM_SBM_MB_OFFLINE:
1997 return virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, nb_sb);
2002 static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
2004 const int mb_states[] = {
2005 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
2006 VIRTIO_MEM_SBM_MB_OFFLINE,
2007 VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
2008 VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
2009 VIRTIO_MEM_SBM_MB_MOVABLE,
2010 VIRTIO_MEM_SBM_MB_KERNEL,
2012 uint64_t nb_sb = diff / vm->sbm.sb_size;
2013 unsigned long mb_id;
2020 * We'll drop the mutex a couple of times when it is safe to do so.
2021 * This might result in some blocks switching the state (online/offline)
2022 * and we could miss them in this run - we will retry again later.
2024 mutex_lock(&vm->hotplug_mutex);
2027 * We try to unplug from partially plugged blocks first, to try removing
2028 * whole memory blocks along with metadata. We prioritize ZONE_MOVABLE
2029 * as it's more reliable to unplug memory and remove whole memory
2030 * blocks, and we don't want to trigger zone imbalances by
2031 * accidentally removing too much kernel memory.
2033 for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
2034 virtio_mem_sbm_for_each_mb_rev(vm, mb_id, mb_states[i]) {
2035 rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
2038 mutex_unlock(&vm->hotplug_mutex);
2040 mutex_lock(&vm->hotplug_mutex);
2042 if (!unplug_online && i == 1) {
2043 mutex_unlock(&vm->hotplug_mutex);
2048 mutex_unlock(&vm->hotplug_mutex);
2049 return nb_sb ? -EBUSY : 0;
2051 mutex_unlock(&vm->hotplug_mutex);
2056 * Try to offline and remove a big block from Linux and unplug it. Will fail
2057 * with -EBUSY if some memory is busy and cannot get unplugged.
2059 * Will modify the state of the memory block. Might temporarily drop the
2062 static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
2063 unsigned long bb_id)
2065 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2066 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2067 unsigned long end_pfn = start_pfn + nr_pages;
2072 if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
2073 VIRTIO_MEM_BBM_BB_ADDED))
2076 if (bbm_safe_unplug) {
2078 * Start by fake-offlining all memory. Once we marked the device
2079 * block as fake-offline, all newly onlined memory will
2080 * automatically be kept fake-offline. Protect from concurrent
2081 * onlining/offlining until we have a consistent state.
2083 mutex_lock(&vm->hotplug_mutex);
2084 virtio_mem_bbm_set_bb_state(vm, bb_id,
2085 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);
2087 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2088 page = pfn_to_online_page(pfn);
2092 rc = virtio_mem_fake_offline(pfn, PAGES_PER_SECTION);
2095 goto rollback_safe_unplug;
2098 mutex_unlock(&vm->hotplug_mutex);
2101 rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
2103 if (bbm_safe_unplug) {
2104 mutex_lock(&vm->hotplug_mutex);
2105 goto rollback_safe_unplug;
2110 rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
2112 virtio_mem_bbm_set_bb_state(vm, bb_id,
2113 VIRTIO_MEM_BBM_BB_PLUGGED);
2115 virtio_mem_bbm_set_bb_state(vm, bb_id,
2116 VIRTIO_MEM_BBM_BB_UNUSED);
2119 rollback_safe_unplug:
2120 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2121 page = pfn_to_online_page(pfn);
2124 virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
2126 virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
2127 mutex_unlock(&vm->hotplug_mutex);
2132 * Test if a big block is completely offline.
2134 static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
2135 unsigned long bb_id)
2137 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2138 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2141 for (pfn = start_pfn; pfn < start_pfn + nr_pages;
2142 pfn += PAGES_PER_SECTION) {
2143 if (pfn_to_online_page(pfn))
2151 * Test if a big block is completely onlined to ZONE_MOVABLE (or offline).
2153 static bool virtio_mem_bbm_bb_is_movable(struct virtio_mem *vm,
2154 unsigned long bb_id)
2156 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2157 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2161 for (pfn = start_pfn; pfn < start_pfn + nr_pages;
2162 pfn += PAGES_PER_SECTION) {
2163 page = pfn_to_online_page(pfn);
2166 if (page_zonenum(page) != ZONE_MOVABLE)
2173 static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
2175 uint64_t nb_bb = diff / vm->bbm.bb_size;
2183 * Try to unplug big blocks. Similar to SBM, start with offline big blocks.
2186 for (i = 0; i < 3; i++) {
2187 virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
2191 * As we're holding no locks, these checks are racy,
2192 * but we don't care.
2194 if (i == 0 && !virtio_mem_bbm_bb_is_offline(vm, bb_id))
2196 if (i == 1 && !virtio_mem_bbm_bb_is_movable(vm, bb_id))
2198 rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
2206 if (i == 0 && !unplug_online)
2210 return nb_bb ? -EBUSY : 0;
2214 * Try to unplug the requested amount of memory.
2216 static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
2219 return virtio_mem_sbm_unplug_request(vm, diff);
2220 return virtio_mem_bbm_unplug_request(vm, diff);
2224 * Try to unplug all blocks that couldn't be unplugged before, for example,
2225 * because the hypervisor was busy.
2227 static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
2233 virtio_mem_bbm_for_each_bb(vm, id,
2234 VIRTIO_MEM_BBM_BB_PLUGGED) {
2235 rc = virtio_mem_bbm_unplug_bb(vm, id);
2238 virtio_mem_bbm_set_bb_state(vm, id,
2239 VIRTIO_MEM_BBM_BB_UNUSED);
2244 virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_PLUGGED) {
2245 rc = virtio_mem_sbm_unplug_mb(vm, id);
2248 virtio_mem_sbm_set_mb_state(vm, id,
2249 VIRTIO_MEM_SBM_MB_UNUSED);
2256 * Update all parts of the config that could have changed.
2258 static void virtio_mem_refresh_config(struct virtio_mem *vm)
2260 const struct range pluggable_range = mhp_get_pluggable_range(true);
2261 uint64_t new_plugged_size, usable_region_size, end_addr;
2263 /* the plugged_size is just a reflection of what _we_ did previously */
2264 virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
2266 if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
2267 vm->plugged_size = new_plugged_size;
2269 /* calculate the last usable memory block id */
2270 virtio_cread_le(vm->vdev, struct virtio_mem_config,
2271 usable_region_size, &usable_region_size);
2272 end_addr = min(vm->addr + usable_region_size - 1,
2273 pluggable_range.end);
2276 vm->sbm.last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr);
2277 if (!IS_ALIGNED(end_addr + 1, memory_block_size_bytes()))
2278 vm->sbm.last_usable_mb_id--;
2280 vm->bbm.last_usable_bb_id = virtio_mem_phys_to_bb_id(vm,
2282 if (!IS_ALIGNED(end_addr + 1, vm->bbm.bb_size))
2283 vm->bbm.last_usable_bb_id--;
2286 * If we cannot plug any of our device memory (e.g., nothing in the
2287 * usable region is addressable), the last usable memory block id will
2288 * be smaller than the first usable memory block id. We'll stop
2289 * attempting to add memory with -ENOSPC from our main loop.
2292 /* see if there is a request to change the size */
2293 virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
2294 &vm->requested_size);
2296 dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
	dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
}
/*
 * Workqueue function for handling plug/unplug requests and config updates.
 */
static void virtio_mem_run_wq(struct work_struct *work)
{
	struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
	uint64_t diff;
	int rc;
2309 if (unlikely(vm->in_kdump)) {
2310 dev_warn_once(&vm->vdev->dev,
			      "unexpected workqueue run in kdump kernel\n");
		return;
	}
2315 hrtimer_cancel(&vm->retry_timer);
	atomic_set(&vm->wq_active, 1);
retry:
	rc = 0;
2324 /* Make sure we start with a clean state if there are leftovers. */
2325 if (unlikely(vm->unplug_all_required))
2326 rc = virtio_mem_send_unplug_all_request(vm);
2328 if (atomic_read(&vm->config_changed)) {
2329 atomic_set(&vm->config_changed, 0);
		virtio_mem_refresh_config(vm);
	}
2333 /* Unplug any leftovers from previous runs */
	if (!rc)
		rc = virtio_mem_unplug_pending_mb(vm);
2337 if (!rc && vm->requested_size != vm->plugged_size) {
2338 if (vm->requested_size > vm->plugged_size) {
2339 diff = vm->requested_size - vm->plugged_size;
2340 rc = virtio_mem_plug_request(vm, diff);
		} else {
			diff = vm->plugged_size - vm->requested_size;
			rc = virtio_mem_unplug_request(vm, diff);
		}
	}
	switch (rc) {
	case 0:
		vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
		break;
	case -ENOSPC:
		/*
		 * We cannot add any more memory (alignment, physical limit)
		 * or we have too many offline memory blocks.
		 */
		break;
	case -ETXTBSY:
		/*
		 * The hypervisor cannot process our request right now
		 * (e.g., out of memory, migrating);
		 */
	case -EBUSY:
		/*
		 * We cannot free up any memory to unplug it (all plugged memory
		 * is busy).
		 */
	case -ENOMEM:
		/* Out of memory, try again later. */
		hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
			      HRTIMER_MODE_REL);
		break;
	case -EAGAIN:
		/* Retry immediately (e.g., the config changed). */
		goto retry;
	default:
		/* Unknown error, mark as broken */
		dev_err(&vm->vdev->dev,
			"unknown error, marking device broken: %d\n", rc);
		vm->broken = true;
	}

	atomic_set(&vm->wq_active, 0);
}
static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
{
	struct virtio_mem *vm = container_of(timer, struct virtio_mem,
					     retry_timer);
2390 virtio_mem_retry(vm);
2391 vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
2392 VIRTIO_MEM_RETRY_TIMER_MAX_MS);
	return HRTIMER_NORESTART;
}
static void virtio_mem_handle_response(struct virtqueue *vq)
{
	struct virtio_mem *vm = vq->vdev->priv;

	wake_up(&vm->host_resp);
}
static int virtio_mem_init_vq(struct virtio_mem *vm)
{
	struct virtqueue *vq;

	vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
				   "guest-request");
	if (IS_ERR(vq))
		return PTR_ERR(vq);
	vm->vq = vq;

	return 0;
}
static int virtio_mem_init_hotplug(struct virtio_mem *vm)
{
	const struct range pluggable_range = mhp_get_pluggable_range(true);
	uint64_t unit_pages, sb_size, addr;
	int rc;
2422 /* bad device setup - warn only */
2423 if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
2424 dev_warn(&vm->vdev->dev,
2425 "The alignment of the physical start address can make some memory unusable.\n");
2426 if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
2427 dev_warn(&vm->vdev->dev,
2428 "The alignment of the physical end address can make some memory unusable.\n");
2429 if (vm->addr < pluggable_range.start ||
2430 vm->addr + vm->region_size - 1 > pluggable_range.end)
2431 dev_warn(&vm->vdev->dev,
2432 "Some device memory is not addressable/pluggable. This can make some memory unusable.\n");
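	/*
	 * Memory is only ever added to Linux in Linux memory block (SBM) or
	 * big block (BBM) granularity, so device memory in front of the first
	 * aligned boundary or behind the last one can never be exposed to
	 * Linux - hence the warnings above.
	 */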
2434 /* Prepare the offline threshold - make sure we can add two blocks. */
2435 vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
2436 VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
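	/*
	 * The threshold bounds how much added-but-still-offline memory we
	 * tolerate: once exceeded, we temporarily stop adding more memory
	 * (see the "too many offline memory blocks" case in
	 * virtio_mem_run_wq()).
	 */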
	/*
	 * We want subblocks to span at least MAX_ORDER_NR_PAGES and
	 * pageblock_nr_pages pages. This:
	 * - Simplifies our page onlining code (virtio_mem_online_page_cb)
	 *   and fake page onlining code (virtio_mem_fake_online).
	 * - Is required for now for alloc_contig_range() to work reliably -
	 *   it doesn't properly handle smaller granularity on ZONE_NORMAL.
	 */
2446 sb_size = max_t(uint64_t, MAX_ORDER_NR_PAGES,
2447 pageblock_nr_pages) * PAGE_SIZE;
2448 sb_size = max_t(uint64_t, vm->device_block_size, sb_size);
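	/*
	 * For example, on x86-64 with 4 KiB base pages this typically yields
	 * sb_size = 4 MiB (MAX_ORDER_NR_PAGES dominates the 2 MiB pageblock
	 * and the usual 2 MiB device block size); with 128 MiB Linux memory
	 * blocks, SBM is selected with 32 subblocks per memory block.
	 */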
2450 if (sb_size < memory_block_size_bytes() && !force_bbm) {
		/* SBM: At least two subblocks per Linux memory block. */
		vm->in_sbm = true;
		vm->sbm.sb_size = sb_size;
		vm->sbm.sbs_per_mb = memory_block_size_bytes() /
				     vm->sbm.sb_size;
2457 /* Round up to the next full memory block */
2458 addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
2459 memory_block_size_bytes() - 1;
2460 vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
2461 vm->sbm.next_mb_id = vm->sbm.first_mb_id;
	} else {
		/* BBM: At least one Linux memory block. */
2464 vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size,
2465 memory_block_size_bytes());
2467 if (bbm_block_size) {
2468 if (!is_power_of_2(bbm_block_size)) {
2469 dev_warn(&vm->vdev->dev,
2470 "bbm_block_size is not a power of 2");
2471 } else if (bbm_block_size < vm->bbm.bb_size) {
2472 dev_warn(&vm->vdev->dev,
2473 "bbm_block_size is too small");
			} else {
				vm->bbm.bb_size = bbm_block_size;
			}
		}
2479 /* Round up to the next aligned big block */
2480 addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
2481 vm->bbm.bb_size - 1;
2482 vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
2483 vm->bbm.next_bb_id = vm->bbm.first_bb_id;
2485 /* Make sure we can add two big blocks. */
2486 vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
					       vm->offline_threshold);
	}
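	/*
	 * For example, a hypothetical 1 GiB device block size (larger than a
	 * 128 MiB Linux memory block) selects BBM with bb_size = 1 GiB, i.e.,
	 * each big block spans eight Linux memory blocks and memory is
	 * added/removed in 1 GiB granularity.
	 */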
2490 dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
2491 memory_block_size_bytes());
	if (vm->in_sbm)
		dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
			 (unsigned long long)vm->sbm.sb_size);
	else
		dev_info(&vm->vdev->dev, "big block size: 0x%llx",
			 (unsigned long long)vm->bbm.bb_size);
2499 /* create the parent resource for all memory */
	rc = virtio_mem_create_resource(vm);
	if (rc)
		return rc;
2504 /* use a single dynamic memory group to cover the whole memory device */
	if (vm->in_sbm)
		unit_pages = PHYS_PFN(memory_block_size_bytes());
	else
		unit_pages = PHYS_PFN(vm->bbm.bb_size);
	rc = memory_group_register_dynamic(vm->nid, unit_pages);
	if (rc < 0)
		goto out_del_resource;
	vm->mgid = rc;
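	/*
	 * Grouping all memory blocks of this device into one dynamic memory
	 * group allows core MM to treat them as belonging together, e.g.,
	 * when auto-onlining distributes a device's blocks between kernel
	 * zones and ZONE_MOVABLE.
	 */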
	/*
	 * If we still have memory plugged, we have to unplug all memory first.
	 * Registering our parent resource makes sure that this memory isn't
	 * actually in use (e.g., trying to reload the driver).
	 */
2519 if (vm->plugged_size) {
2520 vm->unplug_all_required = true;
		dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
	}
2524 /* register callbacks */
2525 vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
2526 rc = register_memory_notifier(&vm->memory_notifier);
	if (rc)
		goto out_unreg_group;
	rc = register_virtio_mem_device(vm);
	if (rc)
		goto out_unreg_mem;

	return 0;
out_unreg_mem:
	unregister_memory_notifier(&vm->memory_notifier);
out_unreg_group:
	memory_group_unregister(vm->mgid);
out_del_resource:
	virtio_mem_delete_resource(vm);
	return rc;
}
2543 #ifdef CONFIG_PROC_VMCORE
static int virtio_mem_send_state_request(struct virtio_mem *vm, uint64_t addr,
					 uint64_t size)
{
2547 const uint64_t nb_vm_blocks = size / vm->device_block_size;
2548 const struct virtio_mem_req req = {
2549 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_STATE),
2550 .u.state.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.state.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};
	int rc = -ENOMEM;
	dev_dbg(&vm->vdev->dev, "requesting state: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);
2558 switch (virtio_mem_send_request(vm, &req)) {
2559 case VIRTIO_MEM_RESP_ACK:
2560 return virtio16_to_cpu(vm->vdev, vm->resp.u.state.state);
	case VIRTIO_MEM_RESP_ERROR:
		rc = -EINVAL;
		break;
	default:
		break;
	}
	dev_dbg(&vm->vdev->dev, "requesting state failed: %d\n", rc);
	return rc;
}
static bool virtio_mem_vmcore_pfn_is_ram(struct vmcore_cb *cb,
					 unsigned long pfn)
{
	struct virtio_mem *vm = container_of(cb, struct virtio_mem,
					     vmcore_cb);
	uint64_t addr = PFN_PHYS(pfn);
	bool is_ram;
	int rc;

	if (!virtio_mem_contains_range(vm, addr, PAGE_SIZE))
		return true;
	if (!vm->plugged_size)
		return false;
	/*
	 * We have to serialize device requests and access to the information
	 * about the block queried last.
	 */
2590 mutex_lock(&vm->hotplug_mutex);
2592 addr = ALIGN_DOWN(addr, vm->device_block_size);
2593 if (addr != vm->last_block_addr) {
2594 rc = virtio_mem_send_state_request(vm, addr,
2595 vm->device_block_size);
2596 /* On any kind of error, we're going to signal !ram. */
2597 if (rc == VIRTIO_MEM_STATE_PLUGGED)
2598 vm->last_block_plugged = true;
		else
			vm->last_block_plugged = false;
		vm->last_block_addr = addr;
	}
2604 is_ram = vm->last_block_plugged;
	mutex_unlock(&vm->hotplug_mutex);

	return is_ram;
}
2608 #endif /* CONFIG_PROC_VMCORE */
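/*
 * Once registered via virtio_mem_init_kdump() below, the callback above lets
 * /proc/vmcore skip device blocks that are not plugged, so dump tools never
 * read unplugged device memory (which may be inaccessible when
 * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE was negotiated).
 */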
static int virtio_mem_init_kdump(struct virtio_mem *vm)
{
2612 #ifdef CONFIG_PROC_VMCORE
2613 dev_info(&vm->vdev->dev, "memory hot(un)plug disabled in kdump kernel\n");
2614 vm->vmcore_cb.pfn_is_ram = virtio_mem_vmcore_pfn_is_ram;
	register_vmcore_cb(&vm->vmcore_cb);
	return 0;
#else /* CONFIG_PROC_VMCORE */
	dev_warn(&vm->vdev->dev, "disabled in kdump kernel without vmcore\n");
	return -EBUSY;
#endif /* CONFIG_PROC_VMCORE */
}
static int virtio_mem_init(struct virtio_mem *vm)
{
	uint16_t node_id;

	if (!vm->vdev->config->get) {
		dev_err(&vm->vdev->dev, "config access disabled\n");
		return -EINVAL;
	}
2632 /* Fetch all properties that can't change. */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&vm->plugged_size);
2635 virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
2636 &vm->device_block_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
			&node_id);
2639 vm->nid = virtio_mem_translate_node_id(vm, node_id);
2640 virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
			&vm->region_size);
2644 /* Determine the nid for the device based on the lowest address. */
2645 if (vm->nid == NUMA_NO_NODE)
2646 vm->nid = memory_add_physaddr_to_nid(vm->addr);
2648 dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
2649 dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
2650 dev_info(&vm->vdev->dev, "device block size: 0x%llx",
2651 (unsigned long long)vm->device_block_size);
2652 if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
2653 dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
	/*
	 * We don't want to (un)plug or reuse any memory when in kdump. The
	 * memory is still accessible (but not exposed to Linux).
	 */
	if (vm->in_kdump)
		return virtio_mem_init_kdump(vm);

	return virtio_mem_init_hotplug(vm);
}
static int virtio_mem_create_resource(struct virtio_mem *vm)
{
	/*
	 * When force-unloading the driver and removing the device, we
	 * could have a garbage pointer. Duplicate the string.
	 */
	const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);

	if (!name)
		return -ENOMEM;
2675 /* Disallow mapping device memory via /dev/mem completely. */
2676 vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
2677 name, IORESOURCE_SYSTEM_RAM |
2678 IORESOURCE_EXCLUSIVE);
2679 if (!vm->parent_resource) {
		kfree(name);
		dev_warn(&vm->vdev->dev, "could not reserve device region\n");
		dev_info(&vm->vdev->dev,
			 "reloading the driver is not supported\n");
		return -EBUSY;
	}
	/* The memory is not actually busy - make add_memory() work. */
	vm->parent_resource->flags &= ~IORESOURCE_BUSY;
	return 0;
}
static void virtio_mem_delete_resource(struct virtio_mem *vm)
{
	const char *name;

	if (!vm->parent_resource)
		return;

2699 name = vm->parent_resource->name;
2700 release_resource(vm->parent_resource);
	kfree(vm->parent_resource);
	kfree(name);
	vm->parent_resource = NULL;
}
static int virtio_mem_range_has_system_ram(struct resource *res, void *arg)
{
	return 1;
}
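/*
 * Check whether any busy System RAM resource - i.e., memory we added and that
 * was not removed again - still intersects the device-managed range.
 */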
static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
{
2713 const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
2715 return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr,
2716 vm->addr + vm->region_size, NULL,
				   virtio_mem_range_has_system_ram) == 1;
}
static int virtio_mem_probe(struct virtio_device *vdev)
{
	struct virtio_mem *vm;
	int rc;
2725 BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
2726 BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);
	vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	init_waitqueue_head(&vm->host_resp);
	vm->vdev = vdev;
2734 INIT_WORK(&vm->wq, virtio_mem_run_wq);
2735 mutex_init(&vm->hotplug_mutex);
2736 INIT_LIST_HEAD(&vm->next);
2737 spin_lock_init(&vm->removal_lock);
2738 hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2739 vm->retry_timer.function = virtio_mem_timer_expired;
2740 vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
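	/*
	 * The retry interval doubles on each timer expiry (see
	 * virtio_mem_timer_expired()), capped at VIRTIO_MEM_RETRY_TIMER_MAX_MS.
	 */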
2741 vm->in_kdump = is_kdump_kernel();
2743 /* register the virtqueue */
	rc = virtio_mem_init_vq(vm);
	if (rc)
		goto out_free_vm;
2748 /* initialize the device by querying the config */
	rc = virtio_mem_init(vm);
	if (rc)
		goto out_del_vq;
2753 virtio_device_ready(vdev);
2755 /* trigger a config update to start processing the requested_size */
2756 if (!vm->in_kdump) {
2757 atomic_set(&vm->config_changed, 1);
		queue_work(system_freezable_wq, &vm->wq);
	}

	return 0;
out_del_vq:
	vdev->config->del_vqs(vdev);
out_free_vm:
	kfree(vm);
	vdev->priv = NULL;

	return rc;
}
static void virtio_mem_deinit_hotplug(struct virtio_mem *vm)
{
	unsigned long mb_id;
	int rc;
	/*
	 * Make sure the workqueue won't be triggered anymore and no memory
	 * blocks can be onlined/offlined until we're finished here.
	 */
2780 mutex_lock(&vm->hotplug_mutex);
2781 spin_lock_irq(&vm->removal_lock);
2782 vm->removing = true;
2783 spin_unlock_irq(&vm->removal_lock);
2784 mutex_unlock(&vm->hotplug_mutex);
2786 /* wait until the workqueue stopped */
2787 cancel_work_sync(&vm->wq);
2788 hrtimer_cancel(&vm->retry_timer);
	/*
	 * After we unregistered our callbacks, user space can online
	 * partially plugged offline blocks. Make sure to remove them.
	 */
2795 virtio_mem_sbm_for_each_mb(vm, mb_id,
2796 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
		rc = virtio_mem_sbm_remove_mb(vm, mb_id);
		BUG_ON(rc);
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_UNUSED);
	}
	/*
	 * After we unregistered our callbacks, user space can no longer
	 * offline partially plugged online memory blocks. No need to worry
	 * about them.
	 */
2809 /* unregister callbacks */
2810 unregister_virtio_mem_device(vm);
2811 unregister_memory_notifier(&vm->memory_notifier);
	/*
	 * There is no way we could reliably remove all memory we have added to
	 * the system. And there is no way to stop the driver/device from going
	 * away. Warn at least.
	 */
2818 if (virtio_mem_has_memory_added(vm)) {
2819 dev_warn(&vm->vdev->dev,
			 "device still has system memory added\n");
	} else {
		virtio_mem_delete_resource(vm);
		kfree_const(vm->resource_name);
		memory_group_unregister(vm->mgid);
	}
2827 /* remove all tracking data - no locking needed */
	if (vm->in_sbm) {
		vfree(vm->sbm.mb_states);
		vfree(vm->sbm.sb_states);
	} else {
		vfree(vm->bbm.bb_states);
	}
}
static void virtio_mem_deinit_kdump(struct virtio_mem *vm)
{
#ifdef CONFIG_PROC_VMCORE
	unregister_vmcore_cb(&vm->vmcore_cb);
#endif /* CONFIG_PROC_VMCORE */
}
static void virtio_mem_remove(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;

	if (vm->in_kdump)
		virtio_mem_deinit_kdump(vm);
	else
		virtio_mem_deinit_hotplug(vm);
2852 /* reset the device and cleanup the queues */
2853 virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);

	kfree(vm);
	vdev->priv = NULL;
}
static void virtio_mem_config_changed(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;

	if (unlikely(vm->in_kdump))
		return;

	atomic_set(&vm->config_changed, 1);
	virtio_mem_retry(vm);
}
2871 #ifdef CONFIG_PM_SLEEP
static int virtio_mem_freeze(struct virtio_device *vdev)
{
	/*
	 * When restarting the VM, all memory is usually unplugged. Don't
	 * allow to suspend/hibernate.
	 */
	dev_err(&vdev->dev, "save/restore not supported.\n");
	return -EPERM;
}

static int virtio_mem_restore(struct virtio_device *vdev)
{
	return -EPERM;
}
#endif /* CONFIG_PM_SLEEP */
2888 static unsigned int virtio_mem_features[] = {
2889 #if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
2890 VIRTIO_MEM_F_ACPI_PXM,
#endif
	VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE,
};
2895 static const struct virtio_device_id virtio_mem_id_table[] = {
	{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
2900 static struct virtio_driver virtio_mem_driver = {
2901 .feature_table = virtio_mem_features,
2902 .feature_table_size = ARRAY_SIZE(virtio_mem_features),
2903 .driver.name = KBUILD_MODNAME,
2904 .driver.owner = THIS_MODULE,
2905 .id_table = virtio_mem_id_table,
2906 .probe = virtio_mem_probe,
2907 .remove = virtio_mem_remove,
2908 .config_changed = virtio_mem_config_changed,
2909 #ifdef CONFIG_PM_SLEEP
2910 .freeze = virtio_mem_freeze,
	.restore = virtio_mem_restore,
#endif
};
2915 module_virtio_driver(virtio_mem_driver);
2916 MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
2917 MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
2918 MODULE_DESCRIPTION("Virtio-mem driver");
2919 MODULE_LICENSE("GPL");