1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Virtio-mem device driver.
5 * Copyright Red Hat, Inc. 2020
7 * Author(s): David Hildenbrand <david@redhat.com>
10 #include <linux/virtio.h>
11 #include <linux/virtio_mem.h>
12 #include <linux/workqueue.h>
13 #include <linux/slab.h>
14 #include <linux/module.h>
16 #include <linux/memory_hotplug.h>
17 #include <linux/memory.h>
18 #include <linux/hrtimer.h>
19 #include <linux/crash_dump.h>
20 #include <linux/mutex.h>
21 #include <linux/bitmap.h>
22 #include <linux/lockdep.h>
24 #include <acpi/acpi_numa.h>
26 static bool unplug_online = true;
27 module_param(unplug_online, bool, 0644);
28 MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");
30 static bool force_bbm;
31 module_param(force_bbm, bool, 0444);
32 MODULE_PARM_DESC(force_bbm,
33 "Force Big Block Mode. Default is 0 (auto-selection)");
35 static unsigned long bbm_block_size;
36 module_param(bbm_block_size, ulong, 0444);
37 MODULE_PARM_DESC(bbm_block_size,
38 "Big Block size in bytes. Default is 0 (auto-detection).");
40 static bool bbm_safe_unplug = true;
41 module_param(bbm_safe_unplug, bool, 0444);
42 MODULE_PARM_DESC(bbm_safe_unplug,
43 "Use a safe unplug mechanism in BBM, avoiding long/endless loops");
46 * virtio-mem currently supports the following modes of operation:
48 * * Sub Block Mode (SBM): A Linux memory block spans 2..X subblocks (SB). The
49 * size of a Sub Block (SB) is determined based on the device block size, the
50 * pageblock size, and the maximum allocation granularity of the buddy.
51 * Subblocks within a Linux memory block might either be plugged or unplugged.
52 * Memory is added/removed to Linux MM in Linux memory block granularity.
54 * * Big Block Mode (BBM): A Big Block (BB) spans 1..X Linux memory blocks.
55 * Memory is added/removed to Linux MM in Big Block granularity.
57 * The mode is determined automatically based on the Linux memory block size
58 * and the device block size.
60 * User space / core MM (auto onlining) is responsible for onlining added
61 * Linux memory blocks - and for selecting a zone. Linux Memory Blocks are
62 * always onlined separately, and all memory within a Linux memory block is
63 * onlined to the same zone - virtio-mem relies on this behavior.
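/*
 * Illustrative example of the mode selection described above (numbers are
 * for exposition only; the actual decision is made during device
 * initialization): on x86-64 with 4 KiB base pages, 2 MiB pageblocks, 4 MiB
 * maximum buddy allocation granularity and 128 MiB Linux memory blocks, a
 * device block size of 2 MiB results in SBM with 4 MiB subblocks, i.e.
 * 128 MiB / 4 MiB = 32 subblocks per Linux memory block. A device block size
 * larger than a Linux memory block (e.g., 2 GiB) cannot be expressed in SBM,
 * so the driver would operate in BBM with 2 GiB big blocks instead.
 */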
67 * State of a Linux memory block in SBM.
69 enum virtio_mem_sbm_mb_state {
70 /* Unplugged, not added to Linux. Can be reused later. */
71 VIRTIO_MEM_SBM_MB_UNUSED = 0,
72 /* (Partially) plugged, not added to Linux. Error on add_memory(). */
73 VIRTIO_MEM_SBM_MB_PLUGGED,
74 /* Fully plugged, fully added to Linux, offline. */
75 VIRTIO_MEM_SBM_MB_OFFLINE,
76 /* Partially plugged, fully added to Linux, offline. */
77 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
78 /* Fully plugged, fully added to Linux, onlined to a kernel zone. */
79 VIRTIO_MEM_SBM_MB_KERNEL,
80 /* Partially plugged, fully added to Linux, onlined to a kernel zone. */
81 VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
82 /* Fully plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
83 VIRTIO_MEM_SBM_MB_MOVABLE,
84 /* Partially plugged, fully added to Linux, onlined to ZONE_MOVABLE. */
85 VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
86 VIRTIO_MEM_SBM_MB_COUNT
90 * State of a Big Block (BB) in BBM, covering 1..X Linux memory blocks.
92 enum virtio_mem_bbm_bb_state {
93 /* Unplugged, not added to Linux. Can be reused later. */
94 VIRTIO_MEM_BBM_BB_UNUSED = 0,
95 /* Plugged, not added to Linux. Error on add_memory(). */
96 VIRTIO_MEM_BBM_BB_PLUGGED,
97 /* Plugged and added to Linux. */
98 VIRTIO_MEM_BBM_BB_ADDED,
99 /* All online parts are fake-offline, ready to remove. */
100 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE,
101 VIRTIO_MEM_BBM_BB_COUNT
105 struct virtio_device *vdev;
107 /* We might first have to unplug all memory when starting up. */
108 bool unplug_all_required;
110 /* Workqueue that processes the plug/unplug requests. */
111 struct work_struct wq;
113 atomic_t config_changed;
115 /* Virtqueue for guest->host requests. */
116 struct virtqueue *vq;
118 /* Wait for a host response to a guest request. */
119 wait_queue_head_t host_resp;
121 /* Space for one guest request and the host response. */
122 struct virtio_mem_req req;
123 struct virtio_mem_resp resp;
125 /* The current size of the device. */
126 uint64_t plugged_size;
127 /* The requested size of the device. */
128 uint64_t requested_size;
130 /* The device block size (for communicating with the device). */
131 uint64_t device_block_size;
132 /* The determined node id for all memory of the device. */
134 /* Physical start address of the memory region. */
136 /* Maximum region size in bytes. */
137 uint64_t region_size;
139 /* The parent resource for all memory added via this device. */
140 struct resource *parent_resource;
142 * Copy of "System RAM (virtio_mem)" to be used for
143 * add_memory_driver_managed().
145 const char *resource_name;
148 * We don't want to add too much memory if it's not getting onlined,
149 * to avoid running OOM. Besides this threshold, we allow having at
150 * least two offline blocks at a time (whichever is bigger).
152 #define VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD (1024 * 1024 * 1024)
153 atomic64_t offline_size;
154 uint64_t offline_threshold;
156 /* If set, the driver is in SBM, otherwise in BBM. */
161 /* Id of the first memory block of this device. */
162 unsigned long first_mb_id;
163 /* Id of the last usable memory block of this device. */
164 unsigned long last_usable_mb_id;
165 /* Id of the next memory block to prepare when needed. */
166 unsigned long next_mb_id;
168 /* The subblock size. */
170 /* The number of subblocks per Linux memory block. */
173 /* Summary of all memory block states. */
174 unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];
177 * One byte state per memory block. Allocated via
178 * vmalloc(). Resized (alloc+copy+free) on demand.
180 * With 128 MiB memory blocks, we have states for 512
181 * GiB of memory in one 4 KiB page.
186 * Bitmap: one bit per subblock. Allocated similar to sbm.mb_states.
189 * A set bit means the corresponding subblock is
190 * plugged, otherwise it's unplugged.
192 * With 4 MiB subblocks, we manage 128 GiB of memory in one 4 KiB page.
195 unsigned long *sb_states;
199 /* Id of the first big block of this device. */
200 unsigned long first_bb_id;
201 /* Id of the last usable big block of this device. */
202 unsigned long last_usable_bb_id;
203 /* Id of the next device block to prepare when needed. */
204 unsigned long next_bb_id;
206 /* Summary of all big block states. */
207 unsigned long bb_count[VIRTIO_MEM_BBM_BB_COUNT];
209 /* One byte state per big block. See sbm.mb_states. */
212 /* The block size used for plugging/adding/removing. */
218 * Mutex that protects the sbm.mb_count, sbm.mb_states,
219 * sbm.sb_states, bbm.bb_count, and bbm.bb_states
221 * When this lock is held the pointers can't change, ONLINE and
222 * OFFLINE blocks can't change the state and no subblocks will get plugged/unplugged.
225 struct mutex hotplug_mutex;
228 /* An error occurred that we cannot handle - stop processing requests. */
231 /* The driver is being removed. */
232 spinlock_t removal_lock;
235 /* Timer for retrying to plug/unplug memory. */
236 struct hrtimer retry_timer;
237 unsigned int retry_timer_ms;
238 #define VIRTIO_MEM_RETRY_TIMER_MIN_MS 50000
239 #define VIRTIO_MEM_RETRY_TIMER_MAX_MS 300000
241 /* Memory notifier (online/offline events). */
242 struct notifier_block memory_notifier;
244 /* Next device in the list of virtio-mem devices. */
245 struct list_head next;
249 * We have to share a single online_page callback among all virtio-mem
250 * devices. We use RCU to iterate the list in the callback.
252 static DEFINE_MUTEX(virtio_mem_mutex);
253 static LIST_HEAD(virtio_mem_devices);
255 static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
256 static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
257 unsigned long nr_pages);
258 static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
259 unsigned long nr_pages);
260 static void virtio_mem_retry(struct virtio_mem *vm);
263 * Register a virtio-mem device so it will be considered for the online_page callback.
266 static int register_virtio_mem_device(struct virtio_mem *vm)
270 /* First device registers the callback. */
271 mutex_lock(&virtio_mem_mutex);
272 if (list_empty(&virtio_mem_devices))
273 rc = set_online_page_callback(&virtio_mem_online_page_cb);
275 list_add_rcu(&vm->next, &virtio_mem_devices);
276 mutex_unlock(&virtio_mem_mutex);
282 * Unregister a virtio-mem device so it will no longer be considered for the
283 * online_page callback.
285 static void unregister_virtio_mem_device(struct virtio_mem *vm)
287 /* Last device unregisters the callback. */
288 mutex_lock(&virtio_mem_mutex);
289 list_del_rcu(&vm->next);
290 if (list_empty(&virtio_mem_devices))
291 restore_online_page_callback(&virtio_mem_online_page_cb);
292 mutex_unlock(&virtio_mem_mutex);
298 * Calculate the memory block id of a given address.
300 static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
302 return addr / memory_block_size_bytes();
306 * Calculate the physical start address of a given memory block id.
308 static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
310 return mb_id * memory_block_size_bytes();
314 * Calculate the big block id of a given address.
316 static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
319 return addr / vm->bbm.bb_size;
323 * Calculate the physical start address of a given big block id.
325 static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
328 return bb_id * vm->bbm.bb_size;
332 * Calculate the subblock id of a given address.
334 static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
337 const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
338 const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);
340 return (addr - mb_addr) / vm->sbm.sb_size;
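/*
 * Worked example for the address conversion helpers above (illustrative
 * values): with 128 MiB Linux memory blocks and 4 MiB subblocks, the address
 * 0x140c00000 (5 GiB + 12 MiB) yields mb_id = 0x140c00000 / 128 MiB = 40,
 * mb_addr = 40 * 128 MiB = 0x140000000 and sb_id = 12 MiB / 4 MiB = 3.
 */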
344 * Set the state of a big block, taking care of the state counter.
346 static void virtio_mem_bbm_set_bb_state(struct virtio_mem *vm,
348 enum virtio_mem_bbm_bb_state state)
350 const unsigned long idx = bb_id - vm->bbm.first_bb_id;
351 enum virtio_mem_bbm_bb_state old_state;
353 old_state = vm->bbm.bb_states[idx];
354 vm->bbm.bb_states[idx] = state;
356 BUG_ON(vm->bbm.bb_count[old_state] == 0);
357 vm->bbm.bb_count[old_state]--;
358 vm->bbm.bb_count[state]++;
362 * Get the state of a big block.
364 static enum virtio_mem_bbm_bb_state virtio_mem_bbm_get_bb_state(struct virtio_mem *vm,
367 return vm->bbm.bb_states[bb_id - vm->bbm.first_bb_id];
371 * Prepare the big block state array for the next big block.
373 static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm)
375 unsigned long old_bytes = vm->bbm.next_bb_id - vm->bbm.first_bb_id;
376 unsigned long new_bytes = old_bytes + 1;
377 int old_pages = PFN_UP(old_bytes);
378 int new_pages = PFN_UP(new_bytes);
381 if (vm->bbm.bb_states && old_pages == new_pages)
384 new_array = vzalloc(new_pages * PAGE_SIZE);
388 mutex_lock(&vm->hotplug_mutex);
389 if (vm->bbm.bb_states)
390 memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE);
391 vfree(vm->bbm.bb_states);
392 vm->bbm.bb_states = new_array;
393 mutex_unlock(&vm->hotplug_mutex);
398 #define virtio_mem_bbm_for_each_bb(_vm, _bb_id, _state) \
399 for (_bb_id = _vm->bbm.first_bb_id; \
400 _bb_id < vm->bbm.next_bb_id && _vm->bbm.bb_count[_state]; \
402 if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
404 #define virtio_mem_bbm_for_each_bb_rev(_vm, _bb_id, _state) \
405 for (_bb_id = _vm->bbm.next_bb_id - 1; \
406 _bb_id >= vm->bbm.first_bb_id && _vm->bbm.bb_count[_state]; \
408 if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
411 * Set the state of a memory block, taking care of the state counter.
413 static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm,
414 unsigned long mb_id, uint8_t state)
416 const unsigned long idx = mb_id - vm->sbm.first_mb_id;
419 old_state = vm->sbm.mb_states[idx];
420 vm->sbm.mb_states[idx] = state;
422 BUG_ON(vm->sbm.mb_count[old_state] == 0);
423 vm->sbm.mb_count[old_state]--;
424 vm->sbm.mb_count[state]++;
428 * Get the state of a memory block.
430 static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm,
433 const unsigned long idx = mb_id - vm->sbm.first_mb_id;
435 return vm->sbm.mb_states[idx];
439 * Prepare the state array for the next memory block.
441 static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
443 int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
444 int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
447 if (vm->sbm.mb_states && old_pages == new_pages)
450 new_array = vzalloc(new_pages * PAGE_SIZE);
454 mutex_lock(&vm->hotplug_mutex);
455 if (vm->sbm.mb_states)
456 memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
457 vfree(vm->sbm.mb_states);
458 vm->sbm.mb_states = new_array;
459 mutex_unlock(&vm->hotplug_mutex);
464 #define virtio_mem_sbm_for_each_mb(_vm, _mb_id, _state) \
465 for (_mb_id = _vm->sbm.first_mb_id; \
466 _mb_id < _vm->sbm.next_mb_id && _vm->sbm.mb_count[_state]; \
468 if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
470 #define virtio_mem_sbm_for_each_mb_rev(_vm, _mb_id, _state) \
471 for (_mb_id = _vm->sbm.next_mb_id - 1; \
472 _mb_id >= _vm->sbm.first_mb_id && _vm->sbm.mb_count[_state]; \
474 if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
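/*
 * Minimal usage sketch for the iteration macros above, patterned after the
 * plug/unplug request handlers further down in this file:
 *
 *	virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
 *		rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
 *		if (rc || !nb_sb)
 *			break;
 *		cond_resched();
 *	}
 *
 * The mb_count[]/bb_count[] term in the loop condition lets the walk stop
 * early once no block remains in the requested state.
 */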
477 * Calculate the bit number in the subblock bitmap for the given subblock
478 * inside the given memory block.
480 static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm,
481 unsigned long mb_id, int sb_id)
483 return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id;
487 * Mark all selected subblocks plugged.
489 * Will not modify the state of the memory block.
491 static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
492 unsigned long mb_id, int sb_id,
495 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
497 __bitmap_set(vm->sbm.sb_states, bit, count);
501 * Mark all selected subblocks unplugged.
503 * Will not modify the state of the memory block.
505 static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
506 unsigned long mb_id, int sb_id,
509 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
511 __bitmap_clear(vm->sbm.sb_states, bit, count);
515 * Test if all selected subblocks are plugged.
517 static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
518 unsigned long mb_id, int sb_id,
521 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
524 return test_bit(bit, vm->sbm.sb_states);
526 /* TODO: Helper similar to bitmap_set() */
527 return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >=
532 * Test if all selected subblocks are unplugged.
534 static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
535 unsigned long mb_id, int sb_id,
538 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
540 /* TODO: Helper similar to bitmap_set() */
541 return find_next_bit(vm->sbm.sb_states, bit + count, bit) >=
546 * Find the first unplugged subblock. Returns vm->sbm.sbs_per_mb in case there is none.
549 static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
552 const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0);
554 return find_next_zero_bit(vm->sbm.sb_states,
555 bit + vm->sbm.sbs_per_mb, bit) - bit;
559 * Prepare the subblock bitmap for the next memory block.
561 static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
563 const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id;
564 const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb;
565 const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb;
566 int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
567 int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
568 unsigned long *new_bitmap, *old_bitmap;
570 if (vm->sbm.sb_states && old_pages == new_pages)
573 new_bitmap = vzalloc(new_pages * PAGE_SIZE);
577 mutex_lock(&vm->hotplug_mutex);
579 memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
581 old_bitmap = vm->sbm.sb_states;
582 vm->sbm.sb_states = new_bitmap;
583 mutex_unlock(&vm->hotplug_mutex);
590 * Test if we could add memory without creating too much offline memory -
591 * to avoid running OOM if memory is getting onlined deferred.
593 static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size)
595 if (WARN_ON_ONCE(size > vm->offline_threshold))
598 return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold;
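/*
 * Worked example for the threshold check above (illustrative values): with
 * the default 1 GiB offline threshold and 512 MiB of added-but-not-yet-online
 * memory, adding another 128 MiB memory block is still allowed
 * (512 MiB + 128 MiB <= 1 GiB), while adding a 768 MiB big block would be
 * deferred until more of the already added memory has been onlined.
 */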
602 * Try adding memory to Linux. Will usually only fail if out of memory.
604 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
607 * Will not modify the state of memory blocks in virtio-mem.
609 static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
615 * When force-unloading the driver and we still have memory added to
616 * Linux, the resource name has to stay.
618 if (!vm->resource_name) {
619 vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
621 if (!vm->resource_name)
625 dev_dbg(&vm->vdev->dev, "adding memory: 0x%llx - 0x%llx\n", addr,
627 /* Memory might get onlined immediately. */
628 atomic64_add(size, &vm->offline_size);
629 rc = add_memory_driver_managed(vm->nid, addr, size, vm->resource_name,
632 atomic64_sub(size, &vm->offline_size);
633 dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
635 * TODO: Linux MM does not properly clean up yet in all cases
636 * where adding of memory failed - especially on -ENOMEM.
643 * See virtio_mem_add_memory(): Try adding a single Linux memory block.
645 static int virtio_mem_sbm_add_mb(struct virtio_mem *vm, unsigned long mb_id)
647 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
648 const uint64_t size = memory_block_size_bytes();
650 return virtio_mem_add_memory(vm, addr, size);
654 * See virtio_mem_add_memory(): Try adding a big block.
656 static int virtio_mem_bbm_add_bb(struct virtio_mem *vm, unsigned long bb_id)
658 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
659 const uint64_t size = vm->bbm.bb_size;
661 return virtio_mem_add_memory(vm, addr, size);
665 * Try removing memory from Linux. Will only fail if memory blocks aren't offline.
668 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
671 * Will not modify the state of memory blocks in virtio-mem.
673 static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
678 dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
680 rc = remove_memory(vm->nid, addr, size);
682 atomic64_sub(size, &vm->offline_size);
684 * We might have freed up memory we can now unplug, retry
685 * immediately instead of waiting.
687 virtio_mem_retry(vm);
689 dev_dbg(&vm->vdev->dev, "removing memory failed: %d\n", rc);
695 * See virtio_mem_remove_memory(): Try removing a single Linux memory block.
697 static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id)
699 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
700 const uint64_t size = memory_block_size_bytes();
702 return virtio_mem_remove_memory(vm, addr, size);
706 * Try offlining and removing memory from Linux.
708 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
711 * Will not modify the state of memory blocks in virtio-mem.
713 static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
719 dev_dbg(&vm->vdev->dev,
720 "offlining and removing memory: 0x%llx - 0x%llx\n", addr,
723 rc = offline_and_remove_memory(vm->nid, addr, size);
725 atomic64_sub(size, &vm->offline_size);
727 * We might have freed up memory we can now unplug, retry
728 * immediately instead of waiting.
730 virtio_mem_retry(vm);
732 dev_dbg(&vm->vdev->dev,
733 "offlining and removing memory failed: %d\n", rc);
739 * See virtio_mem_offline_and_remove_memory(): Try offlining and removing
740 * a single Linux memory block.
742 static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
745 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
746 const uint64_t size = memory_block_size_bytes();
748 return virtio_mem_offline_and_remove_memory(vm, addr, size);
752 * See virtio_mem_offline_and_remove_memory(): Try to offline and remove
753 * all Linux memory blocks covered by the big block.
755 static int virtio_mem_bbm_offline_and_remove_bb(struct virtio_mem *vm,
758 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
759 const uint64_t size = vm->bbm.bb_size;
761 return virtio_mem_offline_and_remove_memory(vm, addr, size);
765 * Trigger the workqueue so the device can perform its magic.
767 static void virtio_mem_retry(struct virtio_mem *vm)
771 spin_lock_irqsave(&vm->removal_lock, flags);
773 queue_work(system_freezable_wq, &vm->wq);
774 spin_unlock_irqrestore(&vm->removal_lock, flags);
777 static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
779 int node = NUMA_NO_NODE;
781 #if defined(CONFIG_ACPI_NUMA)
782 if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
783 node = pxm_to_node(node_id);
789 * Test if a virtio-mem device overlaps with the given range. Can be called
790 * from (notifier) callbacks locklessly.
792 static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start,
795 return start < vm->addr + vm->region_size && vm->addr < start + size;
799 * Test if a virtio-mem device contains a given range. Can be called from
800 * (notifier) callbacks locklessly.
802 static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start,
805 return start >= vm->addr && start + size <= vm->addr + vm->region_size;
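/*
 * Illustrative example for the two range predicates above: a device with
 * addr = 4 GiB and region_size = 16 GiB overlaps the range [3 GiB, 5 GiB)
 * but does not contain it, while it both overlaps and contains
 * [8 GiB, 8 GiB + 128 MiB).
 */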
808 static int virtio_mem_sbm_notify_going_online(struct virtio_mem *vm,
811 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
812 case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
813 case VIRTIO_MEM_SBM_MB_OFFLINE:
818 dev_warn_ratelimited(&vm->vdev->dev,
819 "memory block onlining denied\n");
823 static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
826 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
827 case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
828 case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
829 virtio_mem_sbm_set_mb_state(vm, mb_id,
830 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
832 case VIRTIO_MEM_SBM_MB_KERNEL:
833 case VIRTIO_MEM_SBM_MB_MOVABLE:
834 virtio_mem_sbm_set_mb_state(vm, mb_id,
835 VIRTIO_MEM_SBM_MB_OFFLINE);
843 static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
845 unsigned long start_pfn)
847 const bool is_movable = page_zonenum(pfn_to_page(start_pfn)) ==
851 switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
852 case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
853 new_state = VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL;
855 new_state = VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL;
857 case VIRTIO_MEM_SBM_MB_OFFLINE:
858 new_state = VIRTIO_MEM_SBM_MB_KERNEL;
860 new_state = VIRTIO_MEM_SBM_MB_MOVABLE;
866 virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
869 static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm,
872 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
876 for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
877 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
879 pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
880 sb_id * vm->sbm.sb_size);
881 virtio_mem_fake_offline_going_offline(pfn, nr_pages);
885 static void virtio_mem_sbm_notify_cancel_offline(struct virtio_mem *vm,
888 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
892 for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
893 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
895 pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
896 sb_id * vm->sbm.sb_size);
897 virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
901 static void virtio_mem_bbm_notify_going_offline(struct virtio_mem *vm,
904 unsigned long nr_pages)
907 * When marked as "fake-offline", all online memory of this device block
908 * is allocated by us. Otherwise, we don't have any memory allocated.
910 if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
911 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
913 virtio_mem_fake_offline_going_offline(pfn, nr_pages);
916 static void virtio_mem_bbm_notify_cancel_offline(struct virtio_mem *vm,
919 unsigned long nr_pages)
921 if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
922 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
924 virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
928 * This callback will either be called synchronously from add_memory() or
929 * asynchronously (e.g., triggered via user space). We have to be careful
930 * with locking when calling add_memory().
932 static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
933 unsigned long action, void *arg)
935 struct virtio_mem *vm = container_of(nb, struct virtio_mem,
937 struct memory_notify *mhp = arg;
938 const unsigned long start = PFN_PHYS(mhp->start_pfn);
939 const unsigned long size = PFN_PHYS(mhp->nr_pages);
943 if (!virtio_mem_overlaps_range(vm, start, size))
947 id = virtio_mem_phys_to_mb_id(start);
949 * In SBM, we add memory in separate memory blocks - we expect
950 * it to be onlined/offlined in the same granularity. Bail out
951 * if this ever changes.
953 if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
954 !IS_ALIGNED(start, memory_block_size_bytes())))
957 id = virtio_mem_phys_to_bb_id(vm, start);
959 * In BBM, we only care about onlining/offlining happening
960 * within a single big block, we don't care about the
961 * actual granularity as we don't track individual Linux
964 if (WARN_ON_ONCE(id != virtio_mem_phys_to_bb_id(vm, start + size - 1)))
969 * Avoid circular locking lockdep warnings. We lock the mutex
970 * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
971 * blocking_notifier_call_chain() has its own lock, which gets unlocked
972 * between both notifier calls and will bail out. False positive.
977 case MEM_GOING_OFFLINE:
978 mutex_lock(&vm->hotplug_mutex);
980 rc = notifier_from_errno(-EBUSY);
981 mutex_unlock(&vm->hotplug_mutex);
984 vm->hotplug_active = true;
986 virtio_mem_sbm_notify_going_offline(vm, id);
988 virtio_mem_bbm_notify_going_offline(vm, id,
992 case MEM_GOING_ONLINE:
993 mutex_lock(&vm->hotplug_mutex);
995 rc = notifier_from_errno(-EBUSY);
996 mutex_unlock(&vm->hotplug_mutex);
999 vm->hotplug_active = true;
1001 rc = virtio_mem_sbm_notify_going_online(vm, id);
1005 virtio_mem_sbm_notify_offline(vm, id);
1007 atomic64_add(size, &vm->offline_size);
1009 * Trigger the workqueue. Now that we have some offline memory,
1010 * maybe we can handle pending unplug requests.
1013 virtio_mem_retry(vm);
1015 vm->hotplug_active = false;
1016 mutex_unlock(&vm->hotplug_mutex);
1020 virtio_mem_sbm_notify_online(vm, id, mhp->start_pfn);
1022 atomic64_sub(size, &vm->offline_size);
1024 * Start adding more memory once we onlined half of our
1025 * threshold. Don't trigger if it's possibly due to our action
1026 * (e.g., us adding memory which gets onlined immediately from the core).
1029 if (!atomic_read(&vm->wq_active) &&
1030 virtio_mem_could_add_memory(vm, vm->offline_threshold / 2))
1031 virtio_mem_retry(vm);
1033 vm->hotplug_active = false;
1034 mutex_unlock(&vm->hotplug_mutex);
1036 case MEM_CANCEL_OFFLINE:
1037 if (!vm->hotplug_active)
1040 virtio_mem_sbm_notify_cancel_offline(vm, id);
1042 virtio_mem_bbm_notify_cancel_offline(vm, id,
1045 vm->hotplug_active = false;
1046 mutex_unlock(&vm->hotplug_mutex);
1048 case MEM_CANCEL_ONLINE:
1049 if (!vm->hotplug_active)
1051 vm->hotplug_active = false;
1052 mutex_unlock(&vm->hotplug_mutex);
1064 * Set a range of pages PG_offline. Remember pages that were never onlined
1065 * (via generic_online_page()) using PageDirty().
1067 static void virtio_mem_set_fake_offline(unsigned long pfn,
1068 unsigned long nr_pages, bool onlined)
1070 page_offline_begin();
1071 for (; nr_pages--; pfn++) {
1072 struct page *page = pfn_to_page(pfn);
1074 __SetPageOffline(page);
1077 /* FIXME: remove after cleanups */
1078 ClearPageReserved(page);
1085 * Clear PG_offline from a range of pages. If the pages were never onlined,
1086 * (via generic_online_page()), clear PageDirty().
1088 static void virtio_mem_clear_fake_offline(unsigned long pfn,
1089 unsigned long nr_pages, bool onlined)
1091 for (; nr_pages--; pfn++) {
1092 struct page *page = pfn_to_page(pfn);
1094 __ClearPageOffline(page);
1096 ClearPageDirty(page);
1101 * Release a range of fake-offline pages to the buddy, effectively
1102 * fake-onlining them.
1104 static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
1106 const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES;
1110 * We are always called at least with MAX_ORDER_NR_PAGES
1111 * granularity/alignment (e.g., the way subblocks work). All pages
1112 * inside such a block are alike.
1114 for (i = 0; i < nr_pages; i += max_nr_pages) {
1115 struct page *page = pfn_to_page(pfn + i);
1118 * If the page is PageDirty(), it was kept fake-offline when
1119 * onlining the memory block. Otherwise, it was allocated
1120 * using alloc_contig_range(). All pages in a subblock are alike.
1123 if (PageDirty(page)) {
1124 virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
1126 generic_online_page(page, MAX_ORDER - 1);
1128 virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
1130 free_contig_range(pfn + i, max_nr_pages);
1131 adjust_managed_page_count(page, max_nr_pages);
1137 * Try to allocate a range, marking pages fake-offline, effectively
1138 * fake-offlining them.
1140 static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
1142 const bool is_movable = page_zonenum(pfn_to_page(pfn)) ==
1144 int rc, retry_count;
1147 * TODO: We want an alloc_contig_range() mode that tries to allocate
1148 * harder (e.g., dealing with temporarily pinned pages, PCP), especially
1149 * with ZONE_MOVABLE. So for now, retry a couple of times with
1150 * ZONE_MOVABLE before giving up - because that zone is supposed to give
1153 for (retry_count = 0; retry_count < 5; retry_count++) {
1154 rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
1157 /* whoops, out of memory */
1159 else if (rc && !is_movable)
1164 virtio_mem_set_fake_offline(pfn, nr_pages, true);
1165 adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
1173 * Handle fake-offline pages when memory is going offline - such that the
1174 * pages can be skipped by mm-core when offlining.
1176 static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
1177 unsigned long nr_pages)
1183 * Drop our reference to the pages so the memory can get offlined
1184 * and add the unplugged pages to the managed page counters (so
1185 * offlining code can correctly subtract them again).
1187 adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
1188 /* Drop our reference to the pages so the memory can get offlined. */
1189 for (i = 0; i < nr_pages; i++) {
1190 page = pfn_to_page(pfn + i);
1191 if (WARN_ON(!page_ref_dec_and_test(page)))
1192 dump_page(page, "fake-offline page referenced");
1197 * Handle fake-offline pages when memory offlining is canceled - to undo
1198 * what we did in virtio_mem_fake_offline_going_offline().
1200 static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
1201 unsigned long nr_pages)
1206 * Get the reference we dropped when going offline and subtract the
1207 * unplugged pages from the managed page counters.
1209 adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
1210 for (i = 0; i < nr_pages; i++)
1211 page_ref_inc(pfn_to_page(pfn + i));
1214 static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
1216 const unsigned long addr = page_to_phys(page);
1217 unsigned long id, sb_id;
1218 struct virtio_mem *vm;
1222 list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
1223 if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
1228 * We exploit here that subblocks have at least
1229 * MAX_ORDER_NR_PAGES size/alignment - so we cannot
1230 * cross subblocks within one call.
1232 id = virtio_mem_phys_to_mb_id(addr);
1233 sb_id = virtio_mem_phys_to_sb_id(vm, addr);
1234 do_online = virtio_mem_sbm_test_sb_plugged(vm, id,
1238 * If the whole block is marked fake offline, keep
1239 * everything that way.
1241 id = virtio_mem_phys_to_bb_id(vm, addr);
1242 do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
1243 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
1247 * virtio_mem_set_fake_offline() might sleep, we don't need
1248 * the device anymore. See virtio_mem_remove() for how races
1249 * between memory onlining and device removal are handled.
1254 generic_online_page(page, order);
1256 virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
1262 /* not virtio-mem memory, but e.g., a DIMM. online it */
1263 generic_online_page(page, order);
1266 static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
1267 const struct virtio_mem_req *req)
1269 struct scatterlist *sgs[2], sg_req, sg_resp;
1273 /* don't use the request residing on the stack (vaddr) */
1276 /* out: buffer for request */
1277 sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
1280 /* in: buffer for response */
1281 sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
1284 rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
1288 virtqueue_kick(vm->vq);
1290 /* wait for a response */
1291 wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));
1293 return virtio16_to_cpu(vm->vdev, vm->resp.type);
1296 static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
1299 const uint64_t nb_vm_blocks = size / vm->device_block_size;
1300 const struct virtio_mem_req req = {
1301 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
1302 .u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
1303 .u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
1307 if (atomic_read(&vm->config_changed))
1310 dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
1313 switch (virtio_mem_send_request(vm, &req)) {
1314 case VIRTIO_MEM_RESP_ACK:
1315 vm->plugged_size += size;
1317 case VIRTIO_MEM_RESP_NACK:
1320 case VIRTIO_MEM_RESP_BUSY:
1323 case VIRTIO_MEM_RESP_ERROR:
1330 dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc);
1334 static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
1337 const uint64_t nb_vm_blocks = size / vm->device_block_size;
1338 const struct virtio_mem_req req = {
1339 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
1340 .u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
1341 .u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
1345 if (atomic_read(&vm->config_changed))
1348 dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
1351 switch (virtio_mem_send_request(vm, &req)) {
1352 case VIRTIO_MEM_RESP_ACK:
1353 vm->plugged_size -= size;
1355 case VIRTIO_MEM_RESP_BUSY:
1358 case VIRTIO_MEM_RESP_ERROR:
1365 dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc);
1369 static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
1371 const struct virtio_mem_req req = {
1372 .type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
1376 dev_dbg(&vm->vdev->dev, "unplugging all memory");
1378 switch (virtio_mem_send_request(vm, &req)) {
1379 case VIRTIO_MEM_RESP_ACK:
1380 vm->unplug_all_required = false;
1381 vm->plugged_size = 0;
1382 /* usable region might have shrunk */
1383 atomic_set(&vm->config_changed, 1);
1385 case VIRTIO_MEM_RESP_BUSY:
1392 dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc);
1397 * Plug selected subblocks. Updates the plugged state, but not the state
1398 * of the memory block.
1400 static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
1401 int sb_id, int count)
1403 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
1404 sb_id * vm->sbm.sb_size;
1405 const uint64_t size = count * vm->sbm.sb_size;
1408 rc = virtio_mem_send_plug_request(vm, addr, size);
1410 virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
1415 * Unplug selected subblocks. Updates the plugged state, but not the state
1416 * of the memory block.
1418 static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
1419 int sb_id, int count)
1421 const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
1422 sb_id * vm->sbm.sb_size;
1423 const uint64_t size = count * vm->sbm.sb_size;
1426 rc = virtio_mem_send_unplug_request(vm, addr, size);
1428 virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
1433 * Request to unplug a big block.
1435 * Will not modify the state of the big block.
1437 static int virtio_mem_bbm_unplug_bb(struct virtio_mem *vm, unsigned long bb_id)
1439 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
1440 const uint64_t size = vm->bbm.bb_size;
1442 return virtio_mem_send_unplug_request(vm, addr, size);
1446 * Request to plug a big block.
1448 * Will not modify the state of the big block.
1450 static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id)
1452 const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
1453 const uint64_t size = vm->bbm.bb_size;
1455 return virtio_mem_send_plug_request(vm, addr, size);
1459 * Unplug the desired number of plugged subblocks of an offline or not-added
1460 * memory block. Will fail if any subblock cannot get unplugged (instead of
1463 * Will not modify the state of the memory block.
1465 * Note: can fail after some subblocks were unplugged.
1467 static int virtio_mem_sbm_unplug_any_sb_raw(struct virtio_mem *vm,
1468 unsigned long mb_id, uint64_t *nb_sb)
1473 sb_id = vm->sbm.sbs_per_mb - 1;
1475 /* Find the next candidate subblock */
1476 while (sb_id >= 0 &&
1477 virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
1481 /* Try to unplug multiple subblocks at a time */
1483 while (count < *nb_sb && sb_id > 0 &&
1484 virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
1489 rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
1500 * Unplug all plugged subblocks of an offline or not-added memory block.
1502 * Will not modify the state of the memory block.
1504 * Note: can fail after some subblocks were unplugged.
1506 static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
1508 uint64_t nb_sb = vm->sbm.sbs_per_mb;
1510 return virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, &nb_sb);
1514 * Prepare tracking data for the next memory block.
1516 static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
1517 unsigned long *mb_id)
1521 if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id)
1524 /* Resize the state array if required. */
1525 rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm);
1529 /* Resize the subblock bitmap if required. */
1530 rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
1534 vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++;
1535 *mb_id = vm->sbm.next_mb_id++;
1540 * Try to plug the desired number of subblocks and add the memory block to Linux.
1543 * Will modify the state of the memory block.
1545 static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
1546 unsigned long mb_id, uint64_t *nb_sb)
1548 const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
1551 if (WARN_ON_ONCE(!count))
1555 * Plug the requested number of subblocks before adding it to Linux,
1556 * so that onlining will directly online all plugged subblocks.
1558 rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count);
1563 * Mark the block properly offline before adding it to Linux,
1564 * so the memory notifiers will find the block in the right state.
1566 if (count == vm->sbm.sbs_per_mb)
1567 virtio_mem_sbm_set_mb_state(vm, mb_id,
1568 VIRTIO_MEM_SBM_MB_OFFLINE);
1570 virtio_mem_sbm_set_mb_state(vm, mb_id,
1571 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
1573 /* Add the memory block to Linux - if that fails, try to unplug. */
1574 rc = virtio_mem_sbm_add_mb(vm, mb_id);
1576 int new_state = VIRTIO_MEM_SBM_MB_UNUSED;
1578 if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count))
1579 new_state = VIRTIO_MEM_SBM_MB_PLUGGED;
1580 virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
1589 * Try to plug the desired number of subblocks of a memory block that
1590 * is already added to Linux.
1592 * Will modify the state of the memory block.
1594 * Note: Can fail after some subblocks were successfully plugged.
1596 static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
1597 unsigned long mb_id, uint64_t *nb_sb)
1599 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
1600 unsigned long pfn, nr_pages;
1604 if (WARN_ON_ONCE(!*nb_sb))
1608 sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
1609 if (sb_id >= vm->sbm.sbs_per_mb)
1612 while (count < *nb_sb &&
1613 sb_id + count < vm->sbm.sbs_per_mb &&
1614 !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
1617 rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count);
1621 if (old_state == VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL)
1624 /* fake-online the pages if the memory block is online */
1625 pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
1626 sb_id * vm->sbm.sb_size);
1627 nr_pages = PFN_DOWN(count * vm->sbm.sb_size);
1628 virtio_mem_fake_online(pfn, nr_pages);
1631 if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
1632 virtio_mem_sbm_set_mb_state(vm, mb_id, old_state - 1);
1637 static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
1639 const int mb_states[] = {
1640 VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
1641 VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
1642 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
1644 uint64_t nb_sb = diff / vm->sbm.sb_size;
1645 unsigned long mb_id;
1651 /* Don't race with onlining/offlining */
1652 mutex_lock(&vm->hotplug_mutex);
1654 for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
1655 virtio_mem_sbm_for_each_mb(vm, mb_id, mb_states[i]) {
1656 rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb);
1664 * We won't be working on online/offline memory blocks from this point,
1665 * so we can't race with memory onlining/offlining. Drop the mutex.
1667 mutex_unlock(&vm->hotplug_mutex);
1669 /* Try to plug and add unused blocks */
1670 virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
1671 if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
1674 rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
1680 /* Try to prepare, plug and add new blocks */
1682 if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
1685 rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id);
1688 rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
1696 mutex_unlock(&vm->hotplug_mutex);
1701 * Plug a big block and add it to Linux.
1703 * Will modify the state of the big block.
1705 static int virtio_mem_bbm_plug_and_add_bb(struct virtio_mem *vm,
1706 unsigned long bb_id)
1710 if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
1711 VIRTIO_MEM_BBM_BB_UNUSED))
1714 rc = virtio_mem_bbm_plug_bb(vm, bb_id);
1717 virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
1719 rc = virtio_mem_bbm_add_bb(vm, bb_id);
1721 if (!virtio_mem_bbm_unplug_bb(vm, bb_id))
1722 virtio_mem_bbm_set_bb_state(vm, bb_id,
1723 VIRTIO_MEM_BBM_BB_UNUSED);
1725 /* Retry from the main loop. */
1726 virtio_mem_bbm_set_bb_state(vm, bb_id,
1727 VIRTIO_MEM_BBM_BB_PLUGGED);
1734 * Prepare tracking data for the next big block.
1736 static int virtio_mem_bbm_prepare_next_bb(struct virtio_mem *vm,
1737 unsigned long *bb_id)
1741 if (vm->bbm.next_bb_id > vm->bbm.last_usable_bb_id)
1744 /* Resize the big block state array if required. */
1745 rc = virtio_mem_bbm_bb_states_prepare_next_bb(vm);
1749 vm->bbm.bb_count[VIRTIO_MEM_BBM_BB_UNUSED]++;
1750 *bb_id = vm->bbm.next_bb_id;
1751 vm->bbm.next_bb_id++;
1755 static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
1757 uint64_t nb_bb = diff / vm->bbm.bb_size;
1758 unsigned long bb_id;
1764 /* Try to plug and add unused big blocks */
1765 virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_UNUSED) {
1766 if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
1769 rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
1777 /* Try to prepare, plug and add new big blocks */
1779 if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
1782 rc = virtio_mem_bbm_prepare_next_bb(vm, &bb_id);
1785 rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
1797 * Try to plug the requested amount of memory.
1799 static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
1802 return virtio_mem_sbm_plug_request(vm, diff);
1803 return virtio_mem_bbm_plug_request(vm, diff);
1807 * Unplug the desired number of plugged subblocks of an offline memory block.
1808 * Will fail if any subblock cannot get unplugged (instead of skipping it).
1810 * Will modify the state of the memory block. Might temporarily drop the hotplug_mutex.
1813 * Note: Can fail after some subblocks were successfully unplugged.
1815 static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
1816 unsigned long mb_id,
1821 rc = virtio_mem_sbm_unplug_any_sb_raw(vm, mb_id, nb_sb);
1823 /* some subblocks might have been unplugged even on failure */
1824 if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
1825 virtio_mem_sbm_set_mb_state(vm, mb_id,
1826 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
1830 if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
1832 * Remove the block from Linux - this should never fail.
1833 * Hinder the block from getting onlined by marking it
1834 * unplugged. Temporarily drop the mutex, so
1835 * any pending GOING_ONLINE requests can be serviced/rejected.
1837 virtio_mem_sbm_set_mb_state(vm, mb_id,
1838 VIRTIO_MEM_SBM_MB_UNUSED);
1840 mutex_unlock(&vm->hotplug_mutex);
1841 rc = virtio_mem_sbm_remove_mb(vm, mb_id);
1843 mutex_lock(&vm->hotplug_mutex);
1849 * Unplug the given plugged subblocks of an online memory block.
1851 * Will modify the state of the memory block.
1853 static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
1854 unsigned long mb_id, int sb_id,
1857 const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
1858 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
1859 unsigned long start_pfn;
1862 start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
1863 sb_id * vm->sbm.sb_size);
1865 rc = virtio_mem_fake_offline(start_pfn, nr_pages);
1869 /* Try to unplug the allocated memory */
1870 rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
1872 /* Return the memory to the buddy. */
1873 virtio_mem_fake_online(start_pfn, nr_pages);
1877 switch (old_state) {
1878 case VIRTIO_MEM_SBM_MB_KERNEL:
1879 virtio_mem_sbm_set_mb_state(vm, mb_id,
1880 VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL);
1882 case VIRTIO_MEM_SBM_MB_MOVABLE:
1883 virtio_mem_sbm_set_mb_state(vm, mb_id,
1884 VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL);
1892 * Unplug the desired number of plugged subblocks of an online memory block.
1893 * Will skip subblocks that are busy.
1895 * Will modify the state of the memory block. Might temporarily drop the hotplug_mutex.
1898 * Note: Can fail after some subblocks were successfully unplugged. Can
1899 * return 0 even if subblocks were busy and could not get unplugged.
1901 static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
1902 unsigned long mb_id,
1907 /* If possible, try to unplug the complete block in one shot. */
1908 if (*nb_sb >= vm->sbm.sbs_per_mb &&
1909 virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
1910 rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0,
1911 vm->sbm.sbs_per_mb);
1913 *nb_sb -= vm->sbm.sbs_per_mb;
1915 } else if (rc != -EBUSY)
1919 /* Fallback to single subblocks. */
1920 for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
1921 /* Find the next candidate subblock */
1922 while (sb_id >= 0 &&
1923 !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
1928 rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1);
1938 * Once all subblocks of a memory block were unplugged, offline and
1939 * remove it. This will usually not fail, as no memory is in use
1940 * anymore - however some other notifiers might NACK the request.
1942 if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
1943 mutex_unlock(&vm->hotplug_mutex);
1944 rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
1945 mutex_lock(&vm->hotplug_mutex);
1947 virtio_mem_sbm_set_mb_state(vm, mb_id,
1948 VIRTIO_MEM_SBM_MB_UNUSED);
1955 * Unplug the desired number of plugged subblocks of a memory block that is
1956 * already added to Linux. Will skip subblocks of online memory blocks that are
1957 * busy (by the OS). Will fail if any subblock that's not busy cannot get unplugged.
1960 * Will modify the state of the memory block. Might temporarily drop the hotplug_mutex.
1963 * Note: Can fail after some subblocks were successfully unplugged. Can
1964 * return 0 even if subblocks were busy and could not get unplugged.
1966 static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
1967 unsigned long mb_id,
1970 const int old_state = virtio_mem_sbm_get_mb_state(vm, mb_id);
1972 switch (old_state) {
1973 case VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL:
1974 case VIRTIO_MEM_SBM_MB_KERNEL:
1975 case VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL:
1976 case VIRTIO_MEM_SBM_MB_MOVABLE:
1977 return virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, nb_sb);
1978 case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
1979 case VIRTIO_MEM_SBM_MB_OFFLINE:
1980 return virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, nb_sb);
1985 static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
1987 const int mb_states[] = {
1988 VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
1989 VIRTIO_MEM_SBM_MB_OFFLINE,
1990 VIRTIO_MEM_SBM_MB_MOVABLE_PARTIAL,
1991 VIRTIO_MEM_SBM_MB_KERNEL_PARTIAL,
1992 VIRTIO_MEM_SBM_MB_MOVABLE,
1993 VIRTIO_MEM_SBM_MB_KERNEL,
1995 uint64_t nb_sb = diff / vm->sbm.sb_size;
1996 unsigned long mb_id;
2003 * We'll drop the mutex a couple of times when it is safe to do so.
2004 * This might result in some blocks switching the state (online/offline)
2005 * and we could miss them in this run - we will retry again later.
2007 mutex_lock(&vm->hotplug_mutex);
2010 * We try unplug from partially plugged blocks first, to try removing
2011 * whole memory blocks along with metadata. We prioritize ZONE_MOVABLE
2012 * as it's more reliable to unplug memory and remove whole memory
2013 * blocks, and we don't want to trigger a zone imbalance by
2014 * accidentally removing too much kernel memory.
2016 for (i = 0; i < ARRAY_SIZE(mb_states); i++) {
2017 virtio_mem_sbm_for_each_mb_rev(vm, mb_id, mb_states[i]) {
2018 rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
2021 mutex_unlock(&vm->hotplug_mutex);
2023 mutex_lock(&vm->hotplug_mutex);
2025 if (!unplug_online && i == 1) {
2026 mutex_unlock(&vm->hotplug_mutex);
2031 mutex_unlock(&vm->hotplug_mutex);
2032 return nb_sb ? -EBUSY : 0;
2034 mutex_unlock(&vm->hotplug_mutex);
2039 * Try to offline and remove a big block from Linux and unplug it. Will fail
2040 * with -EBUSY if some memory is busy and cannot get unplugged.
2042 * Will modify the state of the memory block. Might temporarily drop the hotplug_mutex.
2045 static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
2046 unsigned long bb_id)
2048 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2049 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2050 unsigned long end_pfn = start_pfn + nr_pages;
2055 if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
2056 VIRTIO_MEM_BBM_BB_ADDED))
2059 if (bbm_safe_unplug) {
2061 * Start by fake-offlining all memory. Once we marked the device
2062 * block as fake-offline, all newly onlined memory will
2063 * automatically be kept fake-offline. Protect from concurrent
2064 * onlining/offlining until we have a consistent state.
2066 mutex_lock(&vm->hotplug_mutex);
2067 virtio_mem_bbm_set_bb_state(vm, bb_id,
2068 VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);
2070 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2071 page = pfn_to_online_page(pfn);
2075 rc = virtio_mem_fake_offline(pfn, PAGES_PER_SECTION);
2078 goto rollback_safe_unplug;
2081 mutex_unlock(&vm->hotplug_mutex);
2084 rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
2086 if (bbm_safe_unplug) {
2087 mutex_lock(&vm->hotplug_mutex);
2088 goto rollback_safe_unplug;
2093 rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
2095 virtio_mem_bbm_set_bb_state(vm, bb_id,
2096 VIRTIO_MEM_BBM_BB_PLUGGED);
2098 virtio_mem_bbm_set_bb_state(vm, bb_id,
2099 VIRTIO_MEM_BBM_BB_UNUSED);
2102 rollback_safe_unplug:
2103 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
2104 page = pfn_to_online_page(pfn);
2107 virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
2109 virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
2110 mutex_unlock(&vm->hotplug_mutex);
2115 * Test if a big block is completely offline.
2117 static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
2118 unsigned long bb_id)
2120 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2121 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2124 for (pfn = start_pfn; pfn < start_pfn + nr_pages;
2125 pfn += PAGES_PER_SECTION) {
2126 if (pfn_to_online_page(pfn))
2134 * Test if a big block is completely onlined to ZONE_MOVABLE (or offline).
2136 static bool virtio_mem_bbm_bb_is_movable(struct virtio_mem *vm,
2137 unsigned long bb_id)
2139 const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
2140 const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
2144 for (pfn = start_pfn; pfn < start_pfn + nr_pages;
2145 pfn += PAGES_PER_SECTION) {
2146 page = pfn_to_online_page(pfn);
2149 if (page_zonenum(page) != ZONE_MOVABLE)
2156 static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
2158 uint64_t nb_bb = diff / vm->bbm.bb_size;
2166 * Try to unplug big blocks. Similar to SBM, start with offline big blocks.
2169 for (i = 0; i < 3; i++) {
2170 virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
2174 * As we're holding no locks, these checks are racy,
2175 * but we don't care.
2177 if (i == 0 && !virtio_mem_bbm_bb_is_offline(vm, bb_id))
2179 if (i == 1 && !virtio_mem_bbm_bb_is_movable(vm, bb_id))
2181 rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
2189 if (i == 0 && !unplug_online)
2193 return nb_bb ? -EBUSY : 0;
2197 * Try to unplug the requested amount of memory.
2199 static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
2202 return virtio_mem_sbm_unplug_request(vm, diff);
2203 return virtio_mem_bbm_unplug_request(vm, diff);
2207 * Try to unplug all blocks that couldn't be unplugged before, for example,
2208 * because the hypervisor was busy.
2210 static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
2216 virtio_mem_bbm_for_each_bb(vm, id,
2217 VIRTIO_MEM_BBM_BB_PLUGGED) {
2218 rc = virtio_mem_bbm_unplug_bb(vm, id);
2221 virtio_mem_bbm_set_bb_state(vm, id,
2222 VIRTIO_MEM_BBM_BB_UNUSED);
2227 virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_PLUGGED) {
2228 rc = virtio_mem_sbm_unplug_mb(vm, id);
2231 virtio_mem_sbm_set_mb_state(vm, id,
2232 VIRTIO_MEM_SBM_MB_UNUSED);
2239 * Update all parts of the config that could have changed.
2241 static void virtio_mem_refresh_config(struct virtio_mem *vm)
2243 const struct range pluggable_range = mhp_get_pluggable_range(true);
2244 uint64_t new_plugged_size, usable_region_size, end_addr;
2246 /* the plugged_size is just a reflection of what _we_ did previously */
2247 virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
2249 if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
2250 vm->plugged_size = new_plugged_size;
2252 /* calculate the last usable memory block id */
2253 virtio_cread_le(vm->vdev, struct virtio_mem_config,
2254 usable_region_size, &usable_region_size);
2255 end_addr = min(vm->addr + usable_region_size - 1,
2256 pluggable_range.end);
2259 vm->sbm.last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr);
2260 if (!IS_ALIGNED(end_addr + 1, memory_block_size_bytes()))
2261 vm->sbm.last_usable_mb_id--;
2263 vm->bbm.last_usable_bb_id = virtio_mem_phys_to_bb_id(vm,
2265 if (!IS_ALIGNED(end_addr + 1, vm->bbm.bb_size))
2266 vm->bbm.last_usable_bb_id--;
2269 * If we cannot plug any of our device memory (e.g., nothing in the
2270 * usable region is addressable), the last usable memory block id will
2271 * be smaller than the first usable memory block id. We'll stop
2272 * attempting to add memory with -ENOSPC from our main loop.
2275 /* see if there is a request to change the size */
2276 virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
2277 &vm->requested_size);
2279 dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
2280 dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
/*
 * Workqueue function for handling plug/unplug requests and config updates.
 */
static void virtio_mem_run_wq(struct work_struct *work)
{
	struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
	uint64_t diff;
	int rc;

	hrtimer_cancel(&vm->retry_timer);

	if (vm->broken)
		return;

	atomic_set(&vm->wq_active, 1);
retry:
	rc = 0;

	/* Make sure we start with a clean state if there are leftovers. */
	if (unlikely(vm->unplug_all_required))
		rc = virtio_mem_send_unplug_all_request(vm);

	if (atomic_read(&vm->config_changed)) {
		atomic_set(&vm->config_changed, 0);
		virtio_mem_refresh_config(vm);
	}

	/* Unplug any leftovers from previous runs */
	if (!rc)
		rc = virtio_mem_unplug_pending_mb(vm);

	if (!rc && vm->requested_size != vm->plugged_size) {
		if (vm->requested_size > vm->plugged_size) {
			diff = vm->requested_size - vm->plugged_size;
			rc = virtio_mem_plug_request(vm, diff);
		} else {
			diff = vm->plugged_size - vm->requested_size;
			rc = virtio_mem_unplug_request(vm, diff);
		}
	}

	switch (rc) {
	case 0:
		vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
		break;
	case -ENOSPC:
		/*
		 * We cannot add any more memory (alignment, physical limit)
		 * or we have too many offline memory blocks.
		 */
		break;
	case -ETXTBSY:
		/*
		 * The hypervisor cannot process our request right now
		 * (e.g., out of memory, migrating);
		 */
	case -EBUSY:
		/*
		 * We cannot free up any memory to unplug it (all plugged memory
		 * is busy).
		 */
	case -ENOMEM:
		/* Out of memory, try again later. */
		hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
			      HRTIMER_MODE_REL);
		break;
	case -EAGAIN:
		/* Retry immediately (e.g., the config changed). */
		goto retry;
	default:
		/* Unknown error, mark as broken */
		dev_err(&vm->vdev->dev,
			"unknown error, marking device broken: %d\n", rc);
		vm->broken = true;
		break;
	}

	atomic_set(&vm->wq_active, 0);
}

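/*
 * Timer callback for retrying failed (un)plug requests: re-trigger the
 * workqueue and exponentially back off the retry interval, capped at
 * VIRTIO_MEM_RETRY_TIMER_MAX_MS.
 */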
static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
{
	struct virtio_mem *vm = container_of(timer, struct virtio_mem,
					     retry_timer);

	virtio_mem_retry(vm);
	vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
				   VIRTIO_MEM_RETRY_TIMER_MAX_MS);
	return HRTIMER_NORESTART;
}

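/* Virtqueue callback: the device placed a response, wake up the waiter. */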
static void virtio_mem_handle_response(struct virtqueue *vq)
{
	struct virtio_mem *vm = vq->vdev->priv;

	wake_up(&vm->host_resp);
}

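/* Set up the single virtqueue used to send (un)plug requests to the device. */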
static int virtio_mem_init_vq(struct virtio_mem *vm)
{
	struct virtqueue *vq;

	vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
				   "guest-request");
	if (IS_ERR(vq))
		return PTR_ERR(vq);
	vm->vq = vq;

	return 0;
}

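/*
 * Initialize device state from the config space and decide between
 * Sub Block Mode (SBM) and Big Block Mode (BBM).
 */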
static int virtio_mem_init(struct virtio_mem *vm)
{
	const struct range pluggable_range = mhp_get_pluggable_range(true);
	uint64_t sb_size, addr;
	uint16_t node_id;

	if (!vm->vdev->config->get) {
		dev_err(&vm->vdev->dev, "config access disabled\n");
		return -EINVAL;
	}

	/*
	 * We don't want to (un)plug or reuse any memory when in kdump. The
	 * memory is still accessible (but not mapped).
	 */
	if (is_kdump_kernel()) {
		dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
		return -EBUSY;
	}

	/* Fetch all properties that can't change. */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&vm->plugged_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
			&vm->device_block_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
			&node_id);
	vm->nid = virtio_mem_translate_node_id(vm, node_id);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
			&vm->region_size);

	/* Determine the nid for the device based on the lowest address. */
	if (vm->nid == NUMA_NO_NODE)
		vm->nid = memory_add_physaddr_to_nid(vm->addr);

	/* bad device setup - warn only */
	if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical start address can make some memory unusable.\n");
	if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical end address can make some memory unusable.\n");
	if (vm->addr < pluggable_range.start ||
	    vm->addr + vm->region_size - 1 > pluggable_range.end)
		dev_warn(&vm->vdev->dev,
			 "Some device memory is not addressable/pluggable. This can make some memory unusable.\n");

	/* Prepare the offline threshold - make sure we can add two blocks. */
	vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
				      VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);

	/*
	 * We want subblocks to span at least MAX_ORDER_NR_PAGES and
	 * pageblock_nr_pages pages. This:
	 * - Simplifies our page onlining code (virtio_mem_online_page_cb)
	 *   and fake page onlining code (virtio_mem_fake_online).
	 * - Is required for now for alloc_contig_range() to work reliably -
	 *   it doesn't properly handle smaller granularity on ZONE_NORMAL.
	 */
	sb_size = max_t(uint64_t, MAX_ORDER_NR_PAGES,
			pageblock_nr_pages) * PAGE_SIZE;
	sb_size = max_t(uint64_t, vm->device_block_size, sb_size);
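
	/*
	 * Example (typical x86-64 values, not guaranteed): with 4 KiB base
	 * pages, 2 MiB pageblocks, 4 MiB MAX_ORDER chunks, a device block
	 * size of at most 4 MiB and 128 MiB Linux memory blocks, sb_size
	 * ends up as 4 MiB, i.e., 32 subblocks per Linux memory block in SBM.
	 */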
	if (sb_size < memory_block_size_bytes() && !force_bbm) {
		/* SBM: At least two subblocks per Linux memory block. */
		vm->in_sbm = true;
		vm->sbm.sb_size = sb_size;
		vm->sbm.sbs_per_mb = memory_block_size_bytes() /
				     vm->sbm.sb_size;

		/* Round up to the next full memory block */
		addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
		       memory_block_size_bytes() - 1;
		vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
		vm->sbm.next_mb_id = vm->sbm.first_mb_id;
	} else {
		/* BBM: At least one Linux memory block. */
		vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size,
					memory_block_size_bytes());

		if (bbm_block_size) {
			if (!is_power_of_2(bbm_block_size)) {
				dev_warn(&vm->vdev->dev,
					 "bbm_block_size is not a power of 2");
			} else if (bbm_block_size < vm->bbm.bb_size) {
				dev_warn(&vm->vdev->dev,
					 "bbm_block_size is too small");
			} else {
				vm->bbm.bb_size = bbm_block_size;
			}
		}

		/* Round up to the next aligned big block */
		addr = max_t(uint64_t, vm->addr, pluggable_range.start) +
		       vm->bbm.bb_size - 1;
		vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
		vm->bbm.next_bb_id = vm->bbm.first_bb_id;

		/* Make sure we can add two big blocks. */
		vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
					      vm->offline_threshold);
	}

	dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
	dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
	dev_info(&vm->vdev->dev, "device block size: 0x%llx",
		 (unsigned long long)vm->device_block_size);
	dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
		 memory_block_size_bytes());
	if (vm->in_sbm)
		dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
			 (unsigned long long)vm->sbm.sb_size);
	else
		dev_info(&vm->vdev->dev, "big block size: 0x%llx",
			 (unsigned long long)vm->bbm.bb_size);
	if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
		dev_info(&vm->vdev->dev, "nid: %d", vm->nid);

	return 0;
}

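/*
 * Reserve the complete device memory region in the resource tree; the
 * region is marked as not busy so add_memory() can add child resources
 * for actually plugged memory later.
 */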
static int virtio_mem_create_resource(struct virtio_mem *vm)
{
	/*
	 * When force-unloading the driver and removing the device, we
	 * could have a garbage pointer. Duplicate the string.
	 */
	const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);

	if (!name)
		return -ENOMEM;

	vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
						   name, IORESOURCE_SYSTEM_RAM);
	if (!vm->parent_resource) {
		kfree(name);
		dev_warn(&vm->vdev->dev, "could not reserve device region\n");
		dev_info(&vm->vdev->dev,
			 "reloading the driver is not supported\n");
		return -EBUSY;
	}

	/* The memory is not actually busy - make add_memory() work. */
	vm->parent_resource->flags &= ~IORESOURCE_BUSY;
	return 0;
}

static void virtio_mem_delete_resource(struct virtio_mem *vm)
{
	const char *name;

	if (!vm->parent_resource)
		return;

	name = vm->parent_resource->name;
	release_resource(vm->parent_resource);
	kfree(vm->parent_resource);
	kfree(name);
	vm->parent_resource = NULL;
}

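/* walk_iomem_res_desc() callback: any matching range counts as System RAM. */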
static int virtio_mem_range_has_system_ram(struct resource *res, void *arg)
{
	return 1;
}

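/*
 * Check if any busy System RAM within the device-managed region is still
 * present, i.e., memory we added is still in use by the system.
 */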
static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr,
				   vm->addr + vm->region_size, NULL,
				   virtio_mem_range_has_system_ram) == 1;
}

static int virtio_mem_probe(struct virtio_device *vdev)
{
	struct virtio_mem *vm;
	int rc;

	BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
	BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);

	vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	init_waitqueue_head(&vm->host_resp);
	vm->vdev = vdev;
	INIT_WORK(&vm->wq, virtio_mem_run_wq);
	mutex_init(&vm->hotplug_mutex);
	INIT_LIST_HEAD(&vm->next);
	spin_lock_init(&vm->removal_lock);
	hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vm->retry_timer.function = virtio_mem_timer_expired;
	vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;

	/* register the virtqueue */
	rc = virtio_mem_init_vq(vm);
	if (rc)
		goto out_free_vm;

	/* initialize the device by querying the config */
	rc = virtio_mem_init(vm);
	if (rc)
		goto out_del_vq;

	/* create the parent resource for all memory */
	rc = virtio_mem_create_resource(vm);
	if (rc)
		goto out_del_vq;

	/*
	 * If we still have memory plugged, we have to unplug all memory first.
	 * Registering our parent resource makes sure that this memory isn't
	 * actually in use (e.g., trying to reload the driver).
	 */
	if (vm->plugged_size) {
		vm->unplug_all_required = true;
		dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
	}

	/* register callbacks */
	vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
	rc = register_memory_notifier(&vm->memory_notifier);
	if (rc)
		goto out_del_resource;
	rc = register_virtio_mem_device(vm);
	if (rc)
		goto out_unreg_mem;

	virtio_device_ready(vdev);

	/* trigger a config update to start processing the requested_size */
	atomic_set(&vm->config_changed, 1);
	queue_work(system_freezable_wq, &vm->wq);

	return 0;
out_unreg_mem:
	unregister_memory_notifier(&vm->memory_notifier);
out_del_resource:
	virtio_mem_delete_resource(vm);
out_del_vq:
	vdev->config->del_vqs(vdev);
out_free_vm:
	kfree(vm);
	vdev->priv = NULL;

	return rc;
}

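/*
 * Tear down the device: stop the workqueue and timer, remove partially
 * plugged offline memory blocks we are still able to remove, and warn if
 * memory added by this device remains in the system.
 */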
static void virtio_mem_remove(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;
	unsigned long mb_id;
	int rc;

	/*
	 * Make sure the workqueue won't be triggered anymore and no memory
	 * blocks can be onlined/offlined until we're finished here.
	 */
	mutex_lock(&vm->hotplug_mutex);
	spin_lock_irq(&vm->removal_lock);
	vm->removing = true;
	spin_unlock_irq(&vm->removal_lock);
	mutex_unlock(&vm->hotplug_mutex);

	/* wait until the workqueue stopped */
	cancel_work_sync(&vm->wq);
	hrtimer_cancel(&vm->retry_timer);

	if (vm->in_sbm) {
		/*
		 * After we unregistered our callbacks, user space can online
		 * partially plugged offline blocks. Make sure to remove them.
		 */
		virtio_mem_sbm_for_each_mb(vm, mb_id,
					   VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
			rc = virtio_mem_sbm_remove_mb(vm, mb_id);
			BUG_ON(rc);
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_UNUSED);
		}
		/*
		 * After we unregistered our callbacks, user space can no longer
		 * offline partially plugged online memory blocks. No need to
		 * worry about them.
		 */
	}

	/* unregister callbacks */
	unregister_virtio_mem_device(vm);
	unregister_memory_notifier(&vm->memory_notifier);

	/*
	 * There is no way we could reliably remove all memory we have added to
	 * the system. And there is no way to stop the driver/device from going
	 * away. Warn at least.
	 */
	if (virtio_mem_has_memory_added(vm)) {
		dev_warn(&vdev->dev, "device still has system memory added\n");
	} else {
		virtio_mem_delete_resource(vm);
		kfree_const(vm->resource_name);
	}

	/* remove all tracking data - no locking needed */
	if (vm->in_sbm) {
		vfree(vm->sbm.mb_states);
		vfree(vm->sbm.sb_states);
	} else {
		vfree(vm->bbm.bb_states);
	}

	/* reset the device and cleanup the queues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	kfree(vm);
	vdev->priv = NULL;
}

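/* The device config changed (e.g., new requested_size): kick the workqueue. */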
static void virtio_mem_config_changed(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;

	atomic_set(&vm->config_changed, 1);
	virtio_mem_retry(vm);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_mem_freeze(struct virtio_device *vdev)
{
	/*
	 * When restarting the VM, all memory is usually unplugged. Don't
	 * allow to suspend/hibernate.
	 */
	dev_err(&vdev->dev, "save/restore not supported.\n");
	return -EPERM;
}

static int virtio_mem_restore(struct virtio_device *vdev)
{
	return -EPERM;
}
#endif

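/* Only offer VIRTIO_MEM_F_ACPI_PXM when the kernel can translate PXM values. */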
static unsigned int virtio_mem_features[] = {
#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
	VIRTIO_MEM_F_ACPI_PXM,
#endif
};

static const struct virtio_device_id virtio_mem_id_table[] = {
	{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_mem_driver = {
	.feature_table = virtio_mem_features,
	.feature_table_size = ARRAY_SIZE(virtio_mem_features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = virtio_mem_id_table,
	.probe = virtio_mem_probe,
	.remove = virtio_mem_remove,
	.config_changed = virtio_mem_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_mem_freeze,
	.restore = virtio_mem_restore,
#endif
};

module_virtio_driver(virtio_mem_driver);
MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
MODULE_DESCRIPTION("Virtio-mem driver");
MODULE_LICENSE("GPL");