// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio-mem device driver.
 *
 * Copyright Red Hat, Inc. 2020
 *
 * Author(s): David Hildenbrand <david@redhat.com>
 */
#include <linux/virtio.h>
#include <linux/virtio_mem.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/hrtimer.h>
#include <linux/crash_dump.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/lockdep.h>

#include <acpi/acpi_numa.h>
static bool unplug_online = true;
module_param(unplug_online, bool, 0644);
MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");
/*
 * virtio-mem currently supports the following modes of operation:
 *
 * * Sub Block Mode (SBM): A Linux memory block spans 1..X subblocks (SB). The
 *   size of a Sub Block (SB) is determined based on the device block size, the
 *   pageblock size, and the maximum allocation granularity of the buddy.
 *   Subblocks within a Linux memory block might either be plugged or unplugged.
 *   Memory is added to/removed from Linux MM in Linux memory block granularity.
 *
 * User space / core MM (auto onlining) is responsible for onlining added
 * Linux memory blocks - and for selecting a zone. Linux memory blocks are
 * always onlined separately, and all memory within a Linux memory block is
 * onlined to the same zone - virtio-mem relies on this behavior.
 */
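/*
 * Illustrative sizing example (hypothetical values, not read from a real
 * device): with 128 MiB Linux memory blocks, a 2 MiB device block size and
 * a 4 MiB maximum buddy allocation granularity, the subblock size becomes
 * 4 MiB and each Linux memory block spans 128 MiB / 4 MiB = 32 subblocks.
 * Plug/unplug requests toward the device are then issued in 4 MiB steps,
 * while add/remove toward Linux stays at 128 MiB granularity.
 */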
/*
 * State of a Linux memory block in SBM.
 */
enum virtio_mem_sbm_mb_state {
	/* Unplugged, not added to Linux. Can be reused later. */
	VIRTIO_MEM_SBM_MB_UNUSED = 0,
	/* (Partially) plugged, not added to Linux. Error on add_memory(). */
	VIRTIO_MEM_SBM_MB_PLUGGED,
	/* Fully plugged, fully added to Linux, offline. */
	VIRTIO_MEM_SBM_MB_OFFLINE,
	/* Partially plugged, fully added to Linux, offline. */
	VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
	/* Fully plugged, fully added to Linux, online. */
	VIRTIO_MEM_SBM_MB_ONLINE,
	/* Partially plugged, fully added to Linux, online. */
	VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL,
	VIRTIO_MEM_SBM_MB_COUNT
};
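/*
 * Rough sketch of the state transitions implemented below (derived from the
 * memory notifier and the plug/unplug paths in this file):
 *
 *   UNUSED -> OFFLINE / OFFLINE_PARTIAL      (subblocks plugged + added)
 *   OFFLINE(_PARTIAL) <-> ONLINE(_PARTIAL)   (memory notifier)
 *   fully unplugged + removed -> UNUSED
 *   add_memory() failure with subblocks left plugged -> PLUGGED
 */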
struct virtio_mem {
	struct virtio_device *vdev;

	/* We might first have to unplug all memory when starting up. */
	bool unplug_all_required;

	/* Workqueue that processes the plug/unplug requests. */
	struct work_struct wq;
	atomic_t wq_active;
	atomic_t config_changed;

	/* Virtqueue for guest->host requests. */
	struct virtqueue *vq;

	/* Wait for a host response to a guest request. */
	wait_queue_head_t host_resp;

	/* Space for one guest request and the host response. */
	struct virtio_mem_req req;
	struct virtio_mem_resp resp;

	/* The current size of the device. */
	uint64_t plugged_size;
	/* The requested size of the device. */
	uint64_t requested_size;

	/* The device block size (for communicating with the device). */
	uint64_t device_block_size;
	/* The determined node id for all memory of the device. */
	int nid;
	/* Physical start address of the memory region. */
	uint64_t addr;
	/* Maximum region size in bytes. */
	uint64_t region_size;

	/* The parent resource for all memory added via this device. */
	struct resource *parent_resource;
	/*
	 * Copy of "System RAM (virtio_mem)" to be used for
	 * add_memory_driver_managed().
	 */
	const char *resource_name;

	/*
	 * We don't want to add too much memory if it's not getting onlined,
	 * to avoid running OOM. Besides this threshold, we allow having at
	 * least two offline blocks at a time (whichever is bigger).
	 */
#define VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD	(1024 * 1024 * 1024)
	atomic64_t offline_size;
	uint64_t offline_threshold;

	struct {
		/* Id of the first memory block of this device. */
		unsigned long first_mb_id;
		/* Id of the last usable memory block of this device. */
		unsigned long last_usable_mb_id;
		/* Id of the next memory block to prepare when needed. */
		unsigned long next_mb_id;

		/* The subblock size. */
		uint64_t sb_size;
		/* The number of subblocks per Linux memory block. */
		uint32_t sbs_per_mb;

		/* Summary of all memory block states. */
		unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];

		/*
		 * One byte state per memory block. Allocated via vmalloc().
		 * Resized (alloc+copy+free) on demand.
		 *
		 * With 128 MiB memory blocks, we have states for 512 GiB of
		 * memory in one 4 KiB page.
		 */
		uint8_t *mb_states;

		/*
		 * Bitmap: one bit per subblock. Allocated similar to
		 * sbm.mb_states.
		 *
		 * A set bit means the corresponding subblock is plugged,
		 * otherwise it's unplugged.
		 *
		 * With 4 MiB subblocks, we manage 128 GiB of memory in one
		 * 4 KiB page.
		 */
		unsigned long *sb_states;
	} sbm;

	/*
	 * Mutex that protects the sbm.mb_count, sbm.mb_states, and
	 * sbm.sb_states.
	 *
	 * When this lock is held the pointers can't change, ONLINE and
	 * OFFLINE blocks can't change the state and no subblocks will get
	 * plugged/unplugged.
	 */
	struct mutex hotplug_mutex;
	bool hotplug_active;

	/* An error occurred we cannot handle - stop processing requests. */
	bool broken;

	/* The driver is being removed. */
	spinlock_t removal_lock;
	bool removing;

	/* Timer for retrying to plug/unplug memory. */
	struct hrtimer retry_timer;
	unsigned int retry_timer_ms;
#define VIRTIO_MEM_RETRY_TIMER_MIN_MS		50000
#define VIRTIO_MEM_RETRY_TIMER_MAX_MS		300000

	/* Memory notifier (online/offline events). */
	struct notifier_block memory_notifier;

	/* Next device in the list of virtio-mem devices. */
	struct list_head next;
};
/*
 * We have to share a single online_page callback among all virtio-mem
 * devices. We use RCU to iterate the list in the callback.
 */
static DEFINE_MUTEX(virtio_mem_mutex);
static LIST_HEAD(virtio_mem_devices);
static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
						  unsigned long nr_pages);
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
						   unsigned long nr_pages);
static void virtio_mem_retry(struct virtio_mem *vm);
/*
 * Register a virtio-mem device so it will be considered for the online_page
 * callback.
 */
static int register_virtio_mem_device(struct virtio_mem *vm)
{
	int rc = 0;

	/* First device registers the callback. */
	mutex_lock(&virtio_mem_mutex);
	if (list_empty(&virtio_mem_devices))
		rc = set_online_page_callback(&virtio_mem_online_page_cb);
	if (rc == 0)
		list_add_rcu(&vm->next, &virtio_mem_devices);
	mutex_unlock(&virtio_mem_mutex);

	return rc;
}
/*
 * Unregister a virtio-mem device so it will no longer be considered for the
 * online_page callback.
 */
static void unregister_virtio_mem_device(struct virtio_mem *vm)
{
	/* Last device unregisters the callback. */
	mutex_lock(&virtio_mem_mutex);
	list_del_rcu(&vm->next);
	if (list_empty(&virtio_mem_devices))
		restore_online_page_callback(&virtio_mem_online_page_cb);
	mutex_unlock(&virtio_mem_mutex);

	/* let all callers finish before freeing the memory */
	synchronize_rcu();
}
/*
 * Calculate the memory block id of a given address.
 */
static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
{
	return addr / memory_block_size_bytes();
}

/*
 * Calculate the physical start address of a given memory block id.
 */
static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
{
	return mb_id * memory_block_size_bytes();
}
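/*
 * Example with (hypothetical) 128 MiB memory blocks: physical address
 * 0x18000000 (384 MiB) maps to mb_id 3, and mb_id 3 maps back to the
 * start address 0x18000000.
 */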
/*
 * Calculate the subblock id of a given address.
 */
static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
					      unsigned long addr)
{
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
	const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);

	return (addr - mb_addr) / vm->sbm.sb_size;
}
/*
 * Set the state of a memory block, taking care of the state counter.
 */
static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm,
					unsigned long mb_id, uint8_t state)
{
	const unsigned long idx = mb_id - vm->sbm.first_mb_id;
	uint8_t old_state;

	old_state = vm->sbm.mb_states[idx];
	vm->sbm.mb_states[idx] = state;

	BUG_ON(vm->sbm.mb_count[old_state] == 0);
	vm->sbm.mb_count[old_state]--;
	vm->sbm.mb_count[state]++;
}
/*
 * Get the state of a memory block.
 */
static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm,
					   unsigned long mb_id)
{
	const unsigned long idx = mb_id - vm->sbm.first_mb_id;

	return vm->sbm.mb_states[idx];
}
/*
 * Prepare the state array for the next memory block.
 */
static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
{
	int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
	int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
	uint8_t *new_array;

	if (vm->sbm.mb_states && old_pages == new_pages)
		return 0;

	new_array = vzalloc(new_pages * PAGE_SIZE);
	if (!new_array)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->sbm.mb_states)
		memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
	vfree(vm->sbm.mb_states);
	vm->sbm.mb_states = new_array;
	mutex_unlock(&vm->hotplug_mutex);

	return 0;
}
#define virtio_mem_sbm_for_each_mb(_vm, _mb_id, _state) \
	for (_mb_id = _vm->sbm.first_mb_id; \
	     _mb_id < _vm->sbm.next_mb_id && _vm->sbm.mb_count[_state]; \
	     _mb_id++) \
		if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)

#define virtio_mem_sbm_for_each_mb_rev(_vm, _mb_id, _state) \
	for (_mb_id = _vm->sbm.next_mb_id - 1; \
	     _mb_id >= _vm->sbm.first_mb_id && _vm->sbm.mb_count[_state]; \
	     _mb_id--) \
		if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
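/*
 * Example usage (mirroring the plug path further below): walk all online
 * memory blocks that still have unplugged subblocks and try to plug them:
 *
 *	virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL)
 *		virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, true);
 */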
/*
 * Calculate the bit number in the subblock bitmap for the given subblock
 * inside the given memory block.
 */
static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id)
{
	return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id;
}
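/*
 * Example bitmap layout, assuming (hypothetically) 32 subblocks per memory
 * block: bits [0..31] track the first memory block, bits [32..63] the
 * second, so (mb_id == first_mb_id + 1, sb_id == 5) maps to bit 37.
 */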
/*
 * Mark all selected subblocks plugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id,
					  int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	__bitmap_set(vm->sbm.sb_states, bit, count);
}
/*
 * Mark all selected subblocks unplugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
					    unsigned long mb_id, int sb_id,
					    int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	__bitmap_clear(vm->sbm.sb_states, bit, count);
}
/*
 * Test if all selected subblocks are plugged.
 */
static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
					   unsigned long mb_id, int sb_id,
					   int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	if (count == 1)
		return test_bit(bit, vm->sbm.sb_states);

	/* TODO: Helper similar to bitmap_set() */
	return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >=
	       bit + count;
}
/*
 * Test if all selected subblocks are unplugged.
 */
static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
					     unsigned long mb_id, int sb_id,
					     int count)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);

	/* TODO: Helper similar to bitmap_set() */
	return find_next_bit(vm->sbm.sb_states, bit + count, bit) >=
	       bit + count;
}
/*
 * Find the first unplugged subblock. Returns vm->sbm.sbs_per_mb in case there
 * is none.
 */
static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
					     unsigned long mb_id)
{
	const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0);

	return find_next_zero_bit(vm->sbm.sb_states,
				  bit + vm->sbm.sbs_per_mb, bit) - bit;
}
/*
 * Prepare the subblock bitmap for the next memory block.
 */
static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
{
	const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id;
	const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb;
	const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb;
	int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
	int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
	unsigned long *new_bitmap, *old_bitmap;

	if (vm->sbm.sb_states && old_pages == new_pages)
		return 0;

	new_bitmap = vzalloc(new_pages * PAGE_SIZE);
	if (!new_bitmap)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->sbm.sb_states)
		memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);

	old_bitmap = vm->sbm.sb_states;
	vm->sbm.sb_states = new_bitmap;
	mutex_unlock(&vm->hotplug_mutex);

	vfree(old_bitmap);
	return 0;
}
/*
 * Test if we could add memory without creating too much offline memory -
 * to avoid running OOM if memory is getting onlined deferred.
 */
static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size)
{
	if (WARN_ON_ONCE(size > vm->offline_threshold))
		return false;

	return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold;
}
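/*
 * Example with the default 1 GiB threshold and (hypothetical) 128 MiB
 * memory blocks: with 768 MiB currently offline, adding one more block is
 * fine (896 MiB <= 1 GiB), while adding 512 MiB at once would be rejected.
 */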
/*
 * Try to add a memory block to Linux. This will usually only fail
 * if out of memory.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	const uint64_t size = memory_block_size_bytes();
	int rc;

	/*
	 * When force-unloading the driver and we still have memory added to
	 * Linux, the resource name has to stay.
	 */
	if (!vm->resource_name) {
		vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
						  GFP_KERNEL);
		if (!vm->resource_name)
			return -ENOMEM;
	}

	dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
	/* Memory might get onlined immediately. */
	atomic64_add(size, &vm->offline_size);
	rc = add_memory_driver_managed(vm->nid, addr, size, vm->resource_name,
				       MEMHP_MERGE_RESOURCE);
	if (rc)
		atomic64_sub(size, &vm->offline_size);
	return rc;
}
/*
 * Try to remove a memory block from Linux. Will only fail if the memory block
 * is not offline.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	const uint64_t size = memory_block_size_bytes();
	int rc;

	dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id);
	rc = remove_memory(vm->nid, addr, size);
	if (!rc) {
		atomic64_sub(size, &vm->offline_size);
		/*
		 * We might have freed up memory we can now unplug, retry
		 * immediately instead of waiting.
		 */
		virtio_mem_retry(vm);
	}
	return rc;
}
/*
 * Try to offline and remove a memory block from Linux.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_offline_and_remove(struct virtio_mem *vm,
					    unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
	const uint64_t size = memory_block_size_bytes();
	int rc;

	dev_dbg(&vm->vdev->dev, "offlining and removing memory block: %lu\n",
		mb_id);
	rc = offline_and_remove_memory(vm->nid, addr, size);
	if (!rc) {
		atomic64_sub(size, &vm->offline_size);
		/*
		 * We might have freed up memory we can now unplug, retry
		 * immediately instead of waiting.
		 */
		virtio_mem_retry(vm);
	}
	return rc;
}
/*
 * Trigger the workqueue so the device can perform its magic.
 */
static void virtio_mem_retry(struct virtio_mem *vm)
{
	unsigned long flags;

	spin_lock_irqsave(&vm->removal_lock, flags);
	if (!vm->removing)
		queue_work(system_freezable_wq, &vm->wq);
	spin_unlock_irqrestore(&vm->removal_lock, flags);
}
static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
{
	int node = NUMA_NO_NODE;

#if defined(CONFIG_ACPI_NUMA)
	if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
		node = pxm_to_node(node_id);
#endif
	return node;
}
/*
 * Test if a virtio-mem device overlaps with the given range. Can be called
 * from (notifier) callbacks lockless.
 */
static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start,
				      uint64_t size)
{
	return start < vm->addr + vm->region_size && vm->addr < start + size;
}
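/*
 * Note: both this helper and virtio_mem_contains_range() below treat
 * [start, start + size) as half-open ranges. For example, a device covering
 * [0x100000000, 0x140000000) does not overlap a range that starts exactly
 * at 0x140000000.
 */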
/*
 * Test if a virtio-mem device contains a given range. Can be called from
 * (notifier) callbacks lockless.
 */
static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start,
				      uint64_t size)
{
	return start >= vm->addr && start + size <= vm->addr + vm->region_size;
}
static int virtio_mem_notify_going_online(struct virtio_mem *vm,
					  unsigned long mb_id)
{
	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
	case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
	case VIRTIO_MEM_SBM_MB_OFFLINE:
		return NOTIFY_OK;
	default:
		break;
	}
	dev_warn_ratelimited(&vm->vdev->dev,
			     "memory block onlining denied\n");
	return NOTIFY_BAD;
}
static void virtio_mem_notify_offline(struct virtio_mem *vm,
				      unsigned long mb_id)
{
	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
	case VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
		break;
	case VIRTIO_MEM_SBM_MB_ONLINE:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE);
		break;
	default:
		BUG();
		break;
	}
}
static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id)
{
	switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
	case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL);
		break;
	case VIRTIO_MEM_SBM_MB_OFFLINE:
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_ONLINE);
		break;
	default:
		BUG();
		break;
	}
}
static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
					    unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
	unsigned long pfn;
	int sb_id;

	for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
		if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->sbm.sb_size);
		virtio_mem_fake_offline_going_offline(pfn, nr_pages);
	}
}
static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
					     unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
	unsigned long pfn;
	int sb_id;

	for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
		if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->sbm.sb_size);
		virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
	}
}
/*
 * This callback will either be called synchronously from add_memory() or
 * asynchronously (e.g., triggered via user space). We have to be careful
 * with locking when calling add_memory().
 */
static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
					 unsigned long action, void *arg)
{
	struct virtio_mem *vm = container_of(nb, struct virtio_mem,
					     memory_notifier);
	struct memory_notify *mhp = arg;
	const unsigned long start = PFN_PHYS(mhp->start_pfn);
	const unsigned long size = PFN_PHYS(mhp->nr_pages);
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(start);
	int rc = NOTIFY_OK;

	if (!virtio_mem_overlaps_range(vm, start, size))
		return NOTIFY_DONE;

	/*
	 * Memory is onlined/offlined in memory block granularity. We cannot
	 * cross virtio-mem device boundaries and memory block boundaries. Bail
	 * out if this ever changes.
	 */
	if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
			 !IS_ALIGNED(start, memory_block_size_bytes())))
		return NOTIFY_BAD;

	/*
	 * Avoid circular locking lockdep warnings. We lock the mutex
	 * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
	 * blocking_notifier_call_chain() has its own lock, which gets unlocked
	 * between both notifier calls and will bail out. False positive.
	 */
	lockdep_off();

	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		virtio_mem_notify_going_offline(vm, mb_id);
		break;
	case MEM_GOING_ONLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		rc = virtio_mem_notify_going_online(vm, mb_id);
		break;
	case MEM_OFFLINE:
		virtio_mem_notify_offline(vm, mb_id);

		atomic64_add(size, &vm->offline_size);
		/*
		 * Trigger the workqueue. Now that we have some offline memory,
		 * maybe we can handle pending unplug requests.
		 */
		if (!unplug_online)
			virtio_mem_retry(vm);

		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_ONLINE:
		virtio_mem_notify_online(vm, mb_id);

		atomic64_sub(size, &vm->offline_size);
		/*
		 * Start adding more memory once we onlined half of our
		 * threshold. Don't trigger if it's possibly due to our action
		 * (e.g., us adding memory which gets onlined immediately from
		 * the core).
		 */
		if (!atomic_read(&vm->wq_active) &&
		    virtio_mem_could_add_memory(vm, vm->offline_threshold / 2))
			virtio_mem_retry(vm);

		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_OFFLINE:
		if (!vm->hotplug_active)
			break;
		virtio_mem_notify_cancel_offline(vm, mb_id);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_ONLINE:
		if (!vm->hotplug_active)
			break;
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	default:
		break;
	}

	lockdep_on();

	return rc;
}
/*
 * Set a range of pages PG_offline. Remember pages that were never onlined
 * (via generic_online_page()) using PageDirty().
 */
static void virtio_mem_set_fake_offline(unsigned long pfn,
					unsigned long nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__SetPageOffline(page);
		if (!onlined) {
			SetPageDirty(page);
			/* FIXME: remove after cleanups */
			ClearPageReserved(page);
		}
	}
}
/*
 * Clear PG_offline from a range of pages. If the pages were never onlined
 * (via generic_online_page()), clear PageDirty().
 */
static void virtio_mem_clear_fake_offline(unsigned long pfn,
					  unsigned long nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__ClearPageOffline(page);
		if (!onlined)
			ClearPageDirty(page);
	}
}
/*
 * Release a range of fake-offline pages to the buddy, effectively
 * fake-onlining them.
 */
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
	const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES;
	unsigned long i;

	/*
	 * We are always called at least with MAX_ORDER_NR_PAGES
	 * granularity/alignment (e.g., the way subblocks work). All pages
	 * inside such a block are alike.
	 */
	for (i = 0; i < nr_pages; i += max_nr_pages) {
		struct page *page = pfn_to_page(pfn + i);

		/*
		 * If the page is PageDirty(), it was kept fake-offline when
		 * onlining the memory block. Otherwise, it was allocated
		 * using alloc_contig_range(). All pages in a subblock are
		 * alike.
		 */
		if (PageDirty(page)) {
			virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
						      false);
			generic_online_page(page, MAX_ORDER - 1);
		} else {
			virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
						      true);
			free_contig_range(pfn + i, max_nr_pages);
			adjust_managed_page_count(page, max_nr_pages);
		}
	}
}
/*
 * Try to allocate a range, marking pages fake-offline, effectively
 * fake-offlining them.
 */
static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
{
	const bool is_movable = zone_idx(page_zone(pfn_to_page(pfn))) ==
				ZONE_MOVABLE;
	int rc, retry_count;

	/*
	 * TODO: We want an alloc_contig_range() mode that tries to allocate
	 * harder (e.g., dealing with temporarily pinned pages, PCP), especially
	 * with ZONE_MOVABLE. So for now, retry a couple of times with
	 * ZONE_MOVABLE before giving up - because that zone is supposed to give
	 * us hard guarantees.
	 */
	for (retry_count = 0; retry_count < 5; retry_count++) {
		rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
					GFP_KERNEL);
		if (rc == -ENOMEM)
			/* whoops, out of memory */
			return rc;
		else if (rc && !is_movable)
			break;
		else if (rc)
			continue;

		virtio_mem_set_fake_offline(pfn, nr_pages, true);
		adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
		return 0;
	}

	return -EBUSY;
}
/*
 * Handle fake-offline pages when memory is going offline - such that the
 * pages can be skipped by mm-core when offlining.
 */
static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
						  unsigned long nr_pages)
{
	struct page *page;
	unsigned long i;

	/*
	 * Drop our reference to the pages so the memory can get offlined
	 * and add the unplugged pages to the managed page counters (so
	 * offlining code can correctly subtract them again).
	 */
	adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
	for (i = 0; i < nr_pages; i++) {
		page = pfn_to_page(pfn + i);
		if (WARN_ON(!page_ref_dec_and_test(page)))
			dump_page(page, "fake-offline page referenced");
	}
}
/*
 * Handle fake-offline pages when memory offlining is canceled - to undo
 * what we did in virtio_mem_fake_offline_going_offline().
 */
static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
						   unsigned long nr_pages)
{
	unsigned long i;

	/*
	 * Get the reference we dropped when going offline and subtract the
	 * unplugged pages from the managed page counters.
	 */
	adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
	for (i = 0; i < nr_pages; i++)
		page_ref_inc(pfn_to_page(pfn + i));
}
static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
{
	const unsigned long addr = page_to_phys(page);
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
	struct virtio_mem *vm;
	int sb_id;

	/*
	 * We exploit here that subblocks have at least MAX_ORDER_NR_PAGES
	 * size/alignment and that this callback is called with such a
	 * size/alignment. So we cannot cross subblocks and therefore
	 * also not memory blocks.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
		if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
			continue;

		sb_id = virtio_mem_phys_to_sb_id(vm, addr);
		/*
		 * If plugged, online the pages, otherwise, set them fake
		 * offline (PageOffline).
		 */
		if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			generic_online_page(page, order);
		else
			virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
						    false);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* not virtio-mem memory, but e.g., a DIMM. online it */
	generic_online_page(page, order);
}
static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
					const struct virtio_mem_req *req)
{
	struct scatterlist *sgs[2], sg_req, sg_resp;
	unsigned int len;
	int rc;

	/* don't use the request residing on the stack (vaddr) */
	vm->req = *req;

	/* out: buffer for request */
	sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
	sgs[0] = &sg_req;

	/* in: buffer for response */
	sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
	sgs[1] = &sg_resp;

	rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
	if (rc < 0)
		return rc;

	virtqueue_kick(vm->vq);

	/* wait for a response */
	wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));

	return virtio16_to_cpu(vm->vdev, vm->resp.type);
}
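/*
 * Note: there is only a single request/response buffer pair (vm->req /
 * vm->resp) and no locking here; this relies on requests being fully
 * serialized via the workqueue, which is the only context issuing them.
 */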
static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
					uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
		.u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};
	int rc = -ENOMEM;

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size += size;
		return 0;
	case VIRTIO_MEM_RESP_NACK:
		rc = -EAGAIN;
		break;
	case VIRTIO_MEM_RESP_BUSY:
		rc = -ETXTBSY;
		break;
	case VIRTIO_MEM_RESP_ERROR:
		rc = -EINVAL;
		break;
	default:
		break;
	}

	dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc);
	return rc;
}
static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
					  uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
		.u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};
	int rc = -ENOMEM;

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
		addr + size - 1);

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size -= size;
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		rc = -ETXTBSY;
		break;
	case VIRTIO_MEM_RESP_ERROR:
		rc = -EINVAL;
		break;
	default:
		break;
	}

	dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc);
	return rc;
}
static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
{
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
	};
	int rc = -ENOMEM;

	dev_dbg(&vm->vdev->dev, "unplugging all memory");

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->unplug_all_required = false;
		vm->plugged_size = 0;
		/* usable region might have shrunk */
		atomic_set(&vm->config_changed, 1);
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		rc = -ETXTBSY;
		break;
	default:
		break;
	}

	dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc);
	return rc;
}
/*
 * Plug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
				  int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->sbm.sb_size;
	const uint64_t size = count * vm->sbm.sb_size;
	int rc;

	rc = virtio_mem_send_plug_request(vm, addr, size);
	if (!rc)
		virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
	return rc;
}
/*
 * Unplug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
				    int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->sbm.sb_size;
	const uint64_t size = count * vm->sbm.sb_size;
	int rc;

	rc = virtio_mem_send_unplug_request(vm, addr, size);
	if (!rc)
		virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
	return rc;
}
/*
 * Unplug the desired number of plugged subblocks of an offline or not-added
 * memory block. Will fail if any subblock cannot get unplugged (instead of
 * skipping it).
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
					unsigned long mb_id, uint64_t *nb_sb)
{
	int sb_id, count;
	int rc;

	sb_id = vm->sbm.sbs_per_mb - 1;
	while (*nb_sb) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;

		/* Try to unplug multiple subblocks at a time */
		count = 1;
		while (count < *nb_sb && sb_id > 0 &&
		       virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
			count++;
			sb_id--;
		}

		rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		sb_id--;
	}

	return 0;
}
/*
 * Unplug all plugged subblocks of an offline or not-added memory block.
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	uint64_t nb_sb = vm->sbm.sbs_per_mb;

	return virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
}
/*
 * Prepare tracking data for the next memory block.
 */
static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
					  unsigned long *mb_id)
{
	int rc;

	if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id)
		return -ENOSPC;

	/* Resize the state array if required. */
	rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm);
	if (rc)
		return rc;

	/* Resize the subblock bitmap if required. */
	rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
	if (rc)
		return rc;

	vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++;
	*mb_id = vm->sbm.next_mb_id++;
	return 0;
}
/*
 * Try to plug the desired number of subblocks and add the memory block
 * to Linux.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
					  unsigned long mb_id, uint64_t *nb_sb)
{
	const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
	int rc;

	if (WARN_ON_ONCE(!count))
		return -EINVAL;

	/*
	 * Plug the requested number of subblocks before adding it to linux,
	 * so that onlining will directly online all plugged subblocks.
	 */
	rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count);
	if (rc)
		return rc;

	/*
	 * Mark the block properly offline before adding it to Linux,
	 * so the memory notifiers will find the block in the right state.
	 */
	if (count == vm->sbm.sbs_per_mb)
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE);
	else
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);

	/* Add the memory block to linux - if that fails, try to unplug. */
	rc = virtio_mem_mb_add(vm, mb_id);
	if (rc) {
		int new_state = VIRTIO_MEM_SBM_MB_UNUSED;

		dev_err(&vm->vdev->dev,
			"adding memory block %lu failed with %d\n", mb_id, rc);

		/*
		 * TODO: Linux MM does not properly clean up yet in all cases
		 * where adding of memory failed - especially on -ENOMEM.
		 */
		if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count))
			new_state = VIRTIO_MEM_SBM_MB_PLUGGED;
		virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
		return rc;
	}

	*nb_sb -= count;
	return 0;
}
/*
 * Try to plug the desired number of subblocks of a memory block that
 * is already added to Linux.
 *
 * Will modify the state of the memory block.
 *
 * Note: Can fail after some subblocks were successfully plugged.
 */
static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
				      unsigned long mb_id, uint64_t *nb_sb,
				      bool online)
{
	unsigned long pfn, nr_pages;
	int sb_id, count;
	int rc;

	if (WARN_ON_ONCE(!*nb_sb))
		return -EINVAL;

	while (*nb_sb) {
		sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
		if (sb_id >= vm->sbm.sbs_per_mb)
			break;
		count = 1;
		while (count < *nb_sb &&
		       sb_id + count < vm->sbm.sbs_per_mb &&
		       !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
			count++;

		rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		if (!online)
			continue;

		/* fake-online the pages if the memory block is online */
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->sbm.sb_size);
		nr_pages = PFN_DOWN(count * vm->sbm.sb_size);
		virtio_mem_fake_online(pfn, nr_pages);
	}

	if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		if (online)
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_ONLINE);
		else
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_OFFLINE);
	}

	return 0;
}
/*
 * Try to plug the requested amount of memory.
 */
static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_sb = diff / vm->sbm.sb_size;
	unsigned long mb_id;
	int rc;

	if (!nb_sb)
		return 0;

	/* Don't race with onlining/offlining */
	mutex_lock(&vm->hotplug_mutex);

	/* Try to plug subblocks of partially plugged online blocks. */
	virtio_mem_sbm_for_each_mb(vm, mb_id,
				   VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
		rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, true);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/* Try to plug subblocks of partially plugged offline blocks. */
	virtio_mem_sbm_for_each_mb(vm, mb_id,
				   VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
		rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, false);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/*
	 * We won't be working on online/offline memory blocks from this point,
	 * so we can't race with memory onlining/offlining. Drop the mutex.
	 */
	mutex_unlock(&vm->hotplug_mutex);

	/* Try to plug and add unused blocks */
	virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
		if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
			return -ENOSPC;

		rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			return rc;
		cond_resched();
	}

	/* Try to prepare, plug and add new blocks */
	while (nb_sb) {
		if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
			return -ENOSPC;

		rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id);
		if (rc)
			return rc;
		rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
		if (rc)
			return rc;
		cond_resched();
	}

	return 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}
/*
 * Unplug the desired number of plugged subblocks of an offline memory block.
 * Will fail if any subblock cannot get unplugged (instead of skipping it).
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
						unsigned long mb_id,
						uint64_t *nb_sb)
{
	int rc;

	rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, nb_sb);

	/* some subblocks might have been unplugged even on failure */
	if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
	if (rc)
		return rc;

	if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		/*
		 * Remove the block from Linux - this should never fail.
		 * Hinder the block from getting onlined by marking it
		 * unplugged. Temporarily drop the mutex, so
		 * any pending GOING_ONLINE requests can be serviced/rejected.
		 */
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_UNUSED);

		mutex_unlock(&vm->hotplug_mutex);
		rc = virtio_mem_mb_remove(vm, mb_id);
		BUG_ON(rc);
		mutex_lock(&vm->hotplug_mutex);
	}
	return 0;
}
/*
 * Unplug the given plugged subblocks of an online memory block.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
					   unsigned long mb_id, int sb_id,
					   int count)
{
	const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
	unsigned long start_pfn;
	int rc;

	start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			     sb_id * vm->sbm.sb_size);

	rc = virtio_mem_fake_offline(start_pfn, nr_pages);
	if (rc)
		return rc;

	/* Try to unplug the allocated memory */
	rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
	if (rc) {
		/* Return the memory to the buddy. */
		virtio_mem_fake_online(start_pfn, nr_pages);
		return rc;
	}

	virtio_mem_sbm_set_mb_state(vm, mb_id,
				    VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL);
	return 0;
}
/*
 * Unplug the desired number of plugged subblocks of an online memory block.
 * Will skip subblocks that are busy.
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged. Can
 *       return 0 even if subblocks were busy and could not get unplugged.
 */
static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
					       unsigned long mb_id,
					       uint64_t *nb_sb)
{
	int rc, sb_id;

	/* If possible, try to unplug the complete block in one shot. */
	if (*nb_sb >= vm->sbm.sbs_per_mb &&
	    virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0,
						     vm->sbm.sbs_per_mb);
		if (!rc) {
			*nb_sb -= vm->sbm.sbs_per_mb;
			goto unplugged;
		} else if (rc != -EBUSY)
			return rc;
	}

	/* Fallback to single subblocks. */
	for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;

		rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1);
		if (rc == -EBUSY)
			continue;
		else if (rc)
			return rc;
		*nb_sb -= 1;
	}

unplugged:
	/*
	 * Once all subblocks of a memory block were unplugged, offline and
	 * remove it. This will usually not fail, as no memory is in use
	 * anymore - however some other notifiers might NACK the request.
	 */
	if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
		mutex_unlock(&vm->hotplug_mutex);
		rc = virtio_mem_mb_offline_and_remove(vm, mb_id);
		mutex_lock(&vm->hotplug_mutex);
		if (!rc)
			virtio_mem_sbm_set_mb_state(vm, mb_id,
						    VIRTIO_MEM_SBM_MB_UNUSED);
	}

	return 0;
}
/*
 * Try to unplug the requested amount of memory.
 */
static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_sb = diff / vm->sbm.sb_size;
	unsigned long mb_id;
	int rc;

	if (!nb_sb)
		return 0;

	/*
	 * We'll drop the mutex a couple of times when it is safe to do so.
	 * This might result in some blocks switching the state (online/offline)
	 * and we could miss them in this run - we will retry again later.
	 */
	mutex_lock(&vm->hotplug_mutex);

	/* Try to unplug subblocks of partially plugged offline blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
				       VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
		rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/* Try to unplug subblocks of plugged offline blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_OFFLINE) {
		rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	if (!unplug_online) {
		mutex_unlock(&vm->hotplug_mutex);
		return nb_sb ? -EBUSY : 0;
	}

	/* Try to unplug subblocks of partially plugged online blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
				       VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
		rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		mutex_unlock(&vm->hotplug_mutex);
		cond_resched();
		mutex_lock(&vm->hotplug_mutex);
	}

	/* Try to unplug subblocks of plugged online blocks. */
	virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_ONLINE) {
		rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		mutex_unlock(&vm->hotplug_mutex);
		cond_resched();
		mutex_lock(&vm->hotplug_mutex);
	}

	mutex_unlock(&vm->hotplug_mutex);
	return nb_sb ? -EBUSY : 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}
/*
 * Try to unplug all blocks that couldn't be unplugged before, for example,
 * because the hypervisor was busy.
 */
static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
{
	unsigned long mb_id;
	int rc;

	virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_PLUGGED) {
		rc = virtio_mem_sbm_unplug_mb(vm, mb_id);
		if (rc)
			return rc;
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_UNUSED);
	}

	return 0;
}
/*
 * Update all parts of the config that could have changed.
 */
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
	const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
	uint64_t new_plugged_size, usable_region_size, end_addr;

	/* the plugged_size is just a reflection of what _we_ did previously */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&new_plugged_size);
	if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
		vm->plugged_size = new_plugged_size;

	/* calculate the last usable memory block id */
	virtio_cread_le(vm->vdev, struct virtio_mem_config,
			usable_region_size, &usable_region_size);
	end_addr = vm->addr + usable_region_size;
	end_addr = min(end_addr, phys_limit);
	vm->sbm.last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;

	/* see if there is a request to change the size */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
			&vm->requested_size);

	dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
	dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
}
/*
 * Workqueue function for handling plug/unplug requests and config updates.
 */
static void virtio_mem_run_wq(struct work_struct *work)
{
	struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
	uint64_t diff;
	int rc;

	hrtimer_cancel(&vm->retry_timer);

	if (vm->broken)
		return;

	atomic_set(&vm->wq_active, 1);
retry:
	rc = 0;

	/* Make sure we start with a clean state if there are leftovers. */
	if (unlikely(vm->unplug_all_required))
		rc = virtio_mem_send_unplug_all_request(vm);

	if (atomic_read(&vm->config_changed)) {
		atomic_set(&vm->config_changed, 0);
		virtio_mem_refresh_config(vm);
	}

	/* Unplug any leftovers from previous runs */
	if (!rc)
		rc = virtio_mem_unplug_pending_mb(vm);

	if (!rc && vm->requested_size != vm->plugged_size) {
		if (vm->requested_size > vm->plugged_size) {
			diff = vm->requested_size - vm->plugged_size;
			rc = virtio_mem_plug_request(vm, diff);
		} else {
			diff = vm->plugged_size - vm->requested_size;
			rc = virtio_mem_unplug_request(vm, diff);
		}
	}

	switch (rc) {
	case 0:
		vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
		break;
	case -ENOSPC:
		/*
		 * We cannot add any more memory (alignment, physical limit)
		 * or we have too many offline memory blocks.
		 */
		break;
	case -ETXTBSY:
		/*
		 * The hypervisor cannot process our request right now
		 * (e.g., out of memory, migrating);
		 */
	case -EBUSY:
		/*
		 * We cannot free up any memory to unplug it (all plugged memory
		 * is busy).
		 */
	case -ENOMEM:
		/* Out of memory, try again later. */
		hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
			      HRTIMER_MODE_REL);
		break;
	case -EAGAIN:
		/* Retry immediately (e.g., the config changed). */
		goto retry;
	default:
		/* Unknown error, mark as broken */
		dev_err(&vm->vdev->dev,
			"unknown error, marking device broken: %d\n", rc);
		vm->broken = true;
	}

	atomic_set(&vm->wq_active, 0);
}
static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
{
	struct virtio_mem *vm = container_of(timer, struct virtio_mem,
					     retry_timer);

	virtio_mem_retry(vm);
	vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
				   VIRTIO_MEM_RETRY_TIMER_MAX_MS);
	return HRTIMER_NORESTART;
}
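/*
 * With the constants above, retries back off as 50s, 100s, 200s and are
 * then capped at 300s, until a successful run resets the timeout to the
 * 50s minimum again (see virtio_mem_run_wq()).
 */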
static void virtio_mem_handle_response(struct virtqueue *vq)
{
	struct virtio_mem *vm = vq->vdev->priv;

	wake_up(&vm->host_resp);
}
static int virtio_mem_init_vq(struct virtio_mem *vm)
{
	struct virtqueue *vq;

	vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
				   "guest-request");
	if (IS_ERR(vq))
		return PTR_ERR(vq);
	vm->vq = vq;

	return 0;
}
static int virtio_mem_init(struct virtio_mem *vm)
{
	const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
	uint16_t node_id;

	if (!vm->vdev->config->get) {
		dev_err(&vm->vdev->dev, "config access disabled\n");
		return -EINVAL;
	}

	/*
	 * We don't want to (un)plug or reuse any memory when in kdump. The
	 * memory is still accessible (but not mapped).
	 */
	if (is_kdump_kernel()) {
		dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
		return -EBUSY;
	}

	/* Fetch all properties that can't change. */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&vm->plugged_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
			&vm->device_block_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
			&node_id);
	vm->nid = virtio_mem_translate_node_id(vm, node_id);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
			&vm->region_size);

	/* Determine the nid for the device based on the lowest address. */
	if (vm->nid == NUMA_NO_NODE)
		vm->nid = memory_add_physaddr_to_nid(vm->addr);

	/*
	 * We always hotplug memory in memory block granularity. This way,
	 * we have to wait for exactly one memory block to come online.
	 */
	if (vm->device_block_size > memory_block_size_bytes()) {
		dev_err(&vm->vdev->dev,
			"The block size is not supported (too big).\n");
		return -EINVAL;
	}

	/* bad device setup - warn only */
	if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical start address can make some memory unusable.\n");
	if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical end address can make some memory unusable.\n");
	if (vm->addr + vm->region_size > phys_limit)
		dev_warn(&vm->vdev->dev,
			 "Some memory is not addressable. This can make some memory unusable.\n");

	/*
	 * We want subblocks to span at least MAX_ORDER_NR_PAGES and
	 * pageblock_nr_pages pages. This:
	 * - Simplifies our page onlining code (virtio_mem_online_page_cb)
	 *   and fake page onlining code (virtio_mem_fake_online).
	 * - Is required for now for alloc_contig_range() to work reliably -
	 *   it doesn't properly handle smaller granularity on ZONE_NORMAL.
	 */
	vm->sbm.sb_size = max_t(uint64_t, MAX_ORDER_NR_PAGES,
				pageblock_nr_pages) * PAGE_SIZE;
	vm->sbm.sb_size = max_t(uint64_t, vm->device_block_size,
				vm->sbm.sb_size);
	vm->sbm.sbs_per_mb = memory_block_size_bytes() / vm->sbm.sb_size;
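
	/*
	 * Example (hypothetical x86-64 configuration): with 4 KiB base pages,
	 * MAX_ORDER_NR_PAGES and pageblock_nr_pages typically yield a 4 MiB
	 * minimum; a 2 MiB device block size then results in a 4 MiB subblock
	 * size and, with 128 MiB memory blocks, sbs_per_mb = 32.
	 */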
	/* Round up to the next full memory block */
	vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(vm->addr - 1 +
						       memory_block_size_bytes());
	vm->sbm.next_mb_id = vm->sbm.first_mb_id;

	/* Prepare the offline threshold - make sure we can add two blocks. */
	vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
				      VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
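	/*
	 * For example, with (typical) 128 MiB memory blocks this resolves to
	 * max(256 MiB, 1 GiB) = 1 GiB; with 2 GiB memory blocks it grows to
	 * 4 GiB, so that two blocks may always be offline at a time.
	 */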
	dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
	dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
	dev_info(&vm->vdev->dev, "device block size: 0x%llx",
		 (unsigned long long)vm->device_block_size);
	dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
		 memory_block_size_bytes());
	dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
		 (unsigned long long)vm->sbm.sb_size);
	if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
		dev_info(&vm->vdev->dev, "nid: %d", vm->nid);

	return 0;
}
static int virtio_mem_create_resource(struct virtio_mem *vm)
{
	/*
	 * When force-unloading the driver and removing the device, we
	 * could have a garbage pointer. Duplicate the string.
	 */
	const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);

	if (!name)
		return -ENOMEM;

	vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
						   name, IORESOURCE_SYSTEM_RAM);
	if (!vm->parent_resource) {
		kfree(name);
		dev_warn(&vm->vdev->dev, "could not reserve device region\n");
		dev_info(&vm->vdev->dev,
			 "reloading the driver is not supported\n");
		return -EBUSY;
	}

	/* The memory is not actually busy - make add_memory() work. */
	vm->parent_resource->flags &= ~IORESOURCE_BUSY;
	return 0;
}
static void virtio_mem_delete_resource(struct virtio_mem *vm)
{
	const char *name;

	if (!vm->parent_resource)
		return;

	name = vm->parent_resource->name;
	release_resource(vm->parent_resource);
	kfree(vm->parent_resource);
	kfree(name);
	vm->parent_resource = NULL;
}
static int virtio_mem_range_has_system_ram(struct resource *res, void *arg)
{
	return 1;
}

static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
{
	const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr,
				   vm->addr + vm->region_size, NULL,
				   virtio_mem_range_has_system_ram) == 1;
}
static int virtio_mem_probe(struct virtio_device *vdev)
{
	struct virtio_mem *vm;
	int rc;

	BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
	BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);

	vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	init_waitqueue_head(&vm->host_resp);
	vm->vdev = vdev;
	INIT_WORK(&vm->wq, virtio_mem_run_wq);
	mutex_init(&vm->hotplug_mutex);
	INIT_LIST_HEAD(&vm->next);
	spin_lock_init(&vm->removal_lock);
	hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vm->retry_timer.function = virtio_mem_timer_expired;
	vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;

	/* register the virtqueue */
	rc = virtio_mem_init_vq(vm);
	if (rc)
		goto out_free_vm;

	/* initialize the device by querying the config */
	rc = virtio_mem_init(vm);
	if (rc)
		goto out_del_vq;

	/* create the parent resource for all memory */
	rc = virtio_mem_create_resource(vm);
	if (rc)
		goto out_del_vq;

	/*
	 * If we still have memory plugged, we have to unplug all memory first.
	 * Registering our parent resource makes sure that this memory isn't
	 * actually in use (e.g., trying to reload the driver).
	 */
	if (vm->plugged_size) {
		vm->unplug_all_required = true;
		dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
	}

	/* register callbacks */
	vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
	rc = register_memory_notifier(&vm->memory_notifier);
	if (rc)
		goto out_del_resource;
	rc = register_virtio_mem_device(vm);
	if (rc)
		goto out_unreg_mem;

	virtio_device_ready(vdev);

	/* trigger a config update to start processing the requested_size */
	atomic_set(&vm->config_changed, 1);
	queue_work(system_freezable_wq, &vm->wq);

	return 0;
out_unreg_mem:
	unregister_memory_notifier(&vm->memory_notifier);
out_del_resource:
	virtio_mem_delete_resource(vm);
out_del_vq:
	vdev->config->del_vqs(vdev);
out_free_vm:
	kfree(vm);
	vdev->priv = NULL;

	return rc;
}
static void virtio_mem_remove(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;
	unsigned long mb_id;
	int rc;

	/*
	 * Make sure the workqueue won't be triggered anymore and no memory
	 * blocks can be onlined/offlined until we're finished here.
	 */
	mutex_lock(&vm->hotplug_mutex);
	spin_lock_irq(&vm->removal_lock);
	vm->removing = true;
	spin_unlock_irq(&vm->removal_lock);
	mutex_unlock(&vm->hotplug_mutex);

	/* wait until the workqueue stopped */
	cancel_work_sync(&vm->wq);
	hrtimer_cancel(&vm->retry_timer);

	/*
	 * After we unregistered our callbacks, user space can online partially
	 * plugged offline blocks. Make sure to remove them.
	 */
	virtio_mem_sbm_for_each_mb(vm, mb_id,
				   VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
		rc = virtio_mem_mb_remove(vm, mb_id);
		BUG_ON(rc);
		virtio_mem_sbm_set_mb_state(vm, mb_id,
					    VIRTIO_MEM_SBM_MB_UNUSED);
	}
	/*
	 * After we unregistered our callbacks, user space can no longer
	 * offline partially plugged online memory blocks. No need to worry
	 * about them.
	 */

	/* unregister callbacks */
	unregister_virtio_mem_device(vm);
	unregister_memory_notifier(&vm->memory_notifier);

	/*
	 * There is no way we could reliably remove all memory we have added to
	 * the system. And there is no way to stop the driver/device from going
	 * away. Warn at least.
	 */
	if (virtio_mem_has_memory_added(vm)) {
		dev_warn(&vdev->dev, "device still has system memory added\n");
	} else {
		virtio_mem_delete_resource(vm);
		kfree_const(vm->resource_name);
	}

	/* remove all tracking data - no locking needed */
	vfree(vm->sbm.mb_states);
	vfree(vm->sbm.sb_states);

	/* reset the device and cleanup the queues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	kfree(vm);
	vdev->priv = NULL;
}
static void virtio_mem_config_changed(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;

	atomic_set(&vm->config_changed, 1);
	virtio_mem_retry(vm);
}
#ifdef CONFIG_PM_SLEEP
static int virtio_mem_freeze(struct virtio_device *vdev)
{
	/*
	 * When restarting the VM, all memory is usually unplugged. Don't
	 * allow to suspend/hibernate.
	 */
	dev_err(&vdev->dev, "save/restore not supported.\n");
	return -EPERM;
}

static int virtio_mem_restore(struct virtio_device *vdev)
{
	return -EPERM;
}
#endif
static unsigned int virtio_mem_features[] = {
#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
	VIRTIO_MEM_F_ACPI_PXM,
#endif
};

static const struct virtio_device_id virtio_mem_id_table[] = {
	{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
static struct virtio_driver virtio_mem_driver = {
	.feature_table = virtio_mem_features,
	.feature_table_size = ARRAY_SIZE(virtio_mem_features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = virtio_mem_id_table,
	.probe = virtio_mem_probe,
	.remove = virtio_mem_remove,
	.config_changed = virtio_mem_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_mem_freeze,
	.restore = virtio_mem_restore,
#endif
};

module_virtio_driver(virtio_mem_driver);
MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
MODULE_DESCRIPTION("Virtio-mem driver");
MODULE_LICENSE("GPL");