// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio-mem device driver.
 *
 * Copyright Red Hat, Inc. 2020
 *
 * Author(s): David Hildenbrand <david@redhat.com>
 */

#include <linux/virtio.h>
#include <linux/virtio_mem.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/hrtimer.h>
#include <linux/crash_dump.h>
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/lockdep.h>

#include <acpi/acpi_numa.h>

static bool unplug_online = true;
module_param(unplug_online, bool, 0644);
MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");
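
/*
 * Note: as a 0644 module parameter, unplug_online can typically be toggled
 * at runtime, e.g., via /sys/module/virtio_mem/parameters/unplug_online.
 */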

enum virtio_mem_mb_state {
	/* Unplugged, not added to Linux. Can be reused later. */
	VIRTIO_MEM_MB_STATE_UNUSED = 0,
	/* (Partially) plugged, not added to Linux. Error on add_memory(). */
	VIRTIO_MEM_MB_STATE_PLUGGED,
	/* Fully plugged, fully added to Linux, offline. */
	VIRTIO_MEM_MB_STATE_OFFLINE,
	/* Partially plugged, fully added to Linux, offline. */
	VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL,
	/* Fully plugged, fully added to Linux, online. */
	VIRTIO_MEM_MB_STATE_ONLINE,
	/* Partially plugged, fully added to Linux, online. */
	VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL,
	VIRTIO_MEM_MB_STATE_COUNT
};

struct virtio_mem {
	struct virtio_device *vdev;

	/* We might first have to unplug all memory when starting up. */
	bool unplug_all_required;

	/* Workqueue that processes the plug/unplug requests. */
	struct work_struct wq;
	atomic_t config_changed;

	/* Virtqueue for guest->host requests. */
	struct virtqueue *vq;

	/* Wait for a host response to a guest request. */
	wait_queue_head_t host_resp;

	/* Space for one guest request and the host response. */
	struct virtio_mem_req req;
	struct virtio_mem_resp resp;

	/* The current size of the device. */
	uint64_t plugged_size;
	/* The requested size of the device. */
	uint64_t requested_size;

	/* The device block size (for communicating with the device). */
	uint64_t device_block_size;
	/* The determined node id for all memory of the device. */
	int nid;
	/* Physical start address of the memory region. */
	uint64_t addr;
	/* Maximum region size in bytes. */
	uint64_t region_size;

	/* The subblock size. */
	uint64_t subblock_size;
	/* The number of subblocks per memory block. */
	uint32_t nb_sb_per_mb;

	/* Id of the first memory block of this device. */
	unsigned long first_mb_id;
	/* Id of the last memory block of this device. */
	unsigned long last_mb_id;
	/* Id of the last usable memory block of this device. */
	unsigned long last_usable_mb_id;
	/* Id of the next memory block to prepare when needed. */
	unsigned long next_mb_id;

	/* The parent resource for all memory added via this device. */
	struct resource *parent_resource;
	/*
	 * Copy of "System RAM (virtio_mem)" to be used for
	 * add_memory_driver_managed().
	 */
	const char *resource_name;

	/* Summary of all memory block states. */
	unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
#define VIRTIO_MEM_NB_OFFLINE_THRESHOLD		10

	/*
	 * One byte state per memory block.
	 *
	 * Allocated via vmalloc(). When preparing new blocks, resized
	 * (alloc+copy+free) when needed (when crossing pages with the
	 * next mb).
	 *
	 * With 128MB memory blocks, we have states for 512GB of memory in one
	 * page.
	 */
	uint8_t *mb_state;

	/*
	 * $nb_sb_per_mb bit per memory block. Handled similar to mb_state.
	 *
	 * With 4MB subblocks, we manage 128GB of memory in one page.
	 */
	unsigned long *sb_bitmap;

	/*
	 * Mutex that protects the nb_mb_state, mb_state, and sb_bitmap.
	 *
	 * When this lock is held the pointers can't change, ONLINE and
	 * OFFLINE blocks can't change the state and no subblocks will get
	 * plugged/unplugged.
	 */
	struct mutex hotplug_mutex;
	bool hotplug_active;

	/* An error occurred we cannot handle - stop processing requests. */
	bool broken;

	/* The driver is being removed. */
	spinlock_t removal_lock;
	bool removing;

	/* Timer for retrying to plug/unplug memory. */
	struct hrtimer retry_timer;
	unsigned int retry_timer_ms;
#define VIRTIO_MEM_RETRY_TIMER_MIN_MS		50000
#define VIRTIO_MEM_RETRY_TIMER_MAX_MS		300000

	/* Memory notifier (online/offline events). */
	struct notifier_block memory_notifier;

	/* Next device in the list of virtio-mem devices. */
	struct list_head next;
};

/*
 * We have to share a single online_page callback among all virtio-mem
 * devices. We use RCU to iterate the list in the callback.
 */
static DEFINE_MUTEX(virtio_mem_mutex);
static LIST_HEAD(virtio_mem_devices);

static void virtio_mem_online_page_cb(struct page *page, unsigned int order);

/*
 * Register a virtio-mem device so it will be considered for the online_page
 * callback.
 */
static int register_virtio_mem_device(struct virtio_mem *vm)
{
	int rc = 0;

	/* First device registers the callback. */
	mutex_lock(&virtio_mem_mutex);
	if (list_empty(&virtio_mem_devices))
		rc = set_online_page_callback(&virtio_mem_online_page_cb);
	if (!rc)
		list_add_rcu(&vm->next, &virtio_mem_devices);
	mutex_unlock(&virtio_mem_mutex);

	return rc;
}

/*
 * Unregister a virtio-mem device so it will no longer be considered for the
 * online_page callback.
 */
static void unregister_virtio_mem_device(struct virtio_mem *vm)
{
	/* Last device unregisters the callback. */
	mutex_lock(&virtio_mem_mutex);
	list_del_rcu(&vm->next);
	if (list_empty(&virtio_mem_devices))
		restore_online_page_callback(&virtio_mem_online_page_cb);
	mutex_unlock(&virtio_mem_mutex);

	synchronize_rcu();
}

/*
 * Calculate the memory block id of a given address.
 */
static unsigned long virtio_mem_phys_to_mb_id(unsigned long addr)
{
	return addr / memory_block_size_bytes();
}

/*
 * Calculate the physical start address of a given memory block id.
 */
static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
{
	return mb_id * memory_block_size_bytes();
}

/*
 * Calculate the subblock id of a given address.
 */
static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
					      unsigned long addr)
{
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
	const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);

	return (addr - mb_addr) / vm->subblock_size;
}

/*
 * Set the state of a memory block, taking care of the state counter.
 */
static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id,
				    enum virtio_mem_mb_state state)
{
	const unsigned long idx = mb_id - vm->first_mb_id;
	enum virtio_mem_mb_state old_state;

	old_state = vm->mb_state[idx];
	vm->mb_state[idx] = state;

	BUG_ON(vm->nb_mb_state[old_state] == 0);
	vm->nb_mb_state[old_state]--;
	vm->nb_mb_state[state]++;
}

/*
 * Get the state of a memory block.
 */
static enum virtio_mem_mb_state virtio_mem_mb_get_state(struct virtio_mem *vm,
							unsigned long mb_id)
{
	const unsigned long idx = mb_id - vm->first_mb_id;

	return vm->mb_state[idx];
}

/*
 * Prepare the state array for the next memory block.
 */
static int virtio_mem_mb_state_prepare_next_mb(struct virtio_mem *vm)
{
	unsigned long old_bytes = vm->next_mb_id - vm->first_mb_id + 1;
	unsigned long new_bytes = vm->next_mb_id - vm->first_mb_id + 2;
	int old_pages = PFN_UP(old_bytes);
	int new_pages = PFN_UP(new_bytes);
	uint8_t *new_mb_state;

	if (vm->mb_state && old_pages == new_pages)
		return 0;

	new_mb_state = vzalloc(new_pages * PAGE_SIZE);
	if (!new_mb_state)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->mb_state)
		memcpy(new_mb_state, vm->mb_state, old_pages * PAGE_SIZE);
	vfree(vm->mb_state);
	vm->mb_state = new_mb_state;
	mutex_unlock(&vm->hotplug_mutex);

	return 0;
}
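
/*
 * Iterate all memory blocks of the device that are in a given state. The
 * per-state counter lets the loop terminate early once no more blocks in
 * that state can be found.
 */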
#define virtio_mem_for_each_mb_state(_vm, _mb_id, _state) \
	for (_mb_id = _vm->first_mb_id; \
	     _mb_id < _vm->next_mb_id && _vm->nb_mb_state[_state]; \
	     _mb_id++) \
		if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)

#define virtio_mem_for_each_mb_state_rev(_vm, _mb_id, _state) \
	for (_mb_id = _vm->next_mb_id - 1; \
	     _mb_id >= _vm->first_mb_id && _vm->nb_mb_state[_state]; \
	     _mb_id--) \
		if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)

/*
 * Mark all selected subblocks plugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
					 unsigned long mb_id, int sb_id,
					 int count)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

	__bitmap_set(vm->sb_bitmap, bit, count);
}

/*
 * Mark all selected subblocks unplugged.
 *
 * Will not modify the state of the memory block.
 */
static void virtio_mem_mb_set_sb_unplugged(struct virtio_mem *vm,
					   unsigned long mb_id, int sb_id,
					   int count)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

	__bitmap_clear(vm->sb_bitmap, bit, count);
}

/*
 * Test if all selected subblocks are plugged.
 */
static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id,
					  int count)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

	if (count == 1)
		return test_bit(bit, vm->sb_bitmap);

	/* TODO: Helper similar to bitmap_set() */
	return find_next_zero_bit(vm->sb_bitmap, bit + count, bit) >=
	       bit + count;
}

/*
 * Test if all selected subblocks are unplugged.
 */
static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm,
					    unsigned long mb_id, int sb_id,
					    int count)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;

	/* TODO: Helper similar to bitmap_set() */
	return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count;
}

/*
 * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is
 * none.
 */
static int virtio_mem_mb_first_unplugged_sb(struct virtio_mem *vm,
					    unsigned long mb_id)
{
	const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb;

	return find_next_zero_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) -
	       bit;
}

/*
 * Prepare the subblock bitmap for the next memory block.
 */
static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm)
{
	const unsigned long old_nb_mb = vm->next_mb_id - vm->first_mb_id;
	const unsigned long old_nb_bits = old_nb_mb * vm->nb_sb_per_mb;
	const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->nb_sb_per_mb;
	int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
	int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
	unsigned long *new_sb_bitmap, *old_sb_bitmap;

	if (vm->sb_bitmap && old_pages == new_pages)
		return 0;

	new_sb_bitmap = vzalloc(new_pages * PAGE_SIZE);
	if (!new_sb_bitmap)
		return -ENOMEM;

	mutex_lock(&vm->hotplug_mutex);
	if (vm->sb_bitmap)
		memcpy(new_sb_bitmap, vm->sb_bitmap, old_pages * PAGE_SIZE);

	old_sb_bitmap = vm->sb_bitmap;
	vm->sb_bitmap = new_sb_bitmap;
	mutex_unlock(&vm->hotplug_mutex);

	vfree(old_sb_bitmap);
	return 0;
}

/*
 * Try to add a memory block to Linux. This will usually only fail
 * if out of memory.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);

	/*
	 * When force-unloading the driver and we still have memory added to
	 * Linux, the resource name has to stay.
	 */
	if (!vm->resource_name) {
		vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
						  GFP_KERNEL);
		if (!vm->resource_name)
			return -ENOMEM;
	}

	dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
	return add_memory_driver_managed(vm->nid, addr,
					 memory_block_size_bytes(),
					 vm->resource_name,
					 MEMHP_MERGE_RESOURCE);
}

/*
 * Try to remove a memory block from Linux. Will only fail if the memory block
 * is not offline.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);

	dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id);
	return remove_memory(vm->nid, addr, memory_block_size_bytes());
}

/*
 * Try to offline and remove a memory block from Linux.
 *
 * Must not be called with the vm->hotplug_mutex held (possible deadlock with
 * onlining code).
 *
 * Will not modify the state of the memory block.
 */
static int virtio_mem_mb_offline_and_remove(struct virtio_mem *vm,
					    unsigned long mb_id)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);

	dev_dbg(&vm->vdev->dev, "offlining and removing memory block: %lu\n",
		mb_id);
	return offline_and_remove_memory(vm->nid, addr,
					 memory_block_size_bytes());
}

/*
 * Trigger the workqueue so the device can perform its magic.
 */
static void virtio_mem_retry(struct virtio_mem *vm)
{
	unsigned long flags;

	spin_lock_irqsave(&vm->removal_lock, flags);
	if (!vm->removing)
		queue_work(system_freezable_wq, &vm->wq);
	spin_unlock_irqrestore(&vm->removal_lock, flags);
}
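
/*
 * Translate the device-provided node id to a Linux node id. With
 * VIRTIO_MEM_F_ACPI_PXM, the node id is an ACPI PXM; without that feature
 * (or without CONFIG_ACPI_NUMA), we fall back to NUMA_NO_NODE.
 */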
static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
{
	int node = NUMA_NO_NODE;

#if defined(CONFIG_ACPI_NUMA)
	if (virtio_has_feature(vm->vdev, VIRTIO_MEM_F_ACPI_PXM))
		node = pxm_to_node(node_id);
#endif
	return node;
}

/*
 * Test if a virtio-mem device overlaps with the given range. Can be called
 * from (notifier) callbacks lockless.
 */
static bool virtio_mem_overlaps_range(struct virtio_mem *vm,
				      unsigned long start, unsigned long size)
{
	unsigned long dev_start = virtio_mem_mb_id_to_phys(vm->first_mb_id);
	unsigned long dev_end = virtio_mem_mb_id_to_phys(vm->last_mb_id) +
				memory_block_size_bytes();

	return start < dev_end && dev_start < start + size;
}

/*
 * Test if a virtio-mem device owns a memory block. Can be called from
 * (notifier) callbacks lockless.
 */
static bool virtio_mem_owned_mb(struct virtio_mem *vm, unsigned long mb_id)
{
	return mb_id >= vm->first_mb_id && mb_id <= vm->last_mb_id;
}
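
/*
 * Memory notifier helpers, called with the hotplug_mutex held: only blocks
 * that we fully added to Linux (OFFLINE or OFFLINE_PARTIAL) may get onlined;
 * onlining of anything else is denied.
 */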
static int virtio_mem_notify_going_online(struct virtio_mem *vm,
					  unsigned long mb_id)
{
	switch (virtio_mem_mb_get_state(vm, mb_id)) {
	case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
	case VIRTIO_MEM_MB_STATE_OFFLINE:
		return NOTIFY_OK;
	default:
		break;
	}
	dev_warn_ratelimited(&vm->vdev->dev,
			     "memory block onlining denied\n");
	return NOTIFY_BAD;
}

static void virtio_mem_notify_offline(struct virtio_mem *vm,
				      unsigned long mb_id)
{
	switch (virtio_mem_mb_get_state(vm, mb_id)) {
	case VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL:
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
		break;
	case VIRTIO_MEM_MB_STATE_ONLINE:
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_OFFLINE);
		break;
	default:
		BUG();
		break;
	}

	/*
	 * Trigger the workqueue, maybe we can now unplug memory. Also,
	 * when we offline and remove a memory block, this will re-trigger
	 * us immediately - which is often nice because the removal of
	 * the memory block (e.g., memmap) might have freed up memory
	 * on other memory blocks we manage.
	 */
	virtio_mem_retry(vm);
}

static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id)
{
	unsigned long nb_offline;

	switch (virtio_mem_mb_get_state(vm, mb_id)) {
	case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
		break;
	case VIRTIO_MEM_MB_STATE_OFFLINE:
		virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_ONLINE);
		break;
	default:
		BUG();
		break;
	}
	nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
		     vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];

	/* see if we can add new blocks now that we onlined one block */
	if (nb_offline == VIRTIO_MEM_NB_OFFLINE_THRESHOLD - 1)
		virtio_mem_retry(vm);
}

static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
					    unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
	struct page *page;
	unsigned long pfn;
	int sb_id, i;

	for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
		if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		/*
		 * Drop our reference to the pages so the memory can get
		 * offlined and add the unplugged pages to the managed
		 * page counters (so offlining code can correctly subtract
		 * them again).
		 */
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->subblock_size);
		adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(pfn + i);
			if (WARN_ON(!page_ref_dec_and_test(page)))
				dump_page(page, "unplugged page referenced");
		}
	}
}

static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
					     unsigned long mb_id)
{
	const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
	unsigned long pfn;
	int sb_id, i;

	for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
		if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
			continue;
		/*
		 * Get the reference we dropped when going offline and
		 * subtract the unplugged pages from the managed page
		 * counters.
		 */
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->subblock_size);
		adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
		for (i = 0; i < nr_pages; i++)
			page_ref_inc(pfn_to_page(pfn + i));
	}
}

/*
 * This callback will either be called synchronously from add_memory() or
 * asynchronously (e.g., triggered via user space). We have to be careful
 * with locking when calling add_memory().
 */
static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
					 unsigned long action, void *arg)
{
	struct virtio_mem *vm = container_of(nb, struct virtio_mem,
					     memory_notifier);
	struct memory_notify *mhp = arg;
	const unsigned long start = PFN_PHYS(mhp->start_pfn);
	const unsigned long size = PFN_PHYS(mhp->nr_pages);
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(start);
	int rc = NOTIFY_OK;

	if (!virtio_mem_overlaps_range(vm, start, size))
		return NOTIFY_DONE;

	/*
	 * Memory is onlined/offlined in memory block granularity. We cannot
	 * cross virtio-mem device boundaries and memory block boundaries. Bail
	 * out if this ever changes.
	 */
	if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
			 !IS_ALIGNED(start, memory_block_size_bytes())))
		return NOTIFY_BAD;

	/*
	 * Avoid circular locking lockdep warnings. We lock the mutex
	 * e.g., in MEM_GOING_ONLINE and unlock it in MEM_ONLINE. The
	 * blocking_notifier_call_chain() has its own lock, which gets unlocked
	 * between both notifier calls and will bail out. False positive.
	 */
	lockdep_off();

	switch (action) {
	case MEM_GOING_OFFLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		virtio_mem_notify_going_offline(vm, mb_id);
		break;
	case MEM_GOING_ONLINE:
		mutex_lock(&vm->hotplug_mutex);
		if (vm->removing) {
			rc = notifier_from_errno(-EBUSY);
			mutex_unlock(&vm->hotplug_mutex);
			break;
		}
		vm->hotplug_active = true;
		rc = virtio_mem_notify_going_online(vm, mb_id);
		break;
	case MEM_OFFLINE:
		virtio_mem_notify_offline(vm, mb_id);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_ONLINE:
		virtio_mem_notify_online(vm, mb_id);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_OFFLINE:
		if (!vm->hotplug_active)
			break;
		virtio_mem_notify_cancel_offline(vm, mb_id);
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	case MEM_CANCEL_ONLINE:
		if (!vm->hotplug_active)
			break;
		vm->hotplug_active = false;
		mutex_unlock(&vm->hotplug_mutex);
		break;
	default:
		break;
	}

	lockdep_on();

	return rc;
}

/*
 * Set a range of pages PG_offline. Remember pages that were never onlined
 * (via generic_online_page()) using PageDirty().
 */
static void virtio_mem_set_fake_offline(unsigned long pfn,
					unsigned int nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__SetPageOffline(page);
		if (!onlined) {
			SetPageDirty(page);
			/* FIXME: remove after cleanups */
			ClearPageReserved(page);
		}
	}
}

/*
 * Clear PG_offline from a range of pages. If the pages were never onlined
 * (via generic_online_page()), clear PageDirty().
 */
static void virtio_mem_clear_fake_offline(unsigned long pfn,
					  unsigned int nr_pages, bool onlined)
{
	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__ClearPageOffline(page);
		if (!onlined)
			ClearPageDirty(page);
	}
}

/*
 * Release a range of fake-offline pages to the buddy, effectively
 * fake-onlining them.
 */
static void virtio_mem_fake_online(unsigned long pfn, unsigned int nr_pages)
{
	const int order = MAX_ORDER - 1;
	int i;

	/*
	 * We are always called with subblock granularity, which is at least
	 * aligned to MAX_ORDER - 1.
	 */
	for (i = 0; i < nr_pages; i += 1 << order) {
		struct page *page = pfn_to_page(pfn + i);

		/*
		 * If the page is PageDirty(), it was kept fake-offline when
		 * onlining the memory block. Otherwise, it was allocated
		 * using alloc_contig_range(). All pages in a subblock are
		 * alike.
		 */
		if (PageDirty(page)) {
			virtio_mem_clear_fake_offline(pfn + i, 1 << order,
						      false);
			generic_online_page(page, order);
		} else {
			virtio_mem_clear_fake_offline(pfn + i, 1 << order,
						      true);
			free_contig_range(pfn + i, 1 << order);
			adjust_managed_page_count(page, 1 << order);
		}
	}
}

static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
{
	const unsigned long addr = page_to_phys(page);
	const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
	struct virtio_mem *vm;
	int sb_id;

	/*
	 * We exploit here that subblocks have at least MAX_ORDER - 1
	 * size/alignment and that this callback is called with such a
	 * size/alignment. So we cannot cross subblocks and therefore
	 * also not memory blocks.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
		if (!virtio_mem_owned_mb(vm, mb_id))
			continue;

		sb_id = virtio_mem_phys_to_sb_id(vm, addr);
		/*
		 * If plugged, online the pages, otherwise, set them fake
		 * offline (PageOffline).
		 */
		if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
			generic_online_page(page, order);
		else
			virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
						    false);
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	/* not virtio-mem memory, but e.g., a DIMM. online it */
	generic_online_page(page, order);
}

static uint64_t virtio_mem_send_request(struct virtio_mem *vm,
					const struct virtio_mem_req *req)
{
	struct scatterlist *sgs[2], sg_req, sg_resp;
	unsigned int len;
	int rc;

	/* don't use the request residing on the stack (vaddr) */
	vm->req = *req;

	/* out: buffer for request */
	sg_init_one(&sg_req, &vm->req, sizeof(vm->req));
	sgs[0] = &sg_req;

	/* in: buffer for response */
	sg_init_one(&sg_resp, &vm->resp, sizeof(vm->resp));
	sgs[1] = &sg_resp;

	rc = virtqueue_add_sgs(vm->vq, sgs, 1, 1, vm, GFP_KERNEL);
	if (rc < 0)
		return rc;

	virtqueue_kick(vm->vq);

	/* wait for a response */
	wait_event(vm->host_resp, virtqueue_get_buf(vm->vq, &len));

	return virtio16_to_cpu(vm->vdev, vm->resp.type);
}
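
/*
 * Device responses map to errnos as follows: ACK -> 0, NACK -> -EAGAIN
 * (e.g., we raced with a change of the requested size), BUSY -> -ETXTBSY
 * (the hypervisor cannot process the request right now), ERROR -> -EINVAL.
 * The workqueue translates these errnos into retry behavior.
 */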
static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
					uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_PLUG),
		.u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size += size;
		return 0;
	case VIRTIO_MEM_RESP_NACK:
		return -EAGAIN;
	case VIRTIO_MEM_RESP_BUSY:
		return -ETXTBSY;
	case VIRTIO_MEM_RESP_ERROR:
		return -EINVAL;
	default:
		return -ENOMEM;
	}
}

static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
					  uint64_t size)
{
	const uint64_t nb_vm_blocks = size / vm->device_block_size;
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG),
		.u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
		.u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
	};

	if (atomic_read(&vm->config_changed))
		return -EAGAIN;

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->plugged_size -= size;
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		return -ETXTBSY;
	case VIRTIO_MEM_RESP_ERROR:
		return -EINVAL;
	default:
		return -ENOMEM;
	}
}

static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
{
	const struct virtio_mem_req req = {
		.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
	};

	switch (virtio_mem_send_request(vm, &req)) {
	case VIRTIO_MEM_RESP_ACK:
		vm->unplug_all_required = false;
		vm->plugged_size = 0;
		/* usable region might have shrunk */
		atomic_set(&vm->config_changed, 1);
		return 0;
	case VIRTIO_MEM_RESP_BUSY:
		return -ETXTBSY;
	default:
		return -ENOMEM;
	}
}

/*
 * Plug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
				 int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->subblock_size;
	const uint64_t size = count * vm->subblock_size;
	int rc;

	dev_dbg(&vm->vdev->dev, "plugging memory block: %lu : %i - %i\n", mb_id,
		sb_id, sb_id + count - 1);

	rc = virtio_mem_send_plug_request(vm, addr, size);
	if (!rc)
		virtio_mem_mb_set_sb_plugged(vm, mb_id, sb_id, count);
	return rc;
}

/*
 * Unplug selected subblocks. Updates the plugged state, but not the state
 * of the memory block.
 */
static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
				   int sb_id, int count)
{
	const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
			      sb_id * vm->subblock_size;
	const uint64_t size = count * vm->subblock_size;
	int rc;

	dev_dbg(&vm->vdev->dev, "unplugging memory block: %lu : %i - %i\n",
		mb_id, sb_id, sb_id + count - 1);

	rc = virtio_mem_send_unplug_request(vm, addr, size);
	if (!rc)
		virtio_mem_mb_set_sb_unplugged(vm, mb_id, sb_id, count);
	return rc;
}

/*
 * Unplug the desired number of plugged subblocks of an offline or not-added
 * memory block. Will fail if any subblock cannot get unplugged (instead of
 * skipping it).
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm,
				       unsigned long mb_id, uint64_t *nb_sb)
{
	int sb_id, count;
	int rc;

	sb_id = vm->nb_sb_per_mb - 1;
	while (*nb_sb) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       virtio_mem_mb_test_sb_unplugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;
		/* Try to unplug multiple subblocks at a time */
		count = 1;
		while (count < *nb_sb && sb_id > 0 &&
		       virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
			count++;
			sb_id--;
		}

		rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		sb_id--;
	}

	return 0;
}

/*
 * Unplug all plugged subblocks of an offline or not-added memory block.
 *
 * Will not modify the state of the memory block.
 *
 * Note: can fail after some subblocks were unplugged.
 */
static int virtio_mem_mb_unplug(struct virtio_mem *vm, unsigned long mb_id)
{
	uint64_t nb_sb = vm->nb_sb_per_mb;

	return virtio_mem_mb_unplug_any_sb(vm, mb_id, &nb_sb);
}

/*
 * Prepare tracking data for the next memory block.
 */
static int virtio_mem_prepare_next_mb(struct virtio_mem *vm,
				      unsigned long *mb_id)
{
	int rc;

	if (vm->next_mb_id > vm->last_usable_mb_id)
		return -ENOSPC;

	/* Resize the state array if required. */
	rc = virtio_mem_mb_state_prepare_next_mb(vm);
	if (rc)
		return rc;

	/* Resize the subblock bitmap if required. */
	rc = virtio_mem_sb_bitmap_prepare_next_mb(vm);
	if (rc)
		return rc;

	vm->nb_mb_state[VIRTIO_MEM_MB_STATE_UNUSED]++;
	*mb_id = vm->next_mb_id++;
	return 0;
}

/*
 * Don't add too many blocks that are not onlined yet to avoid running OOM.
 */
static bool virtio_mem_too_many_mb_offline(struct virtio_mem *vm)
{
	unsigned long nb_offline;

	nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
		     vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
	return nb_offline >= VIRTIO_MEM_NB_OFFLINE_THRESHOLD;
}

/*
 * Try to plug the desired number of subblocks and add the memory block
 * to Linux.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
				      unsigned long mb_id,
				      uint64_t *nb_sb)
{
	const int count = min_t(int, *nb_sb, vm->nb_sb_per_mb);
	int rc, rc2;

	if (WARN_ON_ONCE(!count))
		return -EINVAL;

	/*
	 * Plug the requested number of subblocks before adding it to linux,
	 * so that onlining will directly online all plugged subblocks.
	 */
	rc = virtio_mem_mb_plug_sb(vm, mb_id, 0, count);
	if (rc)
		return rc;

	/*
	 * Mark the block properly offline before adding it to Linux,
	 * so the memory notifiers will find the block in the right state.
	 */
	if (count == vm->nb_sb_per_mb)
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_OFFLINE);
	else
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);

	/* Add the memory block to linux - if that fails, try to unplug. */
	rc = virtio_mem_mb_add(vm, mb_id);
	if (rc) {
		enum virtio_mem_mb_state new_state = VIRTIO_MEM_MB_STATE_UNUSED;

		dev_err(&vm->vdev->dev,
			"adding memory block %lu failed with %d\n", mb_id, rc);
		rc2 = virtio_mem_mb_unplug_sb(vm, mb_id, 0, count);

		/*
		 * TODO: Linux MM does not properly clean up yet in all cases
		 * where adding of memory failed - especially on -ENOMEM.
		 */
		if (rc2)
			new_state = VIRTIO_MEM_MB_STATE_PLUGGED;
		virtio_mem_mb_set_state(vm, mb_id, new_state);
		return rc;
	}

	*nb_sb -= count;
	return 0;
}

/*
 * Try to plug the desired number of subblocks of a memory block that
 * is already added to Linux.
 *
 * Will modify the state of the memory block.
 *
 * Note: Can fail after some subblocks were successfully plugged.
 */
static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
				     uint64_t *nb_sb, bool online)
{
	unsigned long pfn, nr_pages;
	int sb_id, count;
	int rc;

	if (WARN_ON_ONCE(!*nb_sb))
		return -EINVAL;

	while (*nb_sb) {
		sb_id = virtio_mem_mb_first_unplugged_sb(vm, mb_id);
		if (sb_id >= vm->nb_sb_per_mb)
			break;
		count = 1;
		while (count < *nb_sb &&
		       sb_id + count < vm->nb_sb_per_mb &&
		       !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count,
						      1))
			count++;

		rc = virtio_mem_mb_plug_sb(vm, mb_id, sb_id, count);
		if (rc)
			return rc;
		*nb_sb -= count;
		if (!online)
			continue;

		/* fake-online the pages if the memory block is online */
		pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			       sb_id * vm->subblock_size);
		nr_pages = PFN_DOWN(count * vm->subblock_size);
		virtio_mem_fake_online(pfn, nr_pages);
	}

	if (virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
		if (online)
			virtio_mem_mb_set_state(vm, mb_id,
						VIRTIO_MEM_MB_STATE_ONLINE);
		else
			virtio_mem_mb_set_state(vm, mb_id,
						VIRTIO_MEM_MB_STATE_OFFLINE);
	}

	return 0;
}

/*
 * Try to plug the requested amount of memory.
 */
static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_sb = diff / vm->subblock_size;
	unsigned long mb_id;
	int rc;

	if (!nb_sb)
		return 0;

	/* Don't race with onlining/offlining */
	mutex_lock(&vm->hotplug_mutex);

	/* Try to plug subblocks of partially plugged online blocks. */
	virtio_mem_for_each_mb_state(vm, mb_id,
				     VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
		rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, true);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/* Try to plug subblocks of partially plugged offline blocks. */
	virtio_mem_for_each_mb_state(vm, mb_id,
				     VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
		rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, false);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/*
	 * We won't be working on online/offline memory blocks from this point,
	 * so we can't race with memory onlining/offlining. Drop the mutex.
	 */
	mutex_unlock(&vm->hotplug_mutex);

	/* Try to plug and add unused blocks */
	virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED) {
		if (virtio_mem_too_many_mb_offline(vm))
			return -ENOSPC;

		rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
		if (rc || !nb_sb)
			return rc;
		cond_resched();
	}

	/* Try to prepare, plug and add new blocks */
	while (nb_sb) {
		if (virtio_mem_too_many_mb_offline(vm))
			return -ENOSPC;

		rc = virtio_mem_prepare_next_mb(vm, &mb_id);
		if (rc)
			return rc;
		rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
		if (rc)
			return rc;
		cond_resched();
	}

	return 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}

/*
 * Unplug the desired number of plugged subblocks of an offline memory block.
 * Will fail if any subblock cannot get unplugged (instead of skipping it).
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged.
 */
static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm,
					       unsigned long mb_id,
					       uint64_t *nb_sb)
{
	int rc;

	rc = virtio_mem_mb_unplug_any_sb(vm, mb_id, nb_sb);

	/* some subblocks might have been unplugged even on failure */
	if (!virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb))
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
	if (rc)
		return rc;

	if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
		/*
		 * Remove the block from Linux - this should never fail.
		 * Hinder the block from getting onlined by marking it
		 * unplugged. Temporarily drop the mutex, so
		 * any pending GOING_ONLINE requests can be serviced/rejected.
		 */
		virtio_mem_mb_set_state(vm, mb_id,
					VIRTIO_MEM_MB_STATE_UNUSED);

		mutex_unlock(&vm->hotplug_mutex);
		rc = virtio_mem_mb_remove(vm, mb_id);
		BUG_ON(rc);
		mutex_lock(&vm->hotplug_mutex);
	}
	return 0;
}

/*
 * Unplug the given plugged subblocks of an online memory block.
 *
 * Will modify the state of the memory block.
 */
static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
					  unsigned long mb_id, int sb_id,
					  int count)
{
	const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count;
	unsigned long start_pfn;
	int rc;

	start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
			     sb_id * vm->subblock_size);
	rc = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				MIGRATE_MOVABLE, GFP_KERNEL);
	if (rc == -ENOMEM)
		/* whoops, out of memory */
		return rc;
	if (rc)
		return -EBUSY;

	/* Mark it as fake-offline before unplugging it */
	virtio_mem_set_fake_offline(start_pfn, nr_pages, true);
	adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);

	/* Try to unplug the allocated memory */
	rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
	if (rc) {
		/* Return the memory to the buddy. */
		virtio_mem_fake_online(start_pfn, nr_pages);
		return rc;
	}

	virtio_mem_mb_set_state(vm, mb_id,
				VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
	return 0;
}

/*
 * Unplug the desired number of plugged subblocks of an online memory block.
 * Will skip subblocks that are busy.
 *
 * Will modify the state of the memory block. Might temporarily drop the
 * hotplug_mutex.
 *
 * Note: Can fail after some subblocks were successfully unplugged. Can
 *       return 0 even if subblocks were busy and could not get unplugged.
 */
static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm,
					      unsigned long mb_id,
					      uint64_t *nb_sb)
{
	int rc, sb_id;

	/* If possible, try to unplug the complete block in one shot. */
	if (*nb_sb >= vm->nb_sb_per_mb &&
	    virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
		rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, 0,
						    vm->nb_sb_per_mb);
		if (!rc) {
			*nb_sb -= vm->nb_sb_per_mb;
			goto unplugged;
		} else if (rc != -EBUSY)
			return rc;
	}

	/* Fallback to single subblocks. */
	for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
		/* Find the next candidate subblock */
		while (sb_id >= 0 &&
		       !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
			sb_id--;
		if (sb_id < 0)
			break;

		rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, sb_id, 1);
		if (rc == -EBUSY)
			continue;
		else if (rc)
			return rc;
		*nb_sb -= 1;
	}

unplugged:
	/*
	 * Once all subblocks of a memory block were unplugged, offline and
	 * remove it. This will usually not fail, as no memory is in use
	 * anymore - however some other notifiers might NACK the request.
	 */
	if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
		mutex_unlock(&vm->hotplug_mutex);
		rc = virtio_mem_mb_offline_and_remove(vm, mb_id);
		mutex_lock(&vm->hotplug_mutex);
		if (!rc)
			virtio_mem_mb_set_state(vm, mb_id,
						VIRTIO_MEM_MB_STATE_UNUSED);
	}

	return 0;
}

/*
 * Try to unplug the requested amount of memory.
 */
static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
	uint64_t nb_sb = diff / vm->subblock_size;
	unsigned long mb_id;
	int rc;

	if (!nb_sb)
		return 0;

	/*
	 * We'll drop the mutex a couple of times when it is safe to do so.
	 * This might result in some blocks switching the state (online/offline)
	 * and we could miss them in this run - we will retry again later.
	 */
	mutex_lock(&vm->hotplug_mutex);

	/* Try to unplug subblocks of partially plugged offline blocks. */
	virtio_mem_for_each_mb_state_rev(vm, mb_id,
					 VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
		rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
							 &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	/* Try to unplug subblocks of plugged offline blocks. */
	virtio_mem_for_each_mb_state_rev(vm, mb_id,
					 VIRTIO_MEM_MB_STATE_OFFLINE) {
		rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
							 &nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		cond_resched();
	}

	if (!unplug_online) {
		mutex_unlock(&vm->hotplug_mutex);
		return nb_sb ? -EBUSY : 0;
	}

	/* Try to unplug subblocks of partially plugged online blocks. */
	virtio_mem_for_each_mb_state_rev(vm, mb_id,
					 VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
		rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
							&nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		mutex_unlock(&vm->hotplug_mutex);
		cond_resched();
		mutex_lock(&vm->hotplug_mutex);
	}

	/* Try to unplug subblocks of plugged online blocks. */
	virtio_mem_for_each_mb_state_rev(vm, mb_id,
					 VIRTIO_MEM_MB_STATE_ONLINE) {
		rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
							&nb_sb);
		if (rc || !nb_sb)
			goto out_unlock;
		mutex_unlock(&vm->hotplug_mutex);
		cond_resched();
		mutex_lock(&vm->hotplug_mutex);
	}

	mutex_unlock(&vm->hotplug_mutex);
	return nb_sb ? -EBUSY : 0;
out_unlock:
	mutex_unlock(&vm->hotplug_mutex);
	return rc;
}

/*
 * Try to unplug all blocks that couldn't be unplugged before, for example,
 * because the hypervisor was busy.
 */
static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
{
	unsigned long mb_id;
	int rc;

	virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_PLUGGED) {
		rc = virtio_mem_mb_unplug(vm, mb_id);
		if (rc)
			return rc;
		virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
	}

	return 0;
}

/*
 * Update all parts of the config that could have changed.
 */
static void virtio_mem_refresh_config(struct virtio_mem *vm)
{
	const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
	uint64_t new_plugged_size, usable_region_size, end_addr;

	/* the plugged_size is just a reflection of what _we_ did previously */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&new_plugged_size);
	if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
		vm->plugged_size = new_plugged_size;

	/* calculate the last usable memory block id */
	virtio_cread_le(vm->vdev, struct virtio_mem_config,
			usable_region_size, &usable_region_size);
	end_addr = vm->addr + usable_region_size;
	end_addr = min(end_addr, phys_limit);
	vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;

	/* see if there is a request to change the size */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
			&vm->requested_size);

	dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size);
	dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size);
}

/*
 * Workqueue function for handling plug/unplug requests and config updates.
 */
static void virtio_mem_run_wq(struct work_struct *work)
{
	struct virtio_mem *vm = container_of(work, struct virtio_mem, wq);
	uint64_t diff;
	int rc;

	hrtimer_cancel(&vm->retry_timer);

	if (vm->broken)
		return;

retry:
	rc = 0;

	/* Make sure we start with a clean state if there are leftovers. */
	if (unlikely(vm->unplug_all_required))
		rc = virtio_mem_send_unplug_all_request(vm);

	if (atomic_read(&vm->config_changed)) {
		atomic_set(&vm->config_changed, 0);
		virtio_mem_refresh_config(vm);
	}

	/* Unplug any leftovers from previous runs */
	if (!rc)
		rc = virtio_mem_unplug_pending_mb(vm);

	if (!rc && vm->requested_size != vm->plugged_size) {
		if (vm->requested_size > vm->plugged_size) {
			diff = vm->requested_size - vm->plugged_size;
			rc = virtio_mem_plug_request(vm, diff);
		} else {
			diff = vm->plugged_size - vm->requested_size;
			rc = virtio_mem_unplug_request(vm, diff);
		}
	}

	switch (rc) {
	case 0:
		vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;
		break;
	case -ENOSPC:
		/*
		 * We cannot add any more memory (alignment, physical limit)
		 * or we have too many offline memory blocks.
		 */
		break;
	case -ETXTBSY:
		/*
		 * The hypervisor cannot process our request right now
		 * (e.g., out of memory, migrating).
		 */
	case -EBUSY:
		/*
		 * We cannot free up any memory to unplug it (all plugged memory
		 * is busy).
		 */
	case -ENOMEM:
		/* Out of memory, try again later. */
		hrtimer_start(&vm->retry_timer, ms_to_ktime(vm->retry_timer_ms),
			      HRTIMER_MODE_REL);
		break;
	case -EAGAIN:
		/* Retry immediately (e.g., the config changed). */
		goto retry;
	default:
		/* Unknown error, mark as broken */
		dev_err(&vm->vdev->dev,
			"unknown error, marking device broken: %d\n", rc);
		vm->broken = true;
	}
}
static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
{
	struct virtio_mem *vm = container_of(timer, struct virtio_mem,
					     retry_timer);

	virtio_mem_retry(vm);
	vm->retry_timer_ms = min_t(unsigned int, vm->retry_timer_ms * 2,
				   VIRTIO_MEM_RETRY_TIMER_MAX_MS);
	return HRTIMER_NORESTART;
}
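
/*
 * Virtqueue callback: the host placed a response in the used ring; wake up
 * the request in virtio_mem_send_request() waiting on host_resp.
 */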
static void virtio_mem_handle_response(struct virtqueue *vq)
{
	struct virtio_mem *vm = vq->vdev->priv;

	wake_up(&vm->host_resp);
}
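
/*
 * Set up the single guest->host request virtqueue.
 */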
static int virtio_mem_init_vq(struct virtio_mem *vm)
{
	struct virtqueue *vq;

	vq = virtio_find_single_vq(vm->vdev, virtio_mem_handle_response,
				   "guest-request");
	if (IS_ERR(vq))
		return PTR_ERR(vq);
	vm->vq = vq;

	return 0;
}

static int virtio_mem_init(struct virtio_mem *vm)
{
	const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
	uint16_t node_id;

	if (!vm->vdev->config->get) {
		dev_err(&vm->vdev->dev, "config access disabled\n");
		return -EINVAL;
	}

	/*
	 * We don't want to (un)plug or reuse any memory when in kdump. The
	 * memory is still accessible (but not mapped).
	 */
	if (is_kdump_kernel()) {
		dev_warn(&vm->vdev->dev, "disabled in kdump kernel\n");
		return -EBUSY;
	}

	/* Fetch all properties that can't change. */
	virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
			&vm->plugged_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size,
			&vm->device_block_size);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id,
			&node_id);
	vm->nid = virtio_mem_translate_node_id(vm, node_id);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr);
	virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
			&vm->region_size);

	/* Determine the nid for the device based on the lowest address. */
	if (vm->nid == NUMA_NO_NODE)
		vm->nid = memory_add_physaddr_to_nid(vm->addr);

	/*
	 * We always hotplug memory in memory block granularity. This way,
	 * we have to wait for exactly one memory block to online.
	 */
	if (vm->device_block_size > memory_block_size_bytes()) {
		dev_err(&vm->vdev->dev,
			"The block size is not supported (too big).\n");
		return -EINVAL;
	}

	/* bad device setup - warn only */
	if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical start address can make some memory unusable.\n");
	if (!IS_ALIGNED(vm->addr + vm->region_size, memory_block_size_bytes()))
		dev_warn(&vm->vdev->dev,
			 "The alignment of the physical end address can make some memory unusable.\n");
	if (vm->addr + vm->region_size > phys_limit)
		dev_warn(&vm->vdev->dev,
			 "Some memory is not addressable. This can make some memory unusable.\n");

	/*
	 * Calculate the subblock size:
	 * - At least MAX_ORDER - 1 / pageblock_order.
	 * - At least the device block size.
	 * In the worst case, a single subblock per memory block.
	 */
	vm->subblock_size = PAGE_SIZE * 1ul << max_t(uint32_t, MAX_ORDER - 1,
						     pageblock_order);
	vm->subblock_size = max_t(uint64_t, vm->device_block_size,
				  vm->subblock_size);
	vm->nb_sb_per_mb = memory_block_size_bytes() / vm->subblock_size;

	/* Round up to the next full memory block */
	vm->first_mb_id = virtio_mem_phys_to_mb_id(vm->addr - 1 +
						   memory_block_size_bytes());
	vm->next_mb_id = vm->first_mb_id;
	vm->last_mb_id = virtio_mem_phys_to_mb_id(vm->addr +
						  vm->region_size) - 1;

	dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
	dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
	dev_info(&vm->vdev->dev, "device block size: 0x%llx",
		 (unsigned long long)vm->device_block_size);
	dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
		 memory_block_size_bytes());
	dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
		 (unsigned long long)vm->subblock_size);
	if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
		dev_info(&vm->vdev->dev, "nid: %d", vm->nid);

	return 0;
}

static int virtio_mem_create_resource(struct virtio_mem *vm)
{
	/*
	 * When force-unloading the driver and removing the device, we
	 * could have a garbage pointer. Duplicate the string.
	 */
	const char *name = kstrdup(dev_name(&vm->vdev->dev), GFP_KERNEL);

	if (!name)
		return -ENOMEM;

	vm->parent_resource = __request_mem_region(vm->addr, vm->region_size,
						   name, IORESOURCE_SYSTEM_RAM);
	if (!vm->parent_resource) {
		kfree(name);
		dev_warn(&vm->vdev->dev, "could not reserve device region\n");
		dev_info(&vm->vdev->dev,
			 "reloading the driver is not supported\n");
		return -EBUSY;
	}

	/* The memory is not actually busy - make add_memory() work. */
	vm->parent_resource->flags &= ~IORESOURCE_BUSY;
	return 0;
}

static void virtio_mem_delete_resource(struct virtio_mem *vm)
{
	const char *name;

	if (!vm->parent_resource)
		return;

	name = vm->parent_resource->name;
	release_resource(vm->parent_resource);
	kfree(vm->parent_resource);
	kfree(name);
	vm->parent_resource = NULL;
}
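
/*
 * Probe sequence: allocate state, set up the virtqueue, query the config,
 * reserve the parent resource, register the memory notifier and online_page
 * callback, and finally kick the workqueue to process the initial
 * requested_size.
 */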
static int virtio_mem_probe(struct virtio_device *vdev)
{
	struct virtio_mem *vm;
	int rc;

	BUILD_BUG_ON(sizeof(struct virtio_mem_req) != 24);
	BUILD_BUG_ON(sizeof(struct virtio_mem_resp) != 10);

	vdev->priv = vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	init_waitqueue_head(&vm->host_resp);
	vm->vdev = vdev;
	INIT_WORK(&vm->wq, virtio_mem_run_wq);
	mutex_init(&vm->hotplug_mutex);
	INIT_LIST_HEAD(&vm->next);
	spin_lock_init(&vm->removal_lock);
	hrtimer_init(&vm->retry_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vm->retry_timer.function = virtio_mem_timer_expired;
	vm->retry_timer_ms = VIRTIO_MEM_RETRY_TIMER_MIN_MS;

	/* register the virtqueue */
	rc = virtio_mem_init_vq(vm);
	if (rc)
		goto out_free_vm;

	/* initialize the device by querying the config */
	rc = virtio_mem_init(vm);
	if (rc)
		goto out_del_vq;

	/* create the parent resource for all memory */
	rc = virtio_mem_create_resource(vm);
	if (rc)
		goto out_del_vq;

	/*
	 * If we still have memory plugged, we have to unplug all memory first.
	 * Registering our parent resource makes sure that this memory isn't
	 * actually in use (e.g., trying to reload the driver).
	 */
	if (vm->plugged_size) {
		vm->unplug_all_required = 1;
		dev_info(&vm->vdev->dev, "unplugging all memory is required\n");
	}

	/* register callbacks */
	vm->memory_notifier.notifier_call = virtio_mem_memory_notifier_cb;
	rc = register_memory_notifier(&vm->memory_notifier);
	if (rc)
		goto out_del_resource;
	rc = register_virtio_mem_device(vm);
	if (rc)
		goto out_unreg_mem;

	virtio_device_ready(vdev);

	/* trigger a config update to start processing the requested_size */
	atomic_set(&vm->config_changed, 1);
	queue_work(system_freezable_wq, &vm->wq);

	return 0;
out_unreg_mem:
	unregister_memory_notifier(&vm->memory_notifier);
out_del_resource:
	virtio_mem_delete_resource(vm);
out_del_vq:
	vdev->config->del_vqs(vdev);
out_free_vm:
	kfree(vm);
	vdev->priv = NULL;

	return rc;
}
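
/*
 * Teardown: stop the workqueue and timer, remove blocks user space could
 * still online, unregister callbacks, and warn if memory remains added (it
 * cannot be taken back reliably).
 */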
static void virtio_mem_remove(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;
	unsigned long mb_id;
	int rc;

	/*
	 * Make sure the workqueue won't be triggered anymore and no memory
	 * blocks can be onlined/offlined until we're finished here.
	 */
	mutex_lock(&vm->hotplug_mutex);
	spin_lock_irq(&vm->removal_lock);
	vm->removing = true;
	spin_unlock_irq(&vm->removal_lock);
	mutex_unlock(&vm->hotplug_mutex);

	/* wait until the workqueue stopped */
	cancel_work_sync(&vm->wq);
	hrtimer_cancel(&vm->retry_timer);

	/*
	 * After we unregistered our callbacks, user space can online partially
	 * plugged offline blocks. Make sure to remove them.
	 */
	virtio_mem_for_each_mb_state(vm, mb_id,
				     VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
		rc = virtio_mem_mb_remove(vm, mb_id);
		BUG_ON(rc);
		virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
	}
	/*
	 * After we unregistered our callbacks, user space can no longer
	 * offline partially plugged online memory blocks. No need to worry
	 * about them.
	 */

	/* unregister callbacks */
	unregister_virtio_mem_device(vm);
	unregister_memory_notifier(&vm->memory_notifier);

	/*
	 * There is no way we could reliably remove all memory we have added to
	 * the system. And there is no way to stop the driver/device from going
	 * away. Warn at least.
	 */
	if (vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] ||
	    vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
	    vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
	    vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL]) {
		dev_warn(&vdev->dev, "device still has system memory added\n");
	} else {
		virtio_mem_delete_resource(vm);
		kfree_const(vm->resource_name);
	}

	/* remove all tracking data - no locking needed */
	vfree(vm->mb_state);
	vfree(vm->sb_bitmap);

	/* reset the device and cleanup the queues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	kfree(vm);
	vdev->priv = NULL;
}
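
/*
 * Config-change interrupt: note the change and re-trigger the workqueue so
 * the new requested_size gets processed.
 */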
static void virtio_mem_config_changed(struct virtio_device *vdev)
{
	struct virtio_mem *vm = vdev->priv;

	atomic_set(&vm->config_changed, 1);
	virtio_mem_retry(vm);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_mem_freeze(struct virtio_device *vdev)
{
	/*
	 * When restarting the VM, all memory is usually unplugged. Don't
	 * allow suspend/hibernate.
	 */
	dev_err(&vdev->dev, "save/restore not supported.\n");
	return -EPERM;
}

static int virtio_mem_restore(struct virtio_device *vdev)
{
	return -EPERM;
}
#endif

static unsigned int virtio_mem_features[] = {
#if defined(CONFIG_NUMA) && defined(CONFIG_ACPI_NUMA)
	VIRTIO_MEM_F_ACPI_PXM,
#endif
};

static const struct virtio_device_id virtio_mem_id_table[] = {
	{ VIRTIO_ID_MEM, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_mem_driver = {
	.feature_table = virtio_mem_features,
	.feature_table_size = ARRAY_SIZE(virtio_mem_features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = virtio_mem_id_table,
	.probe = virtio_mem_probe,
	.remove = virtio_mem_remove,
	.config_changed = virtio_mem_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_mem_freeze,
	.restore = virtio_mem_restore,
#endif
};

module_virtio_driver(virtio_mem_driver);
MODULE_DEVICE_TABLE(virtio, virtio_mem_id_table);
MODULE_AUTHOR("David Hildenbrand <david@redhat.com>");
MODULE_DESCRIPTION("Virtio-mem driver");
MODULE_LICENSE("GPL");