// SPDX-License-Identifier: GPL-2.0
/*
 * VMID allocator.
 *
 * Based on the arm64 ASID allocator algorithm.
 * Please refer to arch/arm64/mm/context.c for detailed
 * comments on the algorithm.
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */
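
/*
 * Overview (summary of the code below): each value stored in
 * kvm_vmid->id carries an allocator generation in its upper bits and
 * the hardware VMID in its lower kvm_arm_vmid_bits bits. A global
 * bitmap (vmid_map) tracks which hardware VMIDs are in use in the
 * current generation; when it fills up, the generation is bumped,
 * the map is rebuilt and stale TLB entries are invalidated.
 */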

#include <linux/bitfield.h>
#include <linux/bitops.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

unsigned int __ro_after_init kvm_arm_vmid_bits;
static DEFINE_RAW_SPINLOCK(cpu_vmid_lock);

static atomic64_t vmid_generation;
static unsigned long *vmid_map;

static DEFINE_PER_CPU(atomic64_t, active_vmids);
static DEFINE_PER_CPU(u64, reserved_vmids);

#define VMID_MASK               (~GENMASK(kvm_arm_vmid_bits - 1, 0))
#define VMID_FIRST_VERSION      (1UL << kvm_arm_vmid_bits)

#define NUM_USER_VMIDS          VMID_FIRST_VERSION
#define vmid2idx(vmid)          ((vmid) & ~VMID_MASK)
#define idx2vmid(idx)           vmid2idx(idx)
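
/*
 * Illustrative example (not part of the original source): with 16-bit
 * VMIDs, kvm_arm_vmid_bits == 16, so:
 *
 *   VMID_FIRST_VERSION == 1UL << 16 == 0x10000   (generation increment)
 *   VMID_MASK          == ~0xffffUL              (generation bits)
 *   NUM_USER_VMIDS     == 0x10000                (bitmap size)
 *   vmid2idx(0x200ab)  == 0xab                   (hardware VMID / bitmap index)
 *
 * i.e. bits [kvm_arm_vmid_bits - 1:0] hold the hardware VMID and the
 * bits above them hold the allocator generation.
 */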

/*
 * As VMID #0 is always reserved, we will never allocate a VMID whose
 * index bits are zero, so the value below can be treated as invalid.
 * It is used to set active_vmids on vCPU schedule out.
 */
#define VMID_ACTIVE_INVALID             VMID_FIRST_VERSION

#define vmid_gen_match(vmid) \
        (!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))
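
/*
 * Note on vmid_gen_match(): XORing the VMID with the current global
 * generation cancels the generation bits when they are equal, and the
 * shift discards the low index bits, so the macro evaluates to true
 * exactly when the VMID was allocated in the current generation.
 */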

static void flush_context(void)
{
        int cpu;
        u64 vmid;

        bitmap_clear(vmid_map, 0, NUM_USER_VMIDS);

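        /*
         * Mark the VMID each CPU is currently running with (or, if it
         * has none, its previously reserved VMID) as used in the new
         * generation, so that VMID is not handed out to a different VM
         * after rollover.
         */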
        for_each_possible_cpu(cpu) {
                vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);

                /* Preserve reserved VMID */
                if (vmid == 0)
                        vmid = per_cpu(reserved_vmids, cpu);
                __set_bit(vmid2idx(vmid), vmid_map);
                per_cpu(reserved_vmids, cpu) = vmid;
        }

        /*
         * Unlike the ASID allocator, we expect less frequent rollover in
         * the case of VMIDs. Hence, instead of marking the CPU as
         * flush_pending and issuing a local context invalidation on
         * the next context-switch, we broadcast TLB flush + I-cache
         * invalidation over the inner shareable domain on rollover.
         */
        kvm_call_hyp(__kvm_flush_vm_context);
}

static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
{
        int cpu;
        bool hit = false;

        /*
         * Iterate over the set of reserved VMIDs looking for a match
         * and update to use newvmid (i.e. the same VMID in the current
         * generation).
         */
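        /*
         * Note: several CPUs may have reserved the same VMID (the same
         * guest can have vCPUs running on multiple CPUs at rollover
         * time), so every matching entry must be updated.
         */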
        for_each_possible_cpu(cpu) {
                if (per_cpu(reserved_vmids, cpu) == vmid) {
                        hit = true;
                        per_cpu(reserved_vmids, cpu) = newvmid;
                }
        }

        return hit;
}

static u64 new_vmid(struct kvm_vmid *kvm_vmid)
{
        static u32 cur_idx = 1;
        u64 vmid = atomic64_read(&kvm_vmid->id);
        u64 generation = atomic64_read(&vmid_generation);

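        /*
         * If this VM already had a VMID, try to keep the same hardware
         * VMID in the new generation: either it is already recorded as
         * reserved for a CPU, or its bitmap slot is still free and can
         * be claimed directly.
         */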
        if (vmid != 0) {
                u64 newvmid = generation | (vmid & ~VMID_MASK);

                if (check_update_reserved_vmid(vmid, newvmid)) {
                        atomic64_set(&kvm_vmid->id, newvmid);
                        return newvmid;
                }

                if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
                        atomic64_set(&kvm_vmid->id, newvmid);
                        return newvmid;
                }
        }

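        /*
         * Otherwise allocate a free VMID from the bitmap, starting the
         * search after the most recently allocated index. If the map is
         * full, bump the generation and flush before retrying.
         */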
        vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx);
        if (vmid != NUM_USER_VMIDS)
                goto set_vmid;

        /* We're out of VMIDs, so increment the global generation count */
        generation = atomic64_add_return_relaxed(VMID_FIRST_VERSION,
                                                 &vmid_generation);
        flush_context();

        /* We have more VMIDs than CPUs, so this will always succeed */
        vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1);

set_vmid:
        __set_bit(vmid, vmid_map);
        cur_idx = vmid;
        vmid = idx2vmid(vmid) | generation;
        atomic64_set(&kvm_vmid->id, vmid);
        return vmid;
}

/* Called from vCPU sched out with preemption disabled */
void kvm_arm_vmid_clear_active(void)
{
        atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}

void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
        unsigned long flags;
        u64 vmid, old_active_vmid;

        vmid = atomic64_read(&kvm_vmid->id);

        /*
         * Please refer to the comments in check_and_switch_context() in
         * arch/arm64/mm/context.c.
         *
         * Unlike the ASID allocator, we set active_vmids to
         * VMID_ACTIVE_INVALID on vCPU schedule out to avoid
         * reserving the VMID space needlessly on rollover.
         * Hence we explicitly check here for a "!= 0" to
         * handle the sync with a concurrent rollover.
         */
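        /*
         * Fast path: if no rollover has begun (active_vmids is non-zero
         * and the generation still matches), publish the VMID with a
         * single cmpxchg. A return value of 0 means a concurrent
         * rollover cleared active_vmids under us, so fall through and
         * retry under the lock.
         */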
        old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids));
        if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
            0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
                                          old_active_vmid, vmid))
                return;

        raw_spin_lock_irqsave(&cpu_vmid_lock, flags);

        /* Check that our VMID belongs to the current generation. */
        vmid = atomic64_read(&kvm_vmid->id);
        if (!vmid_gen_match(vmid))
                vmid = new_vmid(kvm_vmid);

        atomic64_set(this_cpu_ptr(&active_vmids), vmid);
        raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
}

/*
 * Initialize the VMID allocator
 */
int __init kvm_arm_vmid_alloc_init(void)
{
        kvm_arm_vmid_bits = kvm_get_vmid_bits();

        /*
         * Expect allocation after rollover to fail if we don't have
         * at least one more VMID than CPUs. VMID #0 is always reserved.
         */
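        /*
         * For example (illustrative, assuming 8-bit VMIDs): NUM_USER_VMIDS
         * is 256 and VMID #0 is reserved, leaving 255 usable VMIDs, so the
         * check below warns when there are 255 or more possible CPUs.
         */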
        WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
        atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
        vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS),
                           sizeof(*vmid_map), GFP_KERNEL);
        if (!vmid_map)
                return -ENOMEM;

        return 0;
}

void __init kvm_arm_vmid_alloc_free(void)
{
        kfree(vmid_map);
}