// SPDX-License-Identifier: GPL-2.0
/*
 * VMID allocator.
 *
 * Based on the arm64 ASID allocator algorithm.
 * Please refer to arch/arm64/mm/context.c for detailed
 * comments on the algorithm.
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

unsigned int __ro_after_init kvm_arm_vmid_bits;
static DEFINE_RAW_SPINLOCK(cpu_vmid_lock);

static atomic64_t vmid_generation;
static unsigned long *vmid_map;

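/*
 * Per-CPU tracking: active_vmids holds the VMID/generation pair this CPU
 * is currently running with (reset to 0 by a rollover, set to
 * VMID_ACTIVE_INVALID when the vCPU is scheduled out), while
 * reserved_vmids remembers the VMID that must survive a rollover for
 * that CPU.
 */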
static DEFINE_PER_CPU(atomic64_t, active_vmids);
static DEFINE_PER_CPU(u64, reserved_vmids);

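/*
 * A 64-bit VMID value is split in two: the lower kvm_arm_vmid_bits hold
 * the hardware VMID programmed into VTTBR_EL2, and the upper bits hold
 * the allocator's generation ("version") counter.
 */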
#define VMID_MASK		(~GENMASK(kvm_arm_vmid_bits - 1, 0))
#define VMID_FIRST_VERSION	(1UL << kvm_arm_vmid_bits)

#define NUM_USER_VMIDS		VMID_FIRST_VERSION
#define vmid2idx(vmid)		((vmid) & ~VMID_MASK)
#define idx2vmid(idx)		vmid2idx(idx)

/*
 * As VMID #0 is always reserved, a value like the one below can never be
 * allocated and can therefore be treated as invalid. It is used to set
 * active_vmids on vCPU schedule out.
 */
#define VMID_ACTIVE_INVALID		VMID_FIRST_VERSION

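/*
 * vmid_gen_match() is true when the given value carries the current
 * generation in its upper bits. For example, assuming 16-bit VMIDs and a
 * current vmid_generation of 0x30000: a value of 0x31234 matches, while
 * 0x21234 was allocated in an older generation and must be refreshed.
 */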
#define vmid_gen_match(vmid) \
	(!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))

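/*
 * Generation rollover: called with cpu_vmid_lock held. Reset the
 * allocation bitmap, but preserve the VMID each CPU is currently running
 * with (or had reserved earlier) so that running guests can keep their
 * VMID across the rollover.
 */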
static void flush_context(void)
{
	int cpu;
	u64 vmid;

	bitmap_zero(vmid_map, NUM_USER_VMIDS);

	for_each_possible_cpu(cpu) {
		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);

		/* Preserve reserved VMID */
		if (vmid == 0)
			vmid = per_cpu(reserved_vmids, cpu);
		__set_bit(vmid2idx(vmid), vmid_map);
		per_cpu(reserved_vmids, cpu) = vmid;
	}

	/*
	 * Unlike the ASID allocator, we expect less frequent rollover in
	 * the case of VMIDs. Hence, instead of marking the CPU as
	 * flush_pending and issuing a local context invalidation on
	 * the next context-switch, we broadcast TLB flush + I-cache
	 * invalidation over the inner shareable domain on rollover.
	 */
	kvm_call_hyp(__kvm_flush_vm_context);
}

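/*
 * Returns true if vmid is currently held in a CPU's reserved slot; in
 * that case every matching slot is updated to newvmid so the guest keeps
 * the same hardware VMID in the new generation. Called with
 * cpu_vmid_lock held.
 */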
static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved VMIDs looking for a match
	 * and update to use newvmid (i.e. the same VMID in the current
	 * generation).
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_vmids, cpu) == vmid) {
			hit = true;
			per_cpu(reserved_vmids, cpu) = newvmid;
		}
	}

	return hit;
}

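/*
 * Allocate a VMID for kvm_vmid in the current generation; called with
 * cpu_vmid_lock held. We first try to keep the old hardware VMID (either
 * via the reserved set or by claiming its bit in the map), and only then
 * search for a free slot, bumping the generation and flushing the
 * context if the VMID space is exhausted.
 */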
static u64 new_vmid(struct kvm_vmid *kvm_vmid)
{
	static u32 cur_idx = 1;
	u64 vmid = atomic64_read(&kvm_vmid->id);
	u64 generation = atomic64_read(&vmid_generation);

	if (vmid != 0) {
		u64 newvmid = generation | (vmid & ~VMID_MASK);

		if (check_update_reserved_vmid(vmid, newvmid)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}

		if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}
	}

	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx);
	if (vmid != NUM_USER_VMIDS)
		goto set_vmid;

	/* We're out of VMIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(VMID_FIRST_VERSION,
						 &vmid_generation);
	flush_context();

	/* We have more VMIDs than CPUs, so this will always succeed */
	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1);

set_vmid:
	__set_bit(vmid, vmid_map);
	cur_idx = vmid;
	vmid = idx2vmid(vmid) | generation;
	atomic64_set(&kvm_vmid->id, vmid);
	return vmid;
}

/* Called from vCPU sched out with preemption disabled */
void kvm_arm_vmid_clear_active(void)
{
	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}

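/*
 * Validate (and, if necessary, refresh) the VMID on the guest entry
 * path; must be called with preemption disabled since it updates this
 * CPU's active_vmids. The fast path re-installs the current VMID with a
 * cmpxchg; the slow path takes cpu_vmid_lock and allocates a new VMID if
 * the generation has rolled over. Returns true if kvm_vmid was updated.
 */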
bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
	unsigned long flags;
	u64 vmid, old_active_vmid;
	bool updated = false;

	vmid = atomic64_read(&kvm_vmid->id);

	/*
	 * Please refer to the comments in check_and_switch_context() in
	 * arch/arm64/mm/context.c.
	 *
	 * Unlike the ASID allocator, we set active_vmids to
	 * VMID_ACTIVE_INVALID on vCPU schedule out to avoid
	 * needlessly reserving VMID space on rollover.
	 * Hence we explicitly check for "!= 0" here to handle
	 * synchronisation with a concurrent rollover.
	 */
	old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids));
	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
					  old_active_vmid, vmid))
		return false;

	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);

	/* Check that our VMID belongs to the current generation. */
	vmid = atomic64_read(&kvm_vmid->id);
	if (!vmid_gen_match(vmid)) {
		vmid = new_vmid(kvm_vmid);
		updated = true;
	}

	atomic64_set(this_cpu_ptr(&active_vmids), vmid);
	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);

	return updated;
}

/*
 * Initialize the VMID allocator
 */
int __init kvm_arm_vmid_alloc_init(void)
{
	kvm_arm_vmid_bits = kvm_get_vmid_bits();

	/*
	 * Expect allocation after rollover to fail if we don't have
	 * at least one more VMID than CPUs. VMID #0 is always reserved.
	 */
	WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
	atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
	vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL);
	if (!vmid_map)
		return -ENOMEM;

	return 0;
}

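/* Release the VMID bitmap allocated by kvm_arm_vmid_alloc_init(). */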
void __init kvm_arm_vmid_alloc_free(void)
{
	bitmap_free(vmid_map);
}