// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>

#ifdef CONFIG_MMU

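/*
 * A context ID (mm->context.id) packs a rollover generation ("version")
 * in the bits above SATP_ASID_BITS and the hardware ASID in the low
 * SATP_ASID_BITS bits; cntx2version() and cntx2asid() extract the two
 * halves. Per-CPU active_context tracks the context currently programmed
 * on each hart, and reserved_context preserves a hart's context across an
 * ASID rollover so the task it is running is not forgotten.
 */
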
DEFINE_STATIC_KEY_FALSE(use_asid_allocator);

static unsigned long num_asids;

static atomic_long_t current_version;

static DEFINE_RAW_SPINLOCK(context_lock);
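/* CPUs that still owe a local TLB flush after the last ASID rollover */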
static cpumask_t context_tlb_flush_pending;
static unsigned long *context_asid_map;

static DEFINE_PER_CPU(atomic_long_t, active_context);
static DEFINE_PER_CPU(unsigned long, reserved_context);

static bool check_update_reserved_context(unsigned long cntx,
					  unsigned long newcntx)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved CONTEXT looking for a match.
	 * If we find one, then we can update our mm to use new CONTEXT
	 * (i.e. the same CONTEXT in the current_version) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old CONTEXT are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved CONTEXT in a future
	 * version.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_context, cpu) == cntx) {
			hit = true;
			per_cpu(reserved_context, cpu) = newcntx;
		}
	}

	return hit;
}

static void __flush_context(void)
{
	int i;
	unsigned long cntx;

	/* Must be called with context_lock held */
	lockdep_assert_held(&context_lock);

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_zero(context_asid_map, num_asids);

	/* Mark already active ASIDs as used */
	for_each_possible_cpu(i) {
		cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0);
		/*
		 * If this CPU has already been through a rollover, but
		 * hasn't run another task in the meantime, we must preserve
		 * its reserved CONTEXT, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (cntx == 0)
			cntx = per_cpu(reserved_context, i);

		__set_bit(cntx2asid(cntx), context_asid_map);
		per_cpu(reserved_context, i) = cntx;
	}

	/* Mark ASID #0 as used because it is used at boot-time */
	__set_bit(0, context_asid_map);

	/* Queue a TLB invalidation for each CPU on next context-switch */
	cpumask_setall(&context_tlb_flush_pending);
}

static unsigned long __new_context(struct mm_struct *mm)
{
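	/* Remember where the last search ended so allocation resumes from there */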
	static u32 cur_idx = 1;
	unsigned long cntx = atomic_long_read(&mm->context.id);
	unsigned long asid, ver = atomic_long_read(&current_version);

	/* Must be called with context_lock held */
	lockdep_assert_held(&context_lock);

	if (cntx != 0) {
		unsigned long newcntx = ver | cntx2asid(cntx);

		/*
		 * If our current CONTEXT was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_context(cntx, newcntx))
			return newcntx;

		/*
		 * We had a valid CONTEXT in a previous life, so try to
		 * re-use it if possible.
		 */
		if (!__test_and_set_bit(cntx2asid(cntx), context_asid_map))
			return newcntx;
	}

	/*
	 * Allocate a free ASID. If we can't find one then increment
	 * current_version and flush all ASIDs.
	 */
	asid = find_next_zero_bit(context_asid_map, num_asids, cur_idx);
	if (asid != num_asids)
		goto set_asid;

	/* We're out of ASIDs, so increment current_version */
	ver = atomic_long_add_return_relaxed(BIT(SATP_ASID_BITS), &current_version);

	/* Flush everything */
	__flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(context_asid_map, num_asids, 1);

set_asid:
	__set_bit(asid, context_asid_map);
	cur_idx = asid;
	return asid | ver;
}

static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	bool need_flush_tlb = false;
	unsigned long cntx, old_active_cntx;

	cntx = atomic_long_read(&mm->context.id);

	/*
	 * If our active_context is non-zero and the context matches the
	 * current_version, then we update the active_context entry with a
	 * relaxed cmpxchg.
	 *
	 * Following is how we handle racing with a concurrent rollover:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated version.
	 *
	 * - We get a valid context back from the cmpxchg, in which case we
	 *   keep using the old ASID, because __flush_context() will have
	 *   marked the ASID of active_context as used and the next context
	 *   switch will allocate a new context.
	 */
	old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
	if (old_active_cntx &&
	    (cntx2version(cntx) == atomic_long_read(&current_version)) &&
	    atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
					old_active_cntx, cntx))
		goto switch_mm_fast;

	raw_spin_lock_irqsave(&context_lock, flags);

	/* Check that our ASID belongs to the current_version. */
	cntx = atomic_long_read(&mm->context.id);
	if (cntx2version(cntx) != atomic_long_read(&current_version)) {
		cntx = __new_context(mm);
		atomic_long_set(&mm->context.id, cntx);
	}

	if (cpumask_test_and_clear_cpu(cpu, &context_tlb_flush_pending))
		need_flush_tlb = true;

	atomic_long_set(&per_cpu(active_context, cpu), cntx);

	raw_spin_unlock_irqrestore(&context_lock, flags);

switch_mm_fast:
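	/*
	 * Program satp with the new root page table PPN, this mm's hardware
	 * ASID and the paging mode in a single CSR write.
	 */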
	csr_write(CSR_SATP, virt_to_pfn(mm->pgd) |
		  (cntx2asid(cntx) << SATP_ASID_SHIFT) |
		  satp_mode);

	if (need_flush_tlb)
		local_flush_tlb_all();
}

static void set_mm_noasid(struct mm_struct *mm)
{
	/* Switch the page table and blindly nuke entire local TLB */
	csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | satp_mode);
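	/*
	 * Without the allocator, ASID 0 is the only ASID ever programmed,
	 * so flushing it removes every stale non-global mapping.
	 */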
	local_flush_tlb_all_asid(0);
}

static inline void set_mm(struct mm_struct *prev,
			  struct mm_struct *next, unsigned int cpu)
{
	/*
	 * The mm_cpumask indicates which harts' TLBs contain the virtual
	 * address mappings of the mm. Unlike the noasid case, the ASID
	 * mechanism does not flush the TLB on every switch_mm for
	 * performance reasons, so it cannot guarantee that stale TLB
	 * entries are invalidated. Therefore, when ASIDs are in use, keep
	 * every CPU's footprint in mm_cpumask() until the mm is reset.
	 */
	cpumask_set_cpu(cpu, mm_cpumask(next));
	if (static_branch_unlikely(&use_asid_allocator)) {
		set_mm_asid(next, cpu);
	} else {
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
		set_mm_noasid(next);
	}
}

static int __init asids_init(void)
{
	unsigned long asid_bits, old;

	/*
	 * Figure out the number of ASID bits implemented in hardware:
	 * satp.ASID is a WARL field, so write all-ones to it and read back
	 * to see which bits stick.
	 */
	old = csr_read(CSR_SATP);
	asid_bits = old | (SATP_ASID_MASK << SATP_ASID_SHIFT);
	csr_write(CSR_SATP, asid_bits);
	asid_bits = (csr_read(CSR_SATP) >> SATP_ASID_SHIFT) & SATP_ASID_MASK;
	asid_bits = fls_long(asid_bits);
	csr_write(CSR_SATP, old);

	/*
	 * In the process of determining the number of ASID bits (above)
	 * we polluted the TLB of the current HART, so do a TLB flush
	 * to remove the unwanted TLB entries.
	 */
	local_flush_tlb_all();

	/* Pre-compute ASID details */
	if (asid_bits) {
		num_asids = 1 << asid_bits;
	}

	/*
	 * Use the ASID allocator only if the number of HW ASIDs is at
	 * least twice the number of possible CPUs, so that rollover can
	 * always find a free ASID.
	 */
	if (num_asids > (2 * num_possible_cpus())) {
		atomic_long_set(&current_version, BIT(SATP_ASID_BITS));

		context_asid_map = bitmap_zalloc(num_asids, GFP_KERNEL);
		if (!context_asid_map)
			panic("Failed to allocate bitmap for %lu ASIDs\n",
			      num_asids);

		__set_bit(0, context_asid_map);

		static_branch_enable(&use_asid_allocator);

		pr_info("ASID allocator using %lu bits (%lu entries)\n",
			asid_bits, num_asids);
	} else {
		pr_info("ASID allocator disabled (%lu bits)\n", asid_bits);
	}

	return 0;
}
early_initcall(asids_init);
#else
static inline void set_mm(struct mm_struct *prev,
			  struct mm_struct *next, unsigned int cpu)
{
	/* Nothing to do here when there is no MMU */
}
#endif

/*
 * When necessary, performs a deferred icache flush for the given MM context,
 * on the local CPU. RISC-V has no direct mechanism for instruction cache
 * shootdowns, so instead we send an IPI that informs the remote harts they
 * need to flush their local instruction caches. To avoid pathologically slow
 * behavior in a common case (a bunch of single-hart processes on a many-hart
 * machine, i.e. 'make -j') we avoid the IPIs for harts that are not currently
 * executing an MM context and instead schedule a deferred local instruction
 * cache flush to be performed before execution resumes on each hart. This
 * actually performs that local instruction cache flush, which implicitly only
 * refers to the current hart.
 *
 * The "cpu" argument must be the current local CPU number.
 */
static inline void flush_icache_deferred(struct mm_struct *mm, unsigned int cpu,
					 struct task_struct *task)
{
#ifdef CONFIG_SMP
	if (cpumask_test_and_clear_cpu(cpu, &mm->context.icache_stale_mask)) {
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();

		/*
		 * If cache will be flushed in switch_to, no need to flush here.
		 */
		if (!(task && switch_to_should_flush_icache(task)))
			local_flush_icache_all();
	}
#endif
}

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *task)
{
	unsigned int cpu;

	if (unlikely(prev == next))
		return;

	membarrier_arch_switch_mm(prev, next, task);

	/*
	 * Mark the current MM context as inactive, and the next as
	 * active. This is at least used by the icache flushing
	 * routines in order to determine who should be flushed.
	 */
	cpu = smp_processor_id();

	set_mm(prev, next, cpu);

	flush_icache_deferred(next, cpu, task);
}