// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>

struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;

struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;

static u64 __io_map_base;

struct hyp_fixmap_slot {
	u64 addr;
	kvm_pte_t *ptep;
};
static DEFINE_PER_CPU(struct hyp_fixmap_slot, fixmap_slots);

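/*
 * Map @size bytes at physical address @phys to the hyp VA @start, taking the
 * PGD lock to serialise updates to the hyp page-table.
 */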
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
				  unsigned long phys, enum kvm_pgtable_prot prot)
{
	int err;

	hyp_spin_lock(&pkvm_pgd_lock);
	err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return err;
}

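/*
 * Reserve a private VA range starting at @start by advancing __io_map_base
 * past it. The caller must hold the PGD lock; the reservation fails if @start
 * lies below the current base, or if the range would run into the vmemmap.
 */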
static int __pkvm_alloc_private_va_range(unsigned long start, size_t size)
{
	unsigned long cur;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	if (!start || start < __io_map_base)
		return -EINVAL;

	/* The allocated size is always a multiple of PAGE_SIZE */
	cur = start + PAGE_ALIGN(size);

	/* Are we overflowing on the vmemmap ? */
	if (cur > __hyp_vmemmap)
		return -ENOMEM;

	__io_map_base = cur;

	return 0;
}

/**
 * pkvm_alloc_private_va_range - Allocates a private VA range.
 * @size:	The size of the VA range to reserve.
 * @haddr:	The hypervisor virtual start address of the allocation.
 *
 * The private virtual address (VA) range is allocated above __io_map_base,
 * and the reserved size is @size rounded up to a multiple of PAGE_SIZE.
 *
 * Return: 0 on success or negative error code on failure.
 */
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
	unsigned long addr;
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	addr = __io_map_base;
	ret = __pkvm_alloc_private_va_range(addr, size);
	hyp_spin_unlock(&pkvm_pgd_lock);

	*haddr = addr;

	return ret;
}

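/*
 * Allocate a private VA range large enough for @size bytes starting at @phys
 * (including the offset of @phys within its page), map it, and return the hyp
 * VA corresponding to @phys in @haddr.
 */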
int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
				  enum kvm_pgtable_prot prot,
				  unsigned long *haddr)
{
	unsigned long addr;
	int err;

	size = PAGE_ALIGN(size + offset_in_page(phys));
	err = pkvm_alloc_private_va_range(size, &addr);
	if (err)
		return err;

	err = __pkvm_create_mappings(addr, size, phys, prot);
	if (err)
		return err;

	*haddr = addr + offset_in_page(phys);
	return err;
}

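/*
 * Map the hyp linear-map range [@from, @to) in place, page by page, deriving
 * the backing physical address of each page from its hyp VA. The caller must
 * hold the PGD lock.
 */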
int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	unsigned long virt_addr;
	phys_addr_t phys;

	hyp_assert_lock_held(&pkvm_pgd_lock);

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

	for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
		int err;

		phys = hyp_virt_to_phys((void *)virt_addr);
		err = kvm_pgtable_hyp_map(&pkvm_pgtable, virt_addr, PAGE_SIZE,
					  phys, prot);
		if (err)
			return err;
	}

	return 0;
}

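/* Same as pkvm_create_mappings_locked(), but takes the PGD lock itself. */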
int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
{
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);
	ret = pkvm_create_mappings_locked(from, to, prot);
	hyp_spin_unlock(&pkvm_pgd_lock);

	return ret;
}

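/*
 * Back the portion of the hyp vmemmap covering each memory region with pages
 * taken from @back, and zero the newly mapped struct hyp_page arrays.
 */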
int hyp_back_vmemmap(phys_addr_t back)
{
	unsigned long i, start, size, end = 0;
	int ret;

	for (i = 0; i < hyp_memblock_nr; i++) {
		start = hyp_memory[i].base;
		start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
		/*
		 * The beginning of the hyp_vmemmap region for the current
		 * memblock may already be backed by the page backing the end
		 * of the previous region, so avoid mapping it twice.
		 */
		start = max(start, end);

		end = hyp_memory[i].base + hyp_memory[i].size;
		end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
		if (start >= end)
			continue;

		size = end - start;
		ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
		if (ret)
			return ret;

		memset(hyp_phys_to_virt(back), 0, size);
		back += size;
	}

	return 0;
}

static void *__hyp_bp_vect_base;
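/*
 * Select the exception vector used by this CPU according to the requested
 * Spectre mitigation slot.
 */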
int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
{
	void *vector;

	switch (slot) {
	case HYP_VECTOR_DIRECT: {
		vector = __kvm_hyp_vector;
		break;
	}
	case HYP_VECTOR_SPECTRE_DIRECT: {
		vector = __bp_harden_hyp_vecs;
		break;
	}
	case HYP_VECTOR_INDIRECT:
	case HYP_VECTOR_SPECTRE_INDIRECT: {
		vector = (void *)__hyp_bp_vect_base;
		break;
	}
	default:
		return -EINVAL;
	}

	vector = __kvm_vector_slot2addr(vector, slot);
	*this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;

	return 0;
}

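/*
 * Make the Spectre-hardened vectors reachable from the hypervisor. When the
 * system needs idmapped vectors, create a private executable mapping of
 * __bp_harden_hyp_vecs; otherwise, use the hyp alias of the vectors directly.
 */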
int hyp_map_vectors(void)
{
	phys_addr_t phys;
	unsigned long bp_base;
	int ret;

	if (!kvm_system_needs_idmapped_vectors()) {
		__hyp_bp_vect_base = __bp_harden_hyp_vecs;
		return 0;
	}

	phys = __hyp_pa(__bp_harden_hyp_vecs);
	ret = __pkvm_create_private_mapping(phys, __BP_HARDEN_HYP_VECS_SZ,
					    PAGE_HYP_EXEC, &bp_base);
	if (ret)
		return ret;

	__hyp_bp_vect_base = (void *)bp_base;

	return 0;
}

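/*
 * Map @phys at this CPU's fixmap slot by rewriting the slot's reserved PTE,
 * and return the fixmap VA at which it can be accessed.
 */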
void *hyp_fixmap_map(phys_addr_t phys)
{
	struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
	kvm_pte_t pte, *ptep = slot->ptep;

	pte = *ptep;
	pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
	pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID;
	WRITE_ONCE(*ptep, pte);
	dsb(ishst);

	return (void *)slot->addr;
}

static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
{
	kvm_pte_t *ptep = slot->ptep;
	u64 addr = slot->addr;

	WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);

	/*
	 * Irritatingly, the architecture requires that we use inner-shareable
	 * broadcast TLB invalidation here in case another CPU speculates
	 * through our fixmap and decides to create an "amalgamation of the
	 * values held in the TLB" due to the apparent lack of a
	 * break-before-make sequence.
	 *
	 * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
	 */
	dsb(ishst);
	__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), KVM_PGTABLE_LAST_LEVEL);
	dsb(ish);
	isb();
}

void hyp_fixmap_unmap(void)
{
	fixmap_clear_slot(this_cpu_ptr(&fixmap_slots));
}

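/*
 * Page-table walker callback: record the address and leaf PTE of the walked
 * page in the fixmap slot of the CPU passed via ctx->arg.
 */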
static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
				   enum kvm_pgtable_walk_flags visit)
{
	struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg);

	if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_LAST_LEVEL)
		return -EINVAL;

	slot->addr = ctx->addr;
	slot->ptep = ctx->ptep;

	/*
	 * Clear the PTE, but keep the page-table page refcount elevated to
	 * prevent it from ever being freed. This lets us manipulate the PTEs
	 * by hand safely without ever needing to allocate memory.
	 */
	fixmap_clear_slot(slot);

	return 0;
}

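/* Walk the hyp page-table at @addr and register its leaf PTE as @cpu's slot. */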
static int create_fixmap_slot(u64 addr, u64 cpu)
{
	struct kvm_pgtable_walker walker = {
		.cb	= __create_fixmap_slot_cb,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= (void *)cpu,
	};

	return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
}

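/*
 * Set up one private fixmap page per CPU. Each page is first mapped to a
 * throwaway target (the start of the hyp BSS) so that the page-table walk
 * finds a valid leaf PTE to reserve; that mapping is cleared again once the
 * slot has been recorded.
 */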
int hyp_create_pcpu_fixmap(void)
{
	unsigned long addr, i;
	int ret;

	for (i = 0; i < hyp_nr_cpus; i++) {
		ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
		if (ret)
			return ret;

		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
					  __hyp_pa(__hyp_bss_start), PAGE_HYP);
		if (ret)
			return ret;

		ret = create_fixmap_slot(addr, i);
		if (ret)
			return ret;
	}

	return 0;
}

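/*
 * Identity-map the hyp idmap text, and initialise __io_map_base and
 * __hyp_vmemmap so that the private VA and vmemmap areas cannot conflict
 * with the idmap.
 */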
int hyp_create_idmap(u32 hyp_va_bits)
{
	unsigned long start, end;

	start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
	start = ALIGN_DOWN(start, PAGE_SIZE);

	end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
	end = ALIGN(end, PAGE_SIZE);

	/*
	 * One half of the VA space is reserved to linearly map portions of
	 * memory -- see va_layout.c for more details. The other half of the VA
	 * space contains the trampoline page, and needs some care. Split that
	 * second half in two and find the quarter of VA space not conflicting
	 * with the idmap to place the IOs and the vmemmap. IOs use the lower
	 * half of the quarter and the vmemmap the upper half.
	 */
	__io_map_base = start & BIT(hyp_va_bits - 2);
	__io_map_base ^= BIT(hyp_va_bits - 2);
	__hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);

	return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}

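/*
 * Allocate a two-page private VA window for a hyp stack: the page at the
 * higher address is mapped to @phys and the lower page is left unbacked as a
 * guard page. @haddr receives the high end of the window, i.e. the initial
 * stack pointer.
 */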
int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
{
	unsigned long addr, prev_base;
	size_t size;
	int ret;

	hyp_spin_lock(&pkvm_pgd_lock);

	prev_base = __io_map_base;
	/*
	 * Efficient stack verification using the PAGE_SHIFT bit implies
	 * an alignment of our allocation on the order of the size.
	 */
	size = PAGE_SIZE * 2;
	addr = ALIGN(__io_map_base, size);

	ret = __pkvm_alloc_private_va_range(addr, size);
	if (!ret) {
		/*
		 * Since the stack grows downwards, map the stack to the page
		 * at the higher address and leave the lower guard page
		 * unbacked.
		 *
		 * Any valid stack address now has the PAGE_SHIFT bit as 1
		 * and addresses corresponding to the guard page have the
		 * PAGE_SHIFT bit as 0 - this is used for overflow detection.
		 */
		ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
					  PAGE_SIZE, phys, PAGE_HYP);
		if (ret)
			__io_map_base = prev_base;
	}
	hyp_spin_unlock(&pkvm_pgd_lock);

	*haddr = addr + size;

	return ret;
}

static void *admit_host_page(void *arg)
{
	struct kvm_hyp_memcache *host_mc = arg;

	if (!host_mc->nr_pages)
		return NULL;

	/*
	 * The host still owns the pages in its memcache, so we need to go
	 * through a full host-to-hyp donation cycle to change it. Fortunately,
	 * __pkvm_host_donate_hyp() takes care of races for us, so if it
	 * succeeds we're good to go.
	 */
	if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
		return NULL;

	return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
}

/* Refill our local memcache by popping pages from the one provided by the host. */
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		    struct kvm_hyp_memcache *host_mc)
{
	struct kvm_hyp_memcache tmp = *host_mc;
	int ret;

	ret =  __topup_hyp_memcache(mc, min_pages, admit_host_page,
				    hyp_virt_to_phys, &tmp);
	*host_mc = tmp;

	return ret;
}