/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */

#ifndef __ASM_ARM_EFI_H
#define __ASM_ARM_EFI_H

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/highmem.h>
#include <asm/mach/map.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>

#ifdef CONFIG_EFI
void efi_init(void);
void arm_efi_init(void);

int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool);

#define arch_efi_call_virt_setup()	efi_virtmap_load()
#define arch_efi_call_virt_teardown()	efi_virtmap_unload()

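/*
 * With CONFIG_CPU_TTBR0_PAN, mappings in the TTBR0-covered (user) VA
 * range, which is where the EFI runtime services are mapped, are
 * normally inaccessible, so each runtime service call is bracketed by
 * uaccess_save_and_enable()/uaccess_restore(). The _Generic() selection
 * below (whose controlling expression is never evaluated) also handles
 * services that do not return efi_status_t: those are invoked for their
 * side effects only, with EFI_ABORTED substituted as a dummy result.
 */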
#ifdef CONFIG_CPU_TTBR0_PAN
#undef arch_efi_call_virt
#define arch_efi_call_virt(p, f, args...) ({				\
	unsigned int flags = uaccess_save_and_enable();			\
	efi_status_t res = _Generic((p)->f(args),			\
			efi_status_t:	(p)->f(args),			\
			default:	((p)->f(args), EFI_ABORTED));	\
	uaccess_restore(flags);						\
	res;								\
})
#endif

#define ARCH_EFI_IRQ_FLAGS_MASK \
	(PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
	 PSR_T_BIT | MODE_MASK)

static inline void efi_set_pgd(struct mm_struct *mm)
{
	check_and_switch_context(mm, NULL);
}

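/*
 * efi_virtmap_load()/efi_virtmap_unload() switch to and from the
 * dedicated EFI page tables (efi_mm) around runtime service calls,
 * using efi_set_pgd() above.
 */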
void efi_virtmap_load(void);
void efi_virtmap_unload(void);

#else
#define arm_efi_init()
#endif /* CONFIG_EFI */

/* arch specific definitions used by the stub code */

/*
 * A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
 * so we will reserve that amount of memory. We have no easy way to tell
 * in advance how much code + data the uncompressed kernel will actually
 * use. If this is insufficient, the decompressor will relocate itself out
 * of the way before performing the decompression.
 */
#define MAX_UNCOMP_KERNEL_SIZE	SZ_32M

/*
 * phys-to-virt patching requires that the physical to virtual offset is a
 * multiple of 2 MiB. However, using an alignment smaller than TEXT_OFFSET
 * here throws off the memory allocation logic, so let's use the smallest
 * power of two that is at least 2 MiB and at least TEXT_OFFSET.
 */
#define EFI_PHYS_ALIGN		max(UL(SZ_2M), roundup_pow_of_two(TEXT_OFFSET))
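
/*
 * With the usual ARM TEXT_OFFSET of 0x8000 (32 KiB), for example,
 * roundup_pow_of_two(TEXT_OFFSET) is 32 KiB, which is below 2 MiB, so
 * EFI_PHYS_ALIGN evaluates to SZ_2M.
 */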

/* on ARM, the initrd should be loaded in a lowmem region */
static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
{
	return round_down(image_addr, SZ_4M) + SZ_512M;
}
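
/*
 * For a (hypothetical) image_addr of 0x80008000, for instance, this
 * returns round_down(0x80008000, SZ_4M) + SZ_512M = 0xa0000000, i.e.
 * the initrd must be placed within 512 MiB of the 4 MiB-aligned base
 * of the kernel image.
 */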

struct efi_arm_entry_state {
	u32	cpsr_before_ebs;	/* CPSR before ExitBootServices() */
	u32	sctlr_before_ebs;	/* SCTLR before ExitBootServices() */
	u32	cpsr_after_ebs;		/* CPSR after ExitBootServices() */
	u32	sctlr_after_ebs;	/* SCTLR after ExitBootServices() */
};

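/*
 * Clean the capsule payload from the D-cache before handing it to the
 * firmware, so the data remains visible even if memory is later
 * accessed with the MMU or caches disabled.
 */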
static inline void efi_capsule_flush_cache_range(void *addr, int size)
{
	__cpuc_flush_dcache_area(addr, size);
}

#endif /* __ASM_ARM_EFI_H */