/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/proc-fns.h
 *
 *  Copyright (C) 1997-1999 Russell King
 *  Copyright (C) 2000 Deep Blue Solutions Ltd
 */
#ifndef __ASM_PROCFNS_H
#define __ASM_PROCFNS_H

#ifdef __KERNEL__

#include <asm/glue-proc.h>
#include <asm/page.h>

#ifndef __ASSEMBLY__

struct mm_struct;

/*
 * Don't change this structure - ASM code relies on it.
 */
struct processor {
	/* MISC
	 * get data abort address/flags
	 */
	void (*_data_abort)(unsigned long pc);
	/*
	 * Retrieve prefetch fault address
	 */
	unsigned long (*_prefetch_abort)(unsigned long lr);
	/*
	 * Set up any processor specifics
	 */
	void (*_proc_init)(void);
	/*
	 * Check for processor bugs
	 */
	void (*check_bugs)(void);
	/*
	 * Disable any processor specifics
	 */
	void (*_proc_fin)(void);
	/*
	 * Special stuff for a reset
	 */
	void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
	/*
	 * Idle the processor
	 */
	int (*_do_idle)(void);
	/*
	 * Processor architecture specific
	 */
	/*
	 * Write back ("clean") a virtual address range from the
	 * D-cache without invalidating it.
	 */
	void (*dcache_clean_area)(void *addr, int size);

	/*
	 * Set the page table
	 */
	void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
	/*
	 * Set a possibly extended PTE.  Non-extended PTEs should
	 * ignore 'ext'.
	 */
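	/*
	 * With LPAE the 64-bit pte is the hardware descriptor itself,
	 * so no separate extension word is needed.
	 */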
#ifdef CONFIG_ARM_LPAE
	void (*set_pte_ext)(pte_t *ptep, pte_t pte);
#else
	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
#endif

	/* Suspend/resume */
	unsigned int suspend_size;
	void (*do_suspend)(void *);
	void (*do_resume)(void *);
};

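/*
 * In the !MULTI_CPU case, <asm/glue-proc.h> is expected to #define the
 * cpu_* names to a single CPU-specific implementation (e.g.
 * cpu_v7_do_idle), so the externs below declare the glued symbols.
 */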
#ifndef MULTI_CPU
static inline void init_proc_vtable(const struct processor *p)
{
}

extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
#ifdef CONFIG_ARM_LPAE
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
#else
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
#endif
extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));

/* These two are private to arch/arm/kernel/suspend.c */
extern void cpu_do_suspend(void *);
extern void cpu_do_resume(void *);
#else

extern struct processor processor;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
#include <linux/smp.h>
/*
 * This can't be a per-cpu variable because we need to access it before
 * per-cpu has been initialised.  We have a couple of functions that are
 * called in a pre-emptible context, and so can't use smp_processor_id()
 * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
 * function pointers for these are identical across all CPUs.
 */
extern struct processor *cpu_vtable[];
#define PROC_VTABLE(f)			cpu_vtable[smp_processor_id()]->f
#define PROC_TABLE(f)			cpu_vtable[0]->f
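/*
 * Under this scheme cpu_do_idle(), for instance, expands to
 * cpu_vtable[smp_processor_id()]->_do_idle(), while the PROC_TABLE()
 * users (dcache_clean_area, set_pte_ext) always go through
 * cpu_vtable[0] and so remain safe in preemptible context.
 */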
static inline void init_proc_vtable(const struct processor *p)
{
	unsigned int cpu = smp_processor_id();
	*cpu_vtable[cpu] = *p;
	/* PROC_TABLE() always reads cpu_vtable[0]; enforce the invariant */
	WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
		     cpu_vtable[0]->dcache_clean_area);
	WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
		     cpu_vtable[0]->set_pte_ext);
}
#else
#define PROC_VTABLE(f)			processor.f
#define PROC_TABLE(f)			processor.f
static inline void init_proc_vtable(const struct processor *p)
{
	processor = *p;
}
#endif

#define cpu_proc_init			PROC_VTABLE(_proc_init)
#define cpu_check_bugs			PROC_VTABLE(check_bugs)
#define cpu_proc_fin			PROC_VTABLE(_proc_fin)
#define cpu_reset			PROC_VTABLE(reset)
#define cpu_do_idle			PROC_VTABLE(_do_idle)
#define cpu_dcache_clean_area		PROC_TABLE(dcache_clean_area)
#define cpu_set_pte_ext			PROC_TABLE(set_pte_ext)
#define cpu_do_switch_mm		PROC_VTABLE(switch_mm)

/* These two are private to arch/arm/kernel/suspend.c */
#define cpu_do_suspend			PROC_VTABLE(do_suspend)
#define cpu_do_resume			PROC_VTABLE(do_resume)
#endif

extern void cpu_resume(void);

#ifdef CONFIG_MMU

#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
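/*
 * Callers pass the pgd's kernel virtual address, e.g.
 * cpu_switch_mm(mm->pgd, mm) on the context-switch path; the macro
 * translates it to the physical address the MMU needs.
 */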

#ifdef CONFIG_ARM_LPAE

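/*
 * cpu_get_ttbr(nr) reads the 64-bit TTBR<nr> (nr is 0 or 1); the mrrc
 * transfer returns the low 32 bits in %Q0 and the high 32 bits in %R0.
 */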
#define cpu_get_ttbr(nr)					\
	({							\
		u64 ttbr;					\
		__asm__("mrrc	p15, " #nr ", %Q0, %R0, c2"	\
			: "=r" (ttbr));				\
		ttbr;						\
	})

#define cpu_get_pgd()	\
	({						\
		u64 pg = cpu_get_ttbr(0);		\
		pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);	\
		(pgd_t *)phys_to_virt(pg);		\
	})
#else
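/*
 * Without LPAE the first-level table is 16KiB, so bits [13:0] of
 * TTBR0 hold flags rather than address bits and are masked off here.
 */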
#define cpu_get_pgd()	\
	({						\
		unsigned long pg;			\
		__asm__("mrc	p15, 0, %0, c2, c0, 0"	\
			 : "=r" (pg) : : "cc");		\
		pg &= ~0x3fff;				\
		(pgd_t *)phys_to_virt(pg);		\
	})
#endif

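/*
 * TTBCR sits in CP15 c2, c0, 2: it controls the TTBR0/TTBR1 address
 * split and, on LPAE-capable cores, the EAE bit that selects the
 * long-descriptor page table format.
 */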
static inline unsigned int cpu_get_ttbcr(void)
{
	unsigned int ttbcr;
	asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
	return ttbcr;
}

static inline void cpu_set_ttbcr(unsigned int ttbcr)
{
	asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr) : "memory");
}

#else	/* !CONFIG_MMU */

#define cpu_switch_mm(pgd,mm)	{ }

#endif

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */