/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_FPU_H
#define _ASM_FPU_H

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>
#include <linux/bitops.h>

#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/current.h>
#include <asm/loongarch.h>
#include <asm/processor.h>
#include <asm/ptrace.h>

struct sigcontext;

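/*
 * Kernel-mode FPU usage (sketch): bracket any in-kernel FP/SIMD use with
 * the begin/end pair, which keeps the region from being preempted, e.g.:
 *
 *	if (kernel_fpu_available()) {
 *		kernel_fpu_begin();
 *		... FP/SIMD computation ...
 *		kernel_fpu_end();
 *	}
 */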
#define kernel_fpu_available() cpu_has_fpu
extern void kernel_fpu_begin(void);
extern void kernel_fpu_end(void);

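/*
 * Low-level save/restore primitives, implemented in assembly; each takes
 * the task's register save area (struct loongarch_fpu) directly.
 */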
extern void _init_fpu(unsigned int);
extern void _save_fp(struct loongarch_fpu *);
extern void _restore_fp(struct loongarch_fpu *);

extern void _save_lsx(struct loongarch_fpu *fpu);
extern void _restore_lsx(struct loongarch_fpu *fpu);
extern void _init_lsx_upper(void);
extern void _restore_lsx_upper(struct loongarch_fpu *fpu);

extern void _save_lasx(struct loongarch_fpu *fpu);
extern void _restore_lasx(struct loongarch_fpu *fpu);
extern void _init_lasx_upper(void);
extern void _restore_lasx_upper(struct loongarch_fpu *fpu);

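/*
 * Forward declarations; the definitions (or empty stubs) appear in the
 * CONFIG_CPU_HAS_LSX/CONFIG_CPU_HAS_LASX sections below.
 */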
static inline void enable_lsx(void);
static inline void disable_lsx(void);
static inline void save_lsx(struct task_struct *t);
static inline void restore_lsx(struct task_struct *t);

static inline void enable_lasx(void);
static inline void disable_lasx(void);
static inline void save_lasx(struct task_struct *t);
static inline void restore_lasx(struct task_struct *t);

/*
 * Mask the FCSR Cause bits according to the Enable bits, observing
 * that Unimplemented is always enabled.
 */
static inline unsigned long mask_fcsr_x(unsigned long fcsr)
{
	return fcsr & ((fcsr & FPU_CSR_ALL_E) <<
		(ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
}

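/*
 * CSR.EUEN gates the FP/LSX/LASX units per task: touching a disabled unit
 * traps, which is what makes the lazy enable/disable scheme below work.
 */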
static inline int is_fp_enabled(void)
{
	return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_FPEN) ?
		1 : 0;
}

static inline int is_lsx_enabled(void)
{
	if (!cpu_has_lsx)
		return 0;

	return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LSXEN) ?
		1 : 0;
}

static inline int is_lasx_enabled(void)
{
	if (!cpu_has_lasx)
		return 0;

	return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LASXEN) ?
		1 : 0;
}

static inline int is_simd_enabled(void)
{
	return is_lsx_enabled() | is_lasx_enabled();
}

#define enable_fpu()		set_csr_euen(CSR_EUEN_FPEN)

#define disable_fpu()		clear_csr_euen(CSR_EUEN_FPEN)

#define clear_fpu_owner()	clear_thread_flag(TIF_USEDFPU)

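/*
 * A task "owns" the FPU while TIF_USEDFPU is set: its FP/SIMD state lives
 * in the hardware registers rather than in thread.fpu.
 */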
static inline int is_fpu_owner(void)
{
	return test_thread_flag(TIF_USEDFPU);
}

static inline void __own_fpu(void)
{
	enable_fpu();
	set_thread_flag(TIF_USEDFPU);
	KSTK_EUEN(current) |= CSR_EUEN_FPEN;
}

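/* Caller must already be non-preemptible; own_fpu() below takes care of it. */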
static inline void own_fpu_inatomic(int restore)
{
	if (cpu_has_fpu && !is_fpu_owner()) {
		__own_fpu();
		if (restore)
			_restore_fp(&current->thread.fpu);
	}
}

static inline void own_fpu(int restore)
{
	preempt_disable();
	own_fpu_inatomic(restore);
	preempt_enable();
}

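/*
 * Release the FPU, optionally saving the live context first. Only the
 * widest enabled unit needs saving: the LASX registers contain the LSX
 * registers, which in turn contain the scalar FP registers.
 */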
static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
	if (is_fpu_owner()) {
		if (!is_simd_enabled()) {
			if (save)
				_save_fp(&tsk->thread.fpu);
			disable_fpu();
		} else {
			if (save) {
				if (!is_lasx_enabled())
					save_lsx(tsk);
				else
					save_lasx(tsk);
			}
			disable_fpu();
			disable_lsx();
			disable_lasx();
			clear_tsk_thread_flag(tsk, TIF_USEDSIMD);
		}
		clear_tsk_thread_flag(tsk, TIF_USEDFPU);
	}
	KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
}

static inline void lose_fpu(int save)
{
	preempt_disable();
	lose_fpu_inatomic(save, current);
	preempt_enable();
}

static inline void init_fpu(void)
{
	unsigned int fcsr = current->thread.fpu.fcsr;

	__own_fpu();
	_init_fpu(fcsr);
	set_used_math();
}

static inline void save_fp(struct task_struct *tsk)
{
	if (cpu_has_fpu)
		_save_fp(&tsk->thread.fpu);
}

static inline void restore_fp(struct task_struct *tsk)
{
	if (cpu_has_fpu)
		_restore_fp(&tsk->thread.fpu);
}

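/*
 * Flush the live registers of @tsk to thread.fpu. Only meaningful for
 * current: any other task is not running, so its context is already in
 * memory. The widest enabled extension is saved.
 */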
static inline void save_fpu_regs(struct task_struct *tsk)
{
	unsigned int euen;

	if (tsk == current) {
		preempt_disable();

		euen = csr_read32(LOONGARCH_CSR_EUEN);

#ifdef CONFIG_CPU_HAS_LASX
		if (euen & CSR_EUEN_LASXEN)
			_save_lasx(&current->thread.fpu);
		else
#endif
#ifdef CONFIG_CPU_HAS_LSX
		if (euen & CSR_EUEN_LSXEN)
			_save_lsx(&current->thread.fpu);
		else
#endif
		if (euen & CSR_EUEN_FPEN)
			_save_fp(&current->thread.fpu);

		preempt_enable();
	}
}

static inline int is_simd_owner(void)
{
	return test_thread_flag(TIF_USEDSIMD);
}

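/*
 * LSX wrappers: real implementations when CONFIG_CPU_HAS_LSX is set,
 * empty stubs otherwise, so callers never need #ifdefs of their own.
 */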
#ifdef CONFIG_CPU_HAS_LSX

static inline void enable_lsx(void)
{
	if (cpu_has_lsx)
		csr_xchg32(CSR_EUEN_LSXEN, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN);
}

static inline void disable_lsx(void)
{
	if (cpu_has_lsx)
		csr_xchg32(0, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN);
}

static inline void save_lsx(struct task_struct *t)
{
	if (cpu_has_lsx)
		_save_lsx(&t->thread.fpu);
}

static inline void restore_lsx(struct task_struct *t)
{
	if (cpu_has_lsx)
		_restore_lsx(&t->thread.fpu);
}

static inline void init_lsx_upper(void)
{
	if (cpu_has_lsx)
		_init_lsx_upper();
}

static inline void restore_lsx_upper(struct task_struct *t)
{
	if (cpu_has_lsx)
		_restore_lsx_upper(&t->thread.fpu);
}

#else
static inline void enable_lsx(void) {}
static inline void disable_lsx(void) {}
static inline void save_lsx(struct task_struct *t) {}
static inline void restore_lsx(struct task_struct *t) {}
static inline void init_lsx_upper(void) {}
static inline void restore_lsx_upper(struct task_struct *t) {}
#endif

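/* LASX wrappers, following the same pattern as the LSX ones above. */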
#ifdef CONFIG_CPU_HAS_LASX

static inline void enable_lasx(void)
{
	if (cpu_has_lasx)
		csr_xchg32(CSR_EUEN_LASXEN, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN);
}

static inline void disable_lasx(void)
{
	if (cpu_has_lasx)
		csr_xchg32(0, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN);
}

static inline void save_lasx(struct task_struct *t)
{
	if (cpu_has_lasx)
		_save_lasx(&t->thread.fpu);
}

static inline void restore_lasx(struct task_struct *t)
{
	if (cpu_has_lasx)
		_restore_lasx(&t->thread.fpu);
}

static inline void init_lasx_upper(void)
{
	if (cpu_has_lasx)
		_init_lasx_upper();
}

static inline void restore_lasx_upper(struct task_struct *t)
{
	if (cpu_has_lasx)
		_restore_lasx_upper(&t->thread.fpu);
}

#else
static inline void enable_lasx(void) {}
static inline void disable_lasx(void) {}
static inline void save_lasx(struct task_struct *t) {}
static inline void restore_lasx(struct task_struct *t) {}
static inline void init_lasx_upper(void) {}
static inline void restore_lasx_upper(struct task_struct *t) {}
#endif

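/*
 * TIF_LSX_CTX_LIVE/TIF_LASX_CTX_LIVE record that a task has used the
 * extension at some point, i.e. that its vector context is meaningful
 * and must be kept across context switches and signals.
 */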
static inline int thread_lsx_context_live(void)
{
	if (!cpu_has_lsx)
		return 0;

	return test_thread_flag(TIF_LSX_CTX_LIVE);
}

static inline int thread_lasx_context_live(void)
{
	if (!cpu_has_lasx)
		return 0;

	return test_thread_flag(TIF_LASX_CTX_LIVE);
}

#endif /* _ASM_FPU_H */