1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 #ifndef _ASM_POWERPC_CODE_PATCHING_H
3 #define _ASM_POWERPC_CODE_PATCHING_H
4 
5 /*
6  * Copyright 2008, Michael Ellerman, IBM Corporation.
7  */
8 
9 #include <asm/types.h>
10 #include <asm/ppc-opcode.h>
11 #include <linux/string.h>
12 #include <linux/kallsyms.h>
13 #include <asm/asm-compat.h>
14 #include <asm/inst.h>
15 
16 /* Flags for create_branch:
17  * "b"   == create_branch(addr, target, 0);
18  * "ba"  == create_branch(addr, target, BRANCH_ABSOLUTE);
19  * "bl"  == create_branch(addr, target, BRANCH_SET_LINK);
20  * "bla" == create_branch(addr, target, BRANCH_ABSOLUTE | BRANCH_SET_LINK);
21  */
22 #define BRANCH_SET_LINK	0x1
23 #define BRANCH_ABSOLUTE	0x2
24 
25 /*
26  * Powerpc branch instruction is :
27  *
28  *  0         6                 30   31
29  *  +---------+----------------+---+---+
30  *  | opcode  |     LI         |AA |LK |
31  *  +---------+----------------+---+---+
32  *  Where AA = 0 and LK = 0
33  *
34  * LI is a signed 24 bits integer. The real branch offset is computed
35  * by: imm32 = SignExtend(LI:'0b00', 32);
36  *
37  * So the maximum forward branch should be:
38  *   (0x007fffff << 2) = 0x01fffffc =  0x1fffffc
39  * The maximum backward branch should be:
40  *   (0xff800000 << 2) = 0xfe000000 = -0x2000000
41  */
static inline bool is_offset_in_branch_range(long offset)
{
	/*
	 * An I-form branch displacement is a word-aligned, sign-extended
	 * 26-bit value: [-0x2000000, 0x1fffffc] in steps of 4.
	 */
	if (offset & 0x3)
		return false;

	return offset >= -0x2000000 && offset <= 0x1fffffc;
}
46 
static inline bool is_offset_in_cond_branch_range(long offset)
{
	/*
	 * A B-form (conditional) branch has a word-aligned, sign-extended
	 * 16-bit displacement: [-0x8000, 0x7fff] in steps of 4.
	 */
	return !(offset & 0x3) && offset >= -0x8000 && offset <= 0x7fff;
}
51 
/*
 * Build an unconditional I-form branch at @addr targeting @target.
 *
 * @instr:  filled with the encoded instruction (ppc_inst(0) on failure)
 * @addr:   location the branch will be placed at (for relative branches)
 * @target: branch destination
 * @flags:  BRANCH_ABSOLUTE and/or BRANCH_SET_LINK
 *
 * Returns 0 on success, 1 if @target is out of branch range.
 */
static inline int create_branch(ppc_inst_t *instr, const u32 *addr,
				unsigned long target, int flags)
{
	long offset = target;

	*instr = ppc_inst(0);

	/* Relative branches encode the displacement from @addr. */
	if (!(flags & BRANCH_ABSOLUTE))
		offset -= (unsigned long)addr;

	/* Reject targets that the 26-bit LI field cannot express. */
	if (!is_offset_in_branch_range(offset))
		return 1;

	/* opcode 18 | LI | AA/LK flag bits; masks keep fields disjoint. */
	*instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));

	return 0;
}
71 
72 int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
73 		       unsigned long target, int flags);
74 int patch_branch(u32 *addr, unsigned long target, int flags);
75 int patch_instruction(u32 *addr, ppc_inst_t instr);
76 int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
77 int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr);
78 
79 /*
80  * The data patching functions patch_uint() and patch_ulong(), etc., must be
81  * called on aligned addresses.
82  *
83  * The instruction patching functions patch_instruction() and similar must be
84  * called on addresses satisfying instruction alignment requirements.
85  */
86 
87 #ifdef CONFIG_PPC64
88 
89 int patch_uint(void *addr, unsigned int val);
90 int patch_ulong(void *addr, unsigned long val);
91 
92 #define patch_u64 patch_ulong
93 
94 #else
95 
/* Patch a naturally-aligned unsigned int; -EINVAL on misalignment. */
static inline int patch_uint(void *addr, unsigned int val)
{
	if (IS_ALIGNED((unsigned long)addr, sizeof(unsigned int)))
		return patch_instruction(addr, ppc_inst(val));

	return -EINVAL;
}
103 
/* Patch a naturally-aligned unsigned long; -EINVAL on misalignment. */
static inline int patch_ulong(void *addr, unsigned long val)
{
	if (IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)))
		return patch_instruction(addr, ppc_inst(val));

	return -EINVAL;
}
111 
112 #endif
113 
114 #define patch_u32 patch_uint
115 
/* A patch site stores an offset relative to its own address. */
static inline unsigned long patch_site_addr(s32 *site)
{
	unsigned long base = (unsigned long)site;

	return base + *site;
}
120 
/* Patch the instruction referenced by @site with @instr. */
static inline int patch_instruction_site(s32 *site, ppc_inst_t instr)
{
	u32 *addr = (u32 *)patch_site_addr(site);

	return patch_instruction(addr, instr);
}
125 
/* Write a branch to @target at the location referenced by @site. */
static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
	u32 *addr = (u32 *)patch_site_addr(site);

	return patch_branch(addr, target, flags);
}
130 
/* Read-modify-write @addr: clear the @clr bits, then set the @set bits. */
static inline int modify_instruction(unsigned int *addr, unsigned int clr,
				     unsigned int set)
{
	unsigned int insn = *addr;

	insn &= ~clr;
	insn |= set;

	return patch_instruction(addr, ppc_inst(insn));
}
136 
/* modify_instruction() applied to the instruction referenced by @site. */
static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
{
	unsigned int *addr = (unsigned int *)patch_site_addr(site);

	return modify_instruction(addr, clr, set);
}
141 
/* Extract the 6-bit primary opcode of @instr. */
static inline unsigned int branch_opcode(ppc_inst_t instr)
{
	unsigned int op = ppc_inst_primary_opcode(instr);

	return op & 0x3F;
}
146 
/* True for I-form branches (opcode 18: b/ba/bl/bla). */
static inline int instr_is_branch_iform(ppc_inst_t instr)
{
	unsigned int op = branch_opcode(instr);

	return op == 18;
}
151 
/* True for B-form branches (opcode 16: bc and variants). */
static inline int instr_is_branch_bform(ppc_inst_t instr)
{
	unsigned int op = branch_opcode(instr);

	return op == 16;
}
156 
157 int instr_is_relative_branch(ppc_inst_t instr);
158 int instr_is_relative_link_branch(ppc_inst_t instr);
159 unsigned long branch_target(const u32 *instr);
160 int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src);
161 bool is_conditional_branch(ppc_inst_t instr);
162 
163 #define OP_RT_RA_MASK	0xffff0000UL
164 #define LIS_R2		(PPC_RAW_LIS(_R2, 0))
165 #define ADDIS_R2_R12	(PPC_RAW_ADDIS(_R2, _R12, 0))
166 #define ADDI_R2_R2	(PPC_RAW_ADDI(_R2, _R2, 0))
167 
168 
/*
 * Return the address patching code should use as the entry of @func.
 */
static inline unsigned long ppc_function_entry(void *func)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
	u32 *insn = func;
	u32 op0 = *insn & OP_RT_RA_MASK;

	/*
	 * An ABIv2 function may have both a global and a local entry
	 * point. Patching must use the local entry, so recognise and
	 * skip the global entry sequence, which is always:
	 *
	 *	addis r2,r12,XXXX
	 *	addi  r2,r2,XXXX
	 *
	 * or, after a linker optimisation of the addis:
	 *
	 *	lis   r2,XXXX
	 *	addi  r2,r2,XXXX
	 */
	if ((op0 == ADDIS_R2_R12 || op0 == LIS_R2) &&
	    (*(insn + 1) & OP_RT_RA_MASK) == ADDI_R2_R2)
		return (unsigned long)(insn + 2);

	return (unsigned long)func;
#elif defined(CONFIG_PPC64_ELF_ABI_V1)
	/*
	 * ABIv1 function pointers reference a descriptor whose first
	 * field is the address of the function text.
	 */
	return ((struct func_desc *)func)->addr;
#else
	return (unsigned long)func;
#endif
}
207 
/* Return the global entry point of @func. */
static inline unsigned long ppc_global_function_entry(void *func)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
	/* On ABIv2 the function pointer already is the global entry. */
	return (unsigned long)func;
#else
	/* Everywhere else the global and local entries coincide. */
	return ppc_function_entry(func);
#endif
}
218 
219 /*
220  * Wrapper around kallsyms_lookup() to return function entry address:
221  * - For ABIv1, we lookup the dot variant.
222  * - For ABIv2, we return the local entry point.
223  */
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
	unsigned long addr;
#ifdef CONFIG_PPC64_ELF_ABI_V1
	/* ABIv1 text symbols carry a leading dot; prefer that form. */
	char dot_name[1 + KSYM_NAME_LEN];
	bool dot_appended = false;

	/* Names that already fill KSYM_NAME_LEN cannot be represented. */
	if (strnlen(name, KSYM_NAME_LEN) >= KSYM_NAME_LEN)
		return 0;

	if (name[0] == '.') {
		dot_name[0] = '\0';
	} else {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		dot_appended = true;
	}
	strlcat(dot_name, name, sizeof(dot_name));

	addr = kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended)
		/* Fall back to the plain, non-dot symbol. */
		addr = kallsyms_lookup_name(name);
#elif defined(CONFIG_PPC64_ELF_ABI_V2)
	/* ABIv2: translate the global entry to the local entry point. */
	addr = kallsyms_lookup_name(name);
	if (addr)
		addr = ppc_function_entry((void *)addr);
#else
	addr = kallsyms_lookup_name(name);
#endif
	return addr;
}
257 
258 /*
259  * Some instruction encodings commonly used in dynamic ftracing
260  * and function live patching.
261  */
262 
263 /* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
264 #ifdef CONFIG_PPC64_ELF_ABI_V2
265 #define R2_STACK_OFFSET         24
266 #else
267 #define R2_STACK_OFFSET         40
268 #endif
269 
270 #define PPC_INST_LD_TOC		PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)
271 
272 /* usually preceded by a mflr r0 */
273 #define PPC_INST_STD_LR		PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
274 
275 #endif /* _ASM_POWERPC_CODE_PATCHING_H */
276