// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/kernel/unwind.c
 *
 * Copyright (C) 2008 ARM Limited
 *
 * Stack unwinding support for ARM
 *
 * An ARM EABI version of gcc is required to generate the unwind
 * tables. For information about the structure of the unwind tables,
 * see "Exception Handling ABI for the ARM Architecture" at:
 *
 * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html
 */

#ifndef __CHECKER__
#if !defined (__ARM_EABI__)
#warning Your compiler does not have EABI support.
#warning    ARM unwind is known to compile only with EABI compilers.
#warning    Change compiler or disable ARM_UNWIND option.
#endif
#endif /* __CHECKER__ */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/module.h>

#include <asm/stacktrace.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#include "reboot.h"

/* Dummy functions to avoid linker complaints */
void __aeabi_unwind_cpp_pr0(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0);

void __aeabi_unwind_cpp_pr1(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1);

void __aeabi_unwind_cpp_pr2(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
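
/*
 * Note: the EABI toolchain emits references to __aeabi_unwind_cpp_pr0/1/2
 * from the exception index entries it generates. The kernel interprets the
 * unwind tables itself, so these stubs exist only to satisfy those
 * references; they are never called.
 */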

struct unwind_ctrl_block {
	unsigned long vrs[16];		/* virtual register set */
	const unsigned long *insn;	/* pointer to the current instructions word */
	unsigned long sp_high;		/* highest value of sp allowed */
	unsigned long *lr_addr;		/* address of LR value on the stack */
	/*
	 * 1 : check for stack overflow for each register pop.
	 * 0 : save overhead if there is plenty of stack remaining.
	 */
	int check_each_pop;
	int entries;			/* number of entries left to interpret */
	int byte;			/* current byte number in the instructions word */
};

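/*
 * Virtual register numbering follows the ARM core registers; note that
 * Thumb-2 kernels use r7 as the frame pointer while ARM kernels use r11.
 */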
enum regs {
#ifdef CONFIG_THUMB2_KERNEL
	FP = 7,
#else
	FP = 11,
#endif
	SP = 13,
	LR = 14,
	PC = 15
};

extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];

static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);

/* Convert a prel31 symbol to an absolute address */
#define prel31_to_addr(ptr)				\
({							\
	/* sign-extend to 32 bits */			\
	long offset = (((long)*(ptr)) << 1) >> 1;	\
	(unsigned long)(ptr) + offset;			\
})
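
/*
 * Worked example (illustrative): a prel31 field holds a 31-bit signed offset
 * relative to the field's own address. If the word at address 0x1000 contains
 * 0x7ffffffc, the sign-extended offset is -4 and prel31_to_addr() returns
 * 0x0ffc.
 */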

/*
 * Binary search in the unwind index. The entries are
 * guaranteed to be sorted in ascending order by the linker.
 *
 * start = first entry
 * origin = first entry with positive offset (or stop if there is no such entry)
 * stop - 1 = last entry
 */
static const struct unwind_idx *search_index(unsigned long addr,
				       const struct unwind_idx *start,
				       const struct unwind_idx *origin,
				       const struct unwind_idx *stop)
{
	unsigned long addr_prel31;

	pr_debug("%s(%08lx, %p, %p, %p)\n",
			__func__, addr, start, origin, stop);

	/*
	 * only search in the section with the matching sign. This way the
	 * prel31 numbers can be compared as unsigned longs.
	 */
	if (addr < (unsigned long)start)
		/* negative offsets: [start; origin) */
		stop = origin;
	else
		/* positive offsets: [origin; stop) */
		start = origin;
	/* prel31 for address relative to start */
	addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;

	while (start < stop - 1) {
		const struct unwind_idx *mid = start + ((stop - start) >> 1);

		/*
		 * As addr_prel31 is relative to start an offset is needed to
		 * make it relative to mid.
		 */
		if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
				mid->addr_offset)
			stop = mid;
		else {
			/* keep addr_prel31 relative to start */
			addr_prel31 -= ((unsigned long)mid -
					(unsigned long)start);
			start = mid;
		}
	}

	if (likely(start->addr_offset <= addr_prel31))
		return start;
	else {
		pr_warn("unwind: Unknown symbol address %08lx\n", addr);
		return NULL;
	}
}

static const struct unwind_idx *unwind_find_origin(
		const struct unwind_idx *start, const struct unwind_idx *stop)
{
	pr_debug("%s(%p, %p)\n", __func__, start, stop);
	while (start < stop) {
		const struct unwind_idx *mid = start + ((stop - start) >> 1);

		if (mid->addr_offset >= 0x40000000)
			/* negative offset */
			start = mid + 1;
		else
			/* positive offset */
			stop = mid;
	}
	pr_debug("%s -> %p\n", __func__, stop);
	return stop;
}

static const struct unwind_idx *unwind_find_idx(unsigned long addr)
{
	const struct unwind_idx *idx = NULL;
	unsigned long flags;

	pr_debug("%s(%08lx)\n", __func__, addr);

	if (core_kernel_text(addr)) {
		if (unlikely(!__origin_unwind_idx))
			__origin_unwind_idx =
				unwind_find_origin(__start_unwind_idx,
						__stop_unwind_idx);

		/* main unwind table */
		idx = search_index(addr, __start_unwind_idx,
				   __origin_unwind_idx,
				   __stop_unwind_idx);
	} else {
		/* module unwind tables */
		struct unwind_table *table;

		raw_spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->begin_addr &&
			    addr < table->end_addr) {
				idx = search_index(addr, table->start,
						   table->origin,
						   table->stop);
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&unwind_lock, flags);
	}

	pr_debug("%s: idx = %p\n", __func__, idx);
	return idx;
}

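/*
 * Unwind instructions are packed as bytes inside the 32-bit table words, most
 * significant byte first. ctrl->byte tracks the next byte to consume within
 * the current word (3 = MSB, 0 = LSB); when it wraps, the next word is
 * fetched and ctrl->entries is decremented.
 */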
static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
{
	unsigned long ret;

	if (ctrl->entries <= 0) {
		pr_warn("unwind: Corrupt unwind table\n");
		return 0;
	}

	ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff;

	if (ctrl->byte == 0) {
		ctrl->insn++;
		ctrl->entries--;
		ctrl->byte = 3;
	} else
		ctrl->byte--;

	return ret;
}

/* Before popping a register, check whether it is feasible or not */
static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
				unsigned long **vsp, unsigned int reg)
{
	if (unlikely(ctrl->check_each_pop))
		if (*vsp >= (unsigned long *)ctrl->sp_high)
			return -URC_FAILURE;

	/* Use READ_ONCE_NOCHECK here to avoid this memory access
	 * from being tracked by KASAN.
	 */
	ctrl->vrs[reg] = READ_ONCE_NOCHECK(*(*vsp));
	if (reg == 14)
		ctrl->lr_addr = *vsp;
	(*vsp)++;
	return URC_OK;
}

/* Helper functions to execute the instructions */
static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl,
						unsigned long mask)
{
	unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
	int load_sp, reg = 4;

	load_sp = mask & (1 << (13 - 4));
	while (mask) {
		if (mask & 1)
			if (unwind_pop_register(ctrl, &vsp, reg))
				return -URC_FAILURE;
		mask >>= 1;
		reg++;
	}
	if (!load_sp) {
		ctrl->vrs[SP] = (unsigned long)vsp;
	}

	return URC_OK;
}

static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
					unsigned long insn)
{
	unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
	int reg;

	/* pop R4-R[4+bbb] */
	for (reg = 4; reg <= 4 + (insn & 7); reg++)
		if (unwind_pop_register(ctrl, &vsp, reg))
			return -URC_FAILURE;

	if (insn & 0x8)
		if (unwind_pop_register(ctrl, &vsp, 14))
			return -URC_FAILURE;

	ctrl->vrs[SP] = (unsigned long)vsp;

	return URC_OK;
}

static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
						unsigned long mask)
{
	unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
	int reg = 0;

	/* pop R0-R3 according to mask */
	while (mask) {
		if (mask & 1)
			if (unwind_pop_register(ctrl, &vsp, reg))
				return -URC_FAILURE;
		mask >>= 1;
		reg++;
	}
	ctrl->vrs[SP] = (unsigned long)vsp;

	return URC_OK;
}

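/*
 * ULEB128: each byte contributes its low 7 bits, least significant group
 * first; bit 7 set means another byte follows. Worked example (illustrative):
 * the byte sequence 0xe5 0x8e 0x26 decodes as
 * 0x65 + (0x0e << 7) + (0x26 << 14) = 624485.
 */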
static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl)
{
	unsigned long bytes = 0;
	unsigned long insn;
	unsigned long result = 0;

	/*
	 * unwind_get_byte() will advance `ctrl` one instruction at a time, so
	 * loop until we get an instruction byte where bit 7 is not set.
	 *
	 * Note: This decodes a maximum of 4 bytes to output 28 bits data where
	 * max is 0xfffffff: that will cover a vsp increment of 1073742336, hence
	 * it is sufficient for unwinding the stack.
	 */
	do {
		insn = unwind_get_byte(ctrl);
		result |= (insn & 0x7f) << (bytes * 7);
		bytes++;
	} while (!!(insn & 0x80) && (bytes != sizeof(result)));

	return result;
}

/*
 * Execute the current unwind instruction.
 */
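/*
 * Summary of the EHABI unwind opcodes handled below (see the "Exception
 * Handling ABI for the ARM Architecture" document referenced above):
 *   00xxxxxx		vsp += (xxxxxx << 2) + 4
 *   01xxxxxx		vsp -= (xxxxxx << 2) + 4
 *   1000xxxx xxxxxxxx	pop registers from {r4-r15} under a 12-bit mask
 *			(an all-zero mask means "refuse to unwind")
 *   1001nnnn		vsp = r[nnnn] (nnnn != 13, 15)
 *   1010xnnn		pop r4-r[4+nnn], plus r14 if x is set
 *   10110000		finish
 *   10110001 0000iiii	pop registers from {r0-r3} under mask iiii
 *   10110010 uleb128	vsp += 0x204 + (uleb128 << 2)
 * Anything else is rejected.
 */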
static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
{
	unsigned long insn = unwind_get_byte(ctrl);
	int ret = URC_OK;

	pr_debug("%s: insn = %08lx\n", __func__, insn);

	if ((insn & 0xc0) == 0x00)
		ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
	else if ((insn & 0xc0) == 0x40) {
		ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
	} else if ((insn & 0xf0) == 0x80) {
		unsigned long mask;

		insn = (insn << 8) | unwind_get_byte(ctrl);
		mask = insn & 0x0fff;
		if (mask == 0) {
			pr_warn("unwind: 'Refuse to unwind' instruction %04lx\n",
				insn);
			return -URC_FAILURE;
		}

		ret = unwind_exec_pop_subset_r4_to_r13(ctrl, mask);
		if (ret)
			goto error;
	} else if ((insn & 0xf0) == 0x90 &&
		   (insn & 0x0d) != 0x0d) {
		ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
	} else if ((insn & 0xf0) == 0xa0) {
		ret = unwind_exec_pop_r4_to_rN(ctrl, insn);
		if (ret)
			goto error;
	} else if (insn == 0xb0) {
		if (ctrl->vrs[PC] == 0)
			ctrl->vrs[PC] = ctrl->vrs[LR];
		/* no further processing */
		ctrl->entries = 0;
	} else if (insn == 0xb1) {
		unsigned long mask = unwind_get_byte(ctrl);

		if (mask == 0 || mask & 0xf0) {
			pr_warn("unwind: Spare encoding %04lx\n",
				(insn << 8) | mask);
			return -URC_FAILURE;
		}

		ret = unwind_exec_pop_subset_r0_to_r3(ctrl, mask);
		if (ret)
			goto error;
	} else if (insn == 0xb2) {
		unsigned long uleb128 = unwind_decode_uleb128(ctrl);

		ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
	} else {
		pr_warn("unwind: Unhandled instruction %02lx\n", insn);
		return -URC_FAILURE;
	}

	pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__,
		 ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]);

error:
	return ret;
}

/*
 * Unwind a single frame starting with *sp for the symbol at *pc. It
 * updates the *pc and *sp with the new values.
 */
int unwind_frame(struct stackframe *frame)
{
	const struct unwind_idx *idx;
	struct unwind_ctrl_block ctrl;
	unsigned long sp_low;

	/* store the highest address on the stack to avoid crossing it */
	sp_low = frame->sp;
	ctrl.sp_high = ALIGN(sp_low - THREAD_SIZE, THREAD_ALIGN)
		       + THREAD_SIZE;

	pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
		 frame->pc, frame->lr, frame->sp);

	idx = unwind_find_idx(frame->pc);
	if (!idx) {
		if (frame->pc && kernel_text_address(frame->pc)) {
			if (in_module_plt(frame->pc) && frame->pc != frame->lr) {
				/*
				 * Quoting Ard: Veneers only set PC using a
				 * PC+immediate LDR, and so they don't affect
				 * the state of the stack or the register file
				 */
				frame->pc = frame->lr;
				return URC_OK;
			}
			pr_warn("unwind: Index not found %08lx\n", frame->pc);
		}
		return -URC_FAILURE;
	}

	ctrl.vrs[FP] = frame->fp;
	ctrl.vrs[SP] = frame->sp;
	ctrl.vrs[LR] = frame->lr;
	ctrl.vrs[PC] = 0;

	if (idx->insn == 1)
		/* can't unwind */
		return -URC_FAILURE;
	else if (frame->pc == prel31_to_addr(&idx->addr_offset)) {
		/*
		 * Unwinding is tricky when we're halfway through the prologue,
		 * since the stack frame that the unwinder expects may not be
		 * fully set up yet. However, one thing we do know for sure is
		 * that if we are unwinding from the very first instruction of
		 * a function, we are still effectively in the stack frame of
		 * the caller, and the unwind info has no relevance yet.
		 */
		if (frame->pc == frame->lr)
			return -URC_FAILURE;
		frame->pc = frame->lr;
		return URC_OK;
	} else if ((idx->insn & 0x80000000) == 0)
		/* prel31 to the unwind table */
		ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn);
	else if ((idx->insn & 0xff000000) == 0x80000000)
		/* only personality routine 0 supported in the index */
		ctrl.insn = &idx->insn;
	else {
		pr_warn("unwind: Unsupported personality routine %08lx in the index at %p\n",
			idx->insn, idx);
		return -URC_FAILURE;
	}

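	/*
	 * In the compact model the first word of the table entry encodes the
	 * personality routine index in bits 31-24: 0x80 means the remaining
	 * three bytes of this word hold the unwind opcodes, while 0x81 means
	 * bits 23-16 give the number of additional opcode words that follow.
	 */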
	/* check the personality routine */
	if ((*ctrl.insn & 0xff000000) == 0x80000000) {
		ctrl.byte = 2;
		ctrl.entries = 1;
	} else if ((*ctrl.insn & 0xff000000) == 0x81000000) {
		ctrl.byte = 1;
		ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16);
	} else {
		pr_warn("unwind: Unsupported personality routine %08lx at %p\n",
			*ctrl.insn, ctrl.insn);
		return -URC_FAILURE;
	}

	ctrl.check_each_pop = 0;

	if (prel31_to_addr(&idx->addr_offset) == (u32)&call_with_stack) {
		/*
		 * call_with_stack() is the only place where we permit SP to
		 * jump from one stack to another, and since we know it is
		 * guaranteed to happen, set up the SP bounds accordingly.
		 */
		sp_low = frame->fp;
		ctrl.sp_high = ALIGN(frame->fp, THREAD_SIZE);
	}

	while (ctrl.entries > 0) {
		int urc;
		if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs))
			ctrl.check_each_pop = 1;
		urc = unwind_exec_insn(&ctrl);
		if (urc < 0)
			return urc;
		if (ctrl.vrs[SP] < sp_low || ctrl.vrs[SP] > ctrl.sp_high)
			return -URC_FAILURE;
	}

	if (ctrl.vrs[PC] == 0)
		ctrl.vrs[PC] = ctrl.vrs[LR];

	/* check for infinite loop */
	if (frame->pc == ctrl.vrs[PC] && frame->sp == ctrl.vrs[SP])
		return -URC_FAILURE;

	frame->fp = ctrl.vrs[FP];
	frame->sp = ctrl.vrs[SP];
	frame->lr = ctrl.vrs[LR];
	frame->pc = ctrl.vrs[PC];
	frame->lr_addr = ctrl.lr_addr;

	return URC_OK;
}

void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		      const char *loglvl)
{
	struct stackframe frame;

	printk("%sCall trace: ", loglvl);

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	if (regs) {
		arm_get_current_stackframe(regs, &frame);
		/* PC might be corrupted, use LR in that case. */
		if (!kernel_text_address(regs->ARM_pc))
			frame.pc = regs->ARM_lr;
	} else if (tsk == current) {
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.lr = (unsigned long)__builtin_return_address(0);
		/* We are saving the stack and execution state at this
		 * point, so we should ensure that frame.pc is within
		 * this block of code.
		 */
here:
		frame.pc = (unsigned long)&&here;
	} else {
		/* task blocked in __switch_to */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		/*
		 * The function calling __switch_to cannot be a leaf function
		 * so LR is recovered from the stack.
		 */
		frame.lr = 0;
		frame.pc = thread_saved_pc(tsk);
	}

	while (1) {
		int urc;
		unsigned long where = frame.pc;

		urc = unwind_frame(&frame);
		if (urc < 0)
			break;
		dump_backtrace_entry(where, frame.pc, frame.sp - 4, loglvl);
	}
}

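/*
 * Register an unwind index section. start/size describe the .ARM.exidx data
 * and text_addr/text_size the code range it covers; the resulting table is
 * searched by unwind_find_idx() for non-core-kernel addresses. (Typically
 * called by the module loader when ARM unwinding is enabled.)
 */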
struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
				      unsigned long text_addr,
				      unsigned long text_size)
{
	unsigned long flags;
	struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);

	pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
		 text_addr, text_size);

	if (!tab)
		return tab;

	tab->start = (const struct unwind_idx *)start;
	tab->stop = (const struct unwind_idx *)(start + size);
	tab->origin = unwind_find_origin(tab->start, tab->stop);
	tab->begin_addr = text_addr;
	tab->end_addr = text_addr + text_size;

	raw_spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&tab->list, &unwind_tables);
	raw_spin_unlock_irqrestore(&unwind_lock, flags);

	return tab;
}

void unwind_table_del(struct unwind_table *tab)
{
	unsigned long flags;

	if (!tab)
		return;

	raw_spin_lock_irqsave(&unwind_lock, flags);
	list_del(&tab->list);
	raw_spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(tab);
}