/linux-6.12.1/include/linux/ |
D | stackdepot.h |
     3   * Stack depot - a stack trace storage that avoids duplication.
     5   * Stack depot is intended to be used by subsystems that need to store and
     6   * later retrieve many potentially duplicated stack traces without wasting
     9   * For example, KASAN needs to save allocation and free stack traces for each
    10   * object. Storing two stack traces per object requires a lot of memory (e.g.
    12   * stack traces often repeat, using stack depot saves about 100x the space.
    28   * Number of bits in the handle that stack depot doesn't use. Users may store
    43  /* Compact structure that stores a reference to a stack. */
    64   * only place a stack record onto the freelist iff its
    65   * refcount is zero. Because stack records with a zero
   [all …]
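
A minimal usage sketch of the API this header declares, assuming a client like KASAN: capture a trace once, keep only the compact 32-bit handle, and expand it again on demand. Error handling is elided.

	#include <linux/kernel.h>
	#include <linux/stackdepot.h>
	#include <linux/stacktrace.h>

	static depot_stack_handle_t save_current_stack(gfp_t flags)
	{
		unsigned long entries[64];
		unsigned int nr;

		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
		/* Identical traces hash to the same record, so this deduplicates. */
		return stack_depot_save(entries, nr, flags);
	}

	static void print_saved_stack(depot_stack_handle_t handle)
	{
		unsigned long *entries;
		unsigned int nr = stack_depot_fetch(handle, &entries);

		stack_trace_print(entries, nr, 0);
	}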
|
/linux-6.12.1/Documentation/arch/x86/ |
D | shstk.rst |
     4  Control-flow Enforcement Technology (CET) Shadow Stack
    14  CET introduces shadow stack and indirect branch tracking (IBT). A shadow stack
    15  is a secondary stack allocated from memory which cannot be directly modified by
    17  return address to both the normal stack and the shadow stack. Upon
    18  function return, the processor pops the shadow stack copy and compares it
    19  to the normal stack copy. If the two differ, the processor raises a
    22  Stack and Indirect Branch Tracking. Today in the 64-bit kernel, only userspace
    23  shadow stack and kernel IBT are supported.
    25  Requirements to use Shadow Stack
    28  To use userspace shadow stack you need HW that supports it, a kernel
   [all …]
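
A hedged userspace sketch of the enablement interface this document describes: shadow stack is turned on per thread via arch_prctl(). The constants are assumed to come from <asm/prctl.h>; in practice glibc performs this during startup for binaries marked shadow-stack capable.

	#include <asm/prctl.h>		/* ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK */
	#include <sys/syscall.h>
	#include <unistd.h>

	static int enable_shadow_stack(void)
	{
		/* Sets the ARCH_SHSTK_SHSTK feature bit for the calling thread. */
		return syscall(SYS_arch_prctl, ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK);
	}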
|
D | kernel-stacks.rst |
    14  Like all other architectures, x86_64 has a kernel stack for every
    17  zombie. While the thread is in user space the kernel stack is empty
    25  * Interrupt stack. IRQ_STACK_SIZE
    29  kernel switches from the current task to the interrupt stack. Like
    32  of every per thread stack.
    34  The interrupt stack is also used when processing a softirq.
    36  Switching to the kernel interrupt stack is done by software based on a
    41  to automatically switch to a new stack for designated events such as
    43  events on x86_64. This feature is called the Interrupt Stack Table
    46  point to dedicated stacks; each stack can be a different size.
   [all …]
|
/linux-6.12.1/tools/testing/selftests/bpf/progs/ |
D | verifier_subprog_precision.c |
    43  __msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
    44  __msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
    45  __msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
    46  __msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
    47  __msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
    48  __msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
    49  __msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
   103  __msg("mark_precise: frame0: regs=r0 stack= before 5: (bf) r1 = r6")
   104  __msg("mark_precise: frame0: regs=r0 stack= before 4: (27) r0 *= 4")
   105  __msg("mark_precise: frame0: regs=r0 stack= before 3: (57) r0 &= 3")
   [all …]
|
D | verifier_var_off.c |
    36  __description("variable-offset stack read, priv vs unpriv")
    38  __msg_unpriv("R2 variable stack access prohibited for !root")
    43  	/* Fill the top 8 bytes of the stack */ \  in stack_read_priv_vs_unpriv()
    55  	/* dereference it for a stack read */ \  in stack_read_priv_vs_unpriv()
    63  __description("variable-offset stack read, uninitialized")
    65  __failure_unpriv __msg_unpriv("R2 variable stack access prohibited for !root")
    78  	/* dereference it for a stack read */ \  in variable_offset_stack_read_uninitialized()
    86  __description("variable-offset stack write, priv vs unpriv")
    88  /* Check that the maximum stack depth is correctly maintained according to the
    91  __log_level(4) __msg("stack depth 16")
   [all …]
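
A sketch of the shape these tests take, reconstructed around the quoted comments (details are illustrative): a stack pointer is given a small, unknown, 4-byte-aligned offset, and the verifier must allow the read for root while rejecting it for unprivileged users.

	SEC("cgroup/skb")
	__description("variable-offset stack read, priv vs unpriv")
	__success __failure_unpriv
	__msg_unpriv("R2 variable stack access prohibited for !root")
	__naked void stack_read_priv_vs_unpriv(void)
	{
		asm volatile ("					\
		/* Fill the top 8 bytes of the stack */		\
		r0 = 0;						\
		*(u64*)(r10 - 8) = r0;				\
		/* Get an unknown value, make it small and 4-byte aligned */ \
		r2 = *(u32*)(r1 + 0);				\
		r2 &= 4;					\
		r2 -= 8;					\
		/* r2 + fp is now either fp-4 or fp-8, we don't know which */ \
		r2 += r10;					\
		/* dereference it for a stack read */		\
		r0 = *(u32*)(r2 + 0);				\
		r0 = 0;						\
		exit;						\
	"	::: __clobber_all);
	}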
|
D | test_global_func_ctx_args.c |
    11  static long stack[256];  variable
    19  	return bpf_get_stack(ctx, &stack, sizeof(stack), 0);  in kprobe_typedef_ctx_subprog()
    50  	return bpf_get_stack((void *)ctx, &stack, sizeof(stack), 0);  in kprobe_struct_ctx_subprog()
    67  	return bpf_get_stack(ctx, &stack, sizeof(stack), 0);  in kprobe_workaround_ctx_subprog()
    83  	return bpf_get_stack(ctx, &stack, sizeof(stack), 0);  in raw_tp_ctx_subprog()
    99  	return bpf_get_stack(ctx, &stack, sizeof(stack), 0);  in raw_tp_writable_ctx_subprog()
   115  	return bpf_get_stack(ctx, &stack, sizeof(stack), 0);  in perf_event_ctx_subprog()
   130  	return bpf_get_stack(ctx, stack, sizeof(stack), 0);  in subprog_ctx_tag()
   142  	return bpf_get_stack(ctx1, stack, sizeof(stack), 0) +  in subprog_multi_ctx_tags()
   144  		bpf_get_stack(ctx2, stack, sizeof(stack), 0);  in subprog_multi_ctx_tags()
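
The recurring call above is the bpf_get_stack() helper. A minimal sketch of the pattern (a subprogram that receives the program context and fills a static buffer); the file itself exercises global subprograms, where the context argument's type must be preserved, while this sketch uses a static one for simplicity:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	static long stack[256];

	static __noinline long capture_stack(void *ctx)
	{
		/* Returns the number of bytes written, or a negative error. */
		return bpf_get_stack(ctx, &stack, sizeof(stack), 0);
	}

	SEC("kprobe")
	int kprobe_prog(struct pt_regs *ctx)
	{
		return capture_stack(ctx) > 0 ? 0 : 1;
	}

	char _license[] SEC("license") = "GPL";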
|
D | verifier_precision.c |
     9  __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
    10  __msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
    11  __msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
    12  __msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
    29  __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
    30  __msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
    31  __msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
    32  __msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
    50  __msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
    51  __msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
   [all …]
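
These expectations correspond to tests of verifier backtracking for precision marks. A hedged sketch of one such test, abridged to match the first block of __msg() lines above (annotations follow the selftests' bpf_misc.h conventions): the verifier must walk back through the BPF_NEG instruction when marking r2 precise.

	SEC("?raw_tp")
	__success __log_level(2)
	__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
	__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
	__naked int bpf_neg_precision(void)
	{
		asm volatile (
			"r2 = 8;"		/* insn 0: (b7) r2 = 8 */
			"r2 = -r2;"		/* insn 1: (87) r2 = -r2 */
			"if r2 != -8 goto 1f;"	/* forces r2 to become precise */
			"r1 = r10;"
			"r1 += r2;"		/* variable-offset fp access */
		"1:"
			"r0 = 0;"
			"exit;"
			::: __clobber_all);
	}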
|
/linux-6.12.1/lib/ |
D | stackdepot.c |
     3   * Stack depot - a stack trace storage that avoids duplication.
     5   * Internally, stack depot maintains a hash table of unique stacktraces. The
     6   * stack traces themselves are stored contiguously one after another in a set
    57  /* Hash table of stored stack records. */
    64  /* Array of memory regions that store stack records. */
    72  /* Freelist of stack records within stack_pools. */
   143  	 * stack traces being stored in stack depot.  in stack_depot_early_init()
   242   * Initializes new stack pool, and updates the list of pools.
   252  	WARN_ONCE(1, "Stack depot reached limit capacity");  in depot_init_pool()
   269   * Stack depot tries to keep an extra pool allocated even before it runs  in depot_init_pool()
   [all …]
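
A simplified sketch of the save path those comments describe (helper names abridged from the real file, signatures simplified): hash the trace, look for an identical record in the hash bucket, and touch the pools only on a miss.

	static depot_stack_handle_t save_sketch(unsigned long *entries,
						unsigned int nr, gfp_t gfp)
	{
		u32 hash = hash_stack(entries, nr);	/* jhash2() over the trace */
		struct stack_record *rec;

		rec = find_stack(&stack_table[hash & stack_hash_mask],
				 entries, nr, hash);
		if (!rec)	/* never seen before: allocate from a pool */
			rec = depot_alloc_stack(entries, nr, hash, gfp);

		return rec ? rec->handle : 0;
	}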
|
/linux-6.12.1/arch/x86/kernel/ |
D | dumpstack_32.c |
    38  static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info)  in in_hardirq_stack() argument
    44  	 * This is a software stack, so 'end' can be a valid stack pointer.  in in_hardirq_stack()
    45  	 * It just means the stack is empty.  in in_hardirq_stack()
    47  	if (stack < begin || stack > end)  in in_hardirq_stack()
    55  	 * See irq_32.c -- the next stack pointer is stored at the beginning of  in in_hardirq_stack()
    56  	 * the stack.  in in_hardirq_stack()
    63  static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)  in in_softirq_stack()
    69  	 * This is a software stack, so 'end' can be a valid stack pointer.  in in_softirq_stack()
    70  	 * It just means the stack is empty.  in in_softirq_stack()
    72  	if (stack < begin || stack > end)  in in_softirq_stack()
   [all …]
|
D | dumpstack_64.c |
    46  	 * On 64-bit, we have a generic entry stack that we  in stack_type_name()
    61   * @offs:	Offset from the start of the exception stack area
    62   * @size:	Size of the exception stack
    79   * Array of exception stack page descriptors. If the stack is larger than
    80   * PAGE_SIZE, all pages covering a particular stack will have the same
    81   * info. The guard pages including the not mapped DB2 stack are zeroed
    94  static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info)  in in_exception_stack() argument
    96  	unsigned long begin, end, stk = (unsigned long)stack;  in in_exception_stack()
   105  	 * Handle the case where stack trace is collected _before_  in in_exception_stack()
   112  	/* Bail if @stack is outside the exception stack area. */  in in_exception_stack()
   [all …]
|
D | dumpstack.c |
    32  bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task,  in in_task_stack() argument
    38  	if (stack < begin || stack >= end)  in in_task_stack()
    50  bool noinstr in_entry_stack(unsigned long *stack, struct stack_info *info)  in in_entry_stack()
    57  	if ((void *)stack < begin || (void *)stack >= end)  in in_entry_stack()
   128  	/* No access to the user space stack of other tasks. Ignore. */  in show_opcodes()
   160  	 * ordering reasons: if the registers are on the next stack, we don't  in show_regs_if_on_stack()
   162  	 * the wrong stack. Later, when show_trace_log_lvl() switches to the  in show_regs_if_on_stack()
   163  	 * next stack, this function will be called again with the same regs so  in show_regs_if_on_stack()
   181   * This function reads pointers from the stack and dereferences them. The  in show_trace_log_lvl()
   187  			unsigned long *stack, const char *log_lvl)  in show_trace_log_lvl() argument
   [all …]
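
A short sketch of how these predicates are consumed (the combining function here is hypothetical; the real dispatcher is get_stack_info()): given a stack pointer, classify which stack it lives on and let the predicate fill in the bounds.

	static bool classify_sp(unsigned long *sp, struct task_struct *task,
				struct stack_info *info)
	{
		if (in_task_stack(sp, task, info))
			return true;	/* the task's normal kernel stack */
		if (in_entry_stack(sp, info))
			return true;	/* per-CPU entry trampoline stack */
		return false;		/* IRQ/exception stacks handled per-arch */
	}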
|
/linux-6.12.1/drivers/misc/altera-stapl/ |
D | altera.c |
   119  /* This function checks if enough parameters are available on the stack. */
   213  	long *stack = astate->stack;  in altera_execute() local
   528  		stack[stack_ptr] = stack[stack_ptr - 1];  in altera_execute()
   534  		swap(stack[stack_ptr - 2], stack[stack_ptr - 1]);  in altera_execute()
   539  		stack[stack_ptr - 1] += stack[stack_ptr];  in altera_execute()
   545  		stack[stack_ptr - 1] -= stack[stack_ptr];  in altera_execute()
   551  		stack[stack_ptr - 1] *= stack[stack_ptr];  in altera_execute()
   557  		stack[stack_ptr - 1] /= stack[stack_ptr];  in altera_execute()
   563  		stack[stack_ptr - 1] %= stack[stack_ptr];  in altera_execute()
   569  		stack[stack_ptr - 1] <<= stack[stack_ptr];  in altera_execute()
   [all …]
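
The lines above are the binary-operator cases of a classic stack machine: pop one operand and fold it into the new top of stack. A self-contained sketch of that dispatch shape (names hypothetical):

	enum binop { OP_ADD, OP_SUB, OP_MUL };

	static void exec_binop(long *stack, int *stack_ptr, enum binop op)
	{
		long rhs = stack[--*stack_ptr];	/* pop the right operand */

		switch (op) {
		case OP_ADD: stack[*stack_ptr - 1] += rhs; break;
		case OP_SUB: stack[*stack_ptr - 1] -= rhs; break;
		case OP_MUL: stack[*stack_ptr - 1] *= rhs; break;
		}
	}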
|
/linux-6.12.1/tools/testing/selftests/bpf/verifier/ |
D | precise.c |
    42  mark_precise: frame0: regs=r2 stack= before 25\
    43  mark_precise: frame0: regs=r2 stack= before 24\
    44  mark_precise: frame0: regs=r2 stack= before 23\
    45  mark_precise: frame0: regs=r2 stack= before 22\
    46  mark_precise: frame0: regs=r2 stack= before 20\
    47  mark_precise: frame0: parent state regs=r2,r9 stack=:\
    49  mark_precise: frame0: regs=r2,r9 stack= before 19\
    50  mark_precise: frame0: regs=r9 stack= before 18\
    51  mark_precise: frame0: regs=r8,r9 stack= before 17\
    52  mark_precise: frame0: regs=r0,r9 stack= before 15\
   [all …]
|
/linux-6.12.1/Documentation/mm/ |
D | vmalloced-kernel-stacks.rst |
     4  Virtually Mapped Kernel Stack Support
    21  Kernel stack overflows are often hard to debug and make the kernel
    25  Virtually mapped kernel stacks with guard pages cause kernel stack
    31  causes reliable faults when the stack overflows. The usability of
    32  the stack trace after overflow and response to the overflow itself
    49  needs to work while the stack points to a virtual address with
    51  most likely) needs to ensure that the stack's page table entries
    52  are populated before running on a possibly unpopulated stack.
    53  - If the stack overflows into a guard page, something reasonable
    64  with guard pages. This causes kernel stack overflows to be caught
   [all …]
|
/linux-6.12.1/arch/x86/entry/ |
D | entry_32.S |
     7   * Stack layout while running C code:
     8   *	ptrace needs to have all registers on the stack.
   123   * When we're here from kernel mode, the (exception) stack looks like:
   171   * so any attempt to access the stack needs to use SS. (except for
   180   * middle doesn't scribble our stack.
   233  	/* Switch to kernel stack if necessary */
   325   * Setup and switch to ESPFIX stack
   327   * We're returning to userspace with a 16 bit stack. The CPU will not
   361   * entry-stack, it will overwrite the task-stack and everything we
   362   * copied there. So allocate the stack-frame on the task-stack and
   [all …]
|
/linux-6.12.1/kernel/ |
D | stackleak.c |
     3   * This code fills the used part of the kernel stack with a poison value
     9   * STACKLEAK reduces the information which kernel stack leak bugs can
    10   * reveal and blocks some uninitialized stack variable attacks.
    43  	pr_warn("stackleak: kernel stack erasing is %s\n",  in stack_erasing_sysctl()
    98  	 * Write poison to the task's stack between 'erase_low' and  in __stackleak_erase()
   101  	 * If we're running on a different stack (e.g. an entry trampoline  in __stackleak_erase()
   102  	 * stack) we can erase everything below the pt_regs at the top of the  in __stackleak_erase()
   103  	 * task stack.  in __stackleak_erase()
   105  	 * If we're running on the task stack itself, we must not clobber any  in __stackleak_erase()
   106  	 * stack used by this function and its caller. We assume that this  in __stackleak_erase()
   [all …]
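
A hedged sketch of the erase step those comments frame: overwrite the dirtied span with the poison word, stopping at the boundary that protects the stack still in use. STACKLEAK_POISON comes from <linux/stackleak.h>.

	#include <linux/stackleak.h>	/* STACKLEAK_POISON */

	static __always_inline void erase_span(unsigned long erase_low,
					       unsigned long erase_high)
	{
		unsigned long *p = (unsigned long *)erase_low;

		/* Everything in [erase_low, erase_high) was used; poison it. */
		while ((unsigned long)p < erase_high)
			*p++ = STACKLEAK_POISON;
	}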
|
D | stacktrace.c |
     5   * Stack trace management functions
    19   * stack_trace_print - Print the entries in the stack trace
    38   * stack_trace_snprint - Print the entries in the stack trace into a buffer
   105   * stack_trace_save - Save a stack trace into a storage array
   108   * @skipnr:	Number of entries to skip at the start of the stack trace
   128   * stack_trace_save_tsk - Save a task stack trace into a storage array
   132   * @skipnr:	Number of entries to skip at the start of the stack trace
   157   * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
   161   * @skipnr:	Number of entries to skip at the start of the stack trace
   181   * stack_trace_save_tsk_reliable - Save task stack with verification
   [all …]
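
A short usage sketch for the save/print pair documented above: capture the current stack into a fixed array, then print it.

	static void dump_current_stack(void)
	{
		unsigned long entries[32];
		unsigned int nr;

		nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0 /* skipnr */);
		stack_trace_print(entries, nr, 0 /* spaces of indent */);
	}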
|
/linux-6.12.1/security/ |
D | Kconfig.hardening |
     8  	  stack variable initializations, this warning is silenced for
    38  	prompt "Initialize kernel stack variables at function entry"
    44  	  This option enables initialization of stack variables at
    56  		bool "no automatic stack variable initialization (weakest)"
    58  		  Disable automatic stack variable initialization.
    60  		  classes of uninitialized stack variable exploits
    69  		  Zero-initialize any structures on the stack containing
    71  		  uninitialized stack variable exploits and information
    82  		  Zero-initialize any structures on the stack that may
    85  		  of uninitialized stack variable exploits and information
   [all …]
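
What the zero-initialization options buy, as an illustrative (non-kernel) fragment: with one of them selected, the compiler acts as if every automatic variable had an initializer, so the uninitialized read below yields a defined value instead of leftover stack data.

	struct request {
		int fd;
		char cookie[16];	/* would otherwise hold stale stack bytes */
	};

	int inspect(void)
	{
		struct request r;	/* no initializer in the source */

		/* With stack auto-init: always 0 (or a fixed pattern), never a leak. */
		return r.fd;
	}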
|
/linux-6.12.1/drivers/misc/lkdtm/ |
D | stackleak.c |
     3   * This code tests that the current task stack is properly erased (filled
    16   * Check that stackleak tracks the lowest stack pointer and erases the stack
    19   * To prevent the lowest stack pointer changing during the test, IRQs are
    21   * compiler will create a fixed-size stack frame for this function.
    23   * Any non-inlined function may make further use of the stack, altering the
    24   * lowest stack pointer and/or clobbering poison values. To avoid spurious
    40  	 * Check that the current and lowest recorded stack pointer values fall  in check_stackleak_irqoff()
    41  	 * within the expected task stack boundaries. These tests should never  in check_stackleak_irqoff()
    47  		pr_err("FAIL: current_stack_pointer (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",  in check_stackleak_irqoff()
    54  		pr_err("FAIL: current->lowest_stack (0x%lx) outside of task stack bounds [0x%lx..0x%lx]\n",  in check_stackleak_irqoff()
   [all …]
|
/linux-6.12.1/arch/openrisc/kernel/ |
D | unwinder.c |
    28   * the frame pointer should point to a location in the stack after the
    40   * Create a stack trace by scanning in a frame-pointer-aware way. We can
    41   * get reliable stack traces by matching the previously found frame
    42   * pointer with the top of the stack address every time we find a valid
    45   * Ideally the stack parameter will be passed as FP, but it cannot be
    49   * The OpenRISC stack frame looks something like the following. The
    53   *    SP   -> (top of stack)
    58   *    FP   -> (previous top of stack) /
    60  void unwind_stack(void *data, unsigned long *stack,  in unwind_stack() argument
    67  	while (!kstack_end(stack)) {  in unwind_stack()
   [all …]
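
A hedged sketch of the verified scan those comments describe (the struct layout and names are illustrative, loosely following the diagram above): walk the stack word by word, accept a candidate frame only when the previously recovered frame pointer matches its top, and emit its return address.

	struct frame {
		unsigned long *fp;	/* saved FP: previous top of stack */
		unsigned long ra;	/* saved return address */
		unsigned long top;	/* this frame's top-of-stack word */
	};

	static void unwind_sketch(void *data, unsigned long *stack,
				  void (*trace)(void *data, unsigned long addr))
	{
		unsigned long *next_fp = NULL;

		while (!kstack_end(stack)) {
			struct frame *f = container_of(stack, struct frame, top);

			if (f->fp > &f->top &&			/* FP points further up */
			    (!next_fp || next_fp == &f->top) &&	/* matches previous frame */
			    __kernel_text_address(f->ra)) {
				trace(data, f->ra);
				next_fp = f->fp;
			}
			stack++;
		}
	}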
|
/linux-6.12.1/tools/testing/selftests/bpf/prog_tests/ |
D | build_id.c |
    10  static void print_stack(struct bpf_stack_build_id *stack, int frame_cnt)  in print_stack() argument
    16  		switch (stack[i].status) {  in print_stack()
    23  			printf("%02hhx", (unsigned)stack[i].build_id[j]);  in print_stack()
    24  			printf(" OFFSET = %llx", (unsigned long long)stack[i].offset);  in print_stack()
    27  			printf("IP = %llx", (unsigned long long)stack[i].ip);  in print_stack()
    30  			printf("UNEXPECTED STATUS %d ", stack[i].status);  in print_stack()
    40  	struct bpf_stack_build_id *stack;  in subtest_nofault() local
    59  	stack = skel->bss->stack_nofault;  in subtest_nofault()
    62  	print_stack(stack, frame_cnt);  in subtest_nofault()
    65  	ASSERT_EQ(stack[0].status, BPF_STACK_BUILD_ID_VALID, "build_id_status");  in subtest_nofault()
   [all …]
|
/linux-6.12.1/kernel/trace/ |
D | trace_stack.c |
    58   * The stack tracer looks for a maximum stack at each call from a function. It
    59   * registers a callback from ftrace, and in that callback it examines the stack
    60   * size. It determines the stack size from the variable passed in, which is the
    62   * The stack size is calculated from the address of the local variable to the top
    63   * of the current stack. If that size is smaller than the currently saved max
    64   * stack size, nothing more is done.
    66   * If the size of the stack is greater than the maximum recorded size, then the
    70   * saving the function's local variables, the stack will look something like
    73   *   [ top of stack ]
    80   *   31: [ do trace stack here ]
   [all …]
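
That measurement as a code sketch (assuming a THREAD_SIZE-aligned stack, as on x86): the depth is the distance from a local variable's address to the top of the current stack.

	static unsigned long stack_bytes_in_use(void)
	{
		unsigned long dummy;	/* its address approximates the stack pointer */
		unsigned long sp = (unsigned long)&dummy;

		/* Offset within the THREAD_SIZE-aligned stack, measured from the top. */
		return THREAD_SIZE - (sp & (THREAD_SIZE - 1));
	}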
|
/linux-6.12.1/arch/arm64/include/asm/stacktrace/ |
D | common.h |
     3   * Common arm64 stack unwinder code.
    25   * @stack: The stack currently being unwound.
    33  	struct stack_info stack;  member
    60  	state->stack = stackinfo_get_unknown();  in unwind_init_common()
    78   * unwind_consume_stack() - Check if an object is on an accessible stack,
    79   * updating stack boundaries so that future unwind steps cannot consume this
    94  	if (stackinfo_on_stack(&state->stack, sp, size))  in unwind_consume_stack()
   102  	 * Stack transitions are strictly one-way, and once we've  in unwind_consume_stack()
   103  	 * transitioned from one stack to another, it's never valid to  in unwind_consume_stack()
   104  	 * unwind back to the old stack.  in unwind_consume_stack()
   [all …]
|
/linux-6.12.1/include/linux/sched/ |
D | task_stack.h |
     6   * task->stack (kernel stack) handling interfaces:
    17   * When accessing the stack of a non-current task that might exit, use
    23  	return task->stack;  in task_stack_page()
    31  	return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1;  in end_of_stack()
    33  	return task->stack;  in end_of_stack()
    39  #define task_stack_page(task)	((void *)(task)->stack)
    48   * Return the address of the last usable long on the stack.
    50   * When the stack grows down, this is just above the thread
    53   * When the stack grows up, this is the highest address.
    91  	void *stack = task_stack_page(current);  in object_is_on_stack() local
   [all …]
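
A short usage sketch for object_is_on_stack(), whose implementation the last hit belongs to: reject buffers that live on the caller's stack before queueing them for asynchronous completion, since the frame may be gone by the time the work runs.

	static int queue_async_buffer(void *buf)
	{
		if (object_is_on_stack(buf))
			return -EINVAL;	/* stack memory dies with the frame */

		/* ... safe to reference buf after this function returns ... */
		return 0;
	}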
|
/linux-6.12.1/tools/testing/selftests/mm/ |
D | pkey_sighandler_tests.c |
     6   * using an alternate signal stack, with the default pkey (pkey 0) disabled.
   128  	/* After we disable MPK 0, we can't access the stack to return */  in thread_segv_pkuerr_stack()
   134  	stack_t *stack = ptr;  in thread_segv_maperr_ptr() local
   138  	 * Set up an alternate signal stack, which should be pkey_mprotect()ed by  in thread_segv_maperr_ptr()
   139  	 * MPK 0. The thread's stack cannot be used for signals because it is  in thread_segv_maperr_ptr()
   142  	syscall_raw(SYS_sigaltstack, (long)stack, 0, 0, 0, 0, 0);  in thread_segv_maperr_ptr()
   155   * Note that the new thread stack and the alternate signal stack are
   193   * Note that the new thread stack and the alternate signal stack are
   231   * Verify that the sigsegv handler that uses an alternate signal stack
   233   * its own stack, and disables all other MPKs (including 0).
   [all …]
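
A hedged userspace sketch of the sigaltstack() setup these tests build on (the tests themselves issue the raw syscall so no library code touches a pkey-protected stack): install an alternate stack so a SIGSEGV handler registered with SA_ONSTACK can run even when the faulting thread's own stack is inaccessible.

	#include <signal.h>
	#include <stdlib.h>

	static void install_altstack(void)
	{
		stack_t ss = {
			.ss_sp		= malloc(SIGSTKSZ),
			.ss_size	= SIGSTKSZ,
			.ss_flags	= 0,
		};

		sigaltstack(&ss, NULL);	/* SA_ONSTACK handlers now run on ss */
	}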
|