
Searched refs:stack (Results 1 – 25 of 888) sorted by relevance


/linux-6.12.1/lib/
stackdepot.c
312 struct stack_record *stack; in depot_pop_free_pool() local
330 stack = current_pool + pool_offset; in depot_pop_free_pool()
333 stack->handle.pool_index_plus_1 = pool_index + 1; in depot_pop_free_pool()
334 stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN; in depot_pop_free_pool()
335 stack->handle.extra = 0; in depot_pop_free_pool()
336 INIT_LIST_HEAD(&stack->hash_list); in depot_pop_free_pool()
340 return stack; in depot_pop_free_pool()
346 struct stack_record *stack; in depot_pop_free() local
359 stack = list_first_entry(&free_stacks, struct stack_record, free_list); in depot_pop_free()
360 if (!poll_state_synchronize_rcu(stack->rcu_state)) in depot_pop_free()
[all …]
/linux-6.12.1/tools/testing/selftests/bpf/progs/
test_global_func_ctx_args.c
11 static long stack[256]; variable
19 return bpf_get_stack(ctx, &stack, sizeof(stack), 0); in kprobe_typedef_ctx_subprog()
50 return bpf_get_stack((void *)ctx, &stack, sizeof(stack), 0); in kprobe_struct_ctx_subprog()
67 return bpf_get_stack(ctx, &stack, sizeof(stack), 0); in kprobe_workaround_ctx_subprog()
83 return bpf_get_stack(ctx, &stack, sizeof(stack), 0); in raw_tp_ctx_subprog()
99 return bpf_get_stack(ctx, &stack, sizeof(stack), 0); in raw_tp_writable_ctx_subprog()
115 return bpf_get_stack(ctx, &stack, sizeof(stack), 0); in perf_event_ctx_subprog()
130 return bpf_get_stack(ctx, stack, sizeof(stack), 0); in subprog_ctx_tag()
142 return bpf_get_stack(ctx1, stack, sizeof(stack), 0) + in subprog_multi_ctx_tags()
144 bpf_get_stack(ctx2, stack, sizeof(stack), 0); in subprog_multi_ctx_tags()
/linux-6.12.1/drivers/misc/altera-stapl/
altera.c
213 long *stack = astate->stack; in altera_execute() local
528 stack[stack_ptr] = stack[stack_ptr - 1]; in altera_execute()
534 swap(stack[stack_ptr - 2], stack[stack_ptr - 1]); in altera_execute()
539 stack[stack_ptr - 1] += stack[stack_ptr]; in altera_execute()
545 stack[stack_ptr - 1] -= stack[stack_ptr]; in altera_execute()
551 stack[stack_ptr - 1] *= stack[stack_ptr]; in altera_execute()
557 stack[stack_ptr - 1] /= stack[stack_ptr]; in altera_execute()
563 stack[stack_ptr - 1] %= stack[stack_ptr]; in altera_execute()
569 stack[stack_ptr - 1] <<= stack[stack_ptr]; in altera_execute()
575 stack[stack_ptr - 1] >>= stack[stack_ptr]; in altera_execute()
[all …]
/linux-6.12.1/arch/x86/kernel/
dumpstack_32.c
38 static bool in_hardirq_stack(unsigned long *stack, struct stack_info *info) in in_hardirq_stack() argument
47 if (stack < begin || stack > end) in in_hardirq_stack()
63 static bool in_softirq_stack(unsigned long *stack, struct stack_info *info) in in_softirq_stack() argument
72 if (stack < begin || stack > end) in in_softirq_stack()
88 static bool in_doublefault_stack(unsigned long *stack, struct stack_info *info) in in_doublefault_stack() argument
93 void *begin = ss->stack; in in_doublefault_stack()
94 void *end = begin + sizeof(ss->stack); in in_doublefault_stack()
96 if ((void *)stack < begin || (void *)stack >= end) in in_doublefault_stack()
108 int get_stack_info(unsigned long *stack, struct task_struct *task, in get_stack_info() argument
111 if (!stack) in get_stack_info()
[all …]
dumpstack.c
32 bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task, in in_task_stack() argument
38 if (stack < begin || stack >= end) in in_task_stack()
50 bool noinstr in_entry_stack(unsigned long *stack, struct stack_info *info) in in_entry_stack() argument
57 if ((void *)stack < begin || (void *)stack >= end) in in_entry_stack()
187 unsigned long *stack, const char *log_lvl) in show_trace_log_lvl() argument
197 unwind_start(&state, task, regs, stack); in show_trace_log_lvl()
216 for (stack = stack ?: get_stack_pointer(task, regs); in show_trace_log_lvl()
217 stack; in show_trace_log_lvl()
218 stack = stack_info.next_sp) { in show_trace_log_lvl()
221 stack = PTR_ALIGN(stack, sizeof(long)); in show_trace_log_lvl()
[all …]
dumpstack_64.c
94 static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info) in in_exception_stack() argument
96 unsigned long begin, end, stk = (unsigned long)stack; in in_exception_stack()
135 static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info) in in_irq_stack() argument
153 if (stack < begin || stack >= end) in in_irq_stack()
170 bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task, in get_stack_info_noinstr() argument
173 if (in_task_stack(stack, task, info)) in get_stack_info_noinstr()
179 if (in_exception_stack(stack, info)) in get_stack_info_noinstr()
182 if (in_irq_stack(stack, info)) in get_stack_info_noinstr()
185 if (in_entry_stack(stack, info)) in get_stack_info_noinstr()
191 int get_stack_info(unsigned long *stack, struct task_struct *task, in get_stack_info() argument
[all …]
/linux-6.12.1/tools/testing/selftests/bpf/prog_tests/
build_id.c
10 static void print_stack(struct bpf_stack_build_id *stack, int frame_cnt) in print_stack() argument
16 switch (stack[i].status) { in print_stack()
23 printf("%02hhx", (unsigned)stack[i].build_id[j]); in print_stack()
24 printf(" OFFSET = %llx", (unsigned long long)stack[i].offset); in print_stack()
27 printf("IP = %llx", (unsigned long long)stack[i].ip); in print_stack()
30 printf("UNEXPECTED STATUS %d ", stack[i].status); in print_stack()
40 struct bpf_stack_build_id *stack; in subtest_nofault() local
59 stack = skel->bss->stack_nofault; in subtest_nofault()
62 print_stack(stack, frame_cnt); in subtest_nofault()
65 ASSERT_EQ(stack[0].status, BPF_STACK_BUILD_ID_VALID, "build_id_status"); in subtest_nofault()
[all …]
/linux-6.12.1/Documentation/arch/x86/
shstk.rst
14 CET introduces shadow stack and indirect branch tracking (IBT). A shadow stack
15 is a secondary stack allocated from memory which cannot be directly modified by
17 return address to both the normal stack and the shadow stack. Upon
18 function return, the processor pops the shadow stack copy and compares it
19 to the normal stack copy. If the two differ, the processor raises a
23 shadow stack and kernel IBT are supported.
28 To use userspace shadow stack you need HW that supports it, a kernel
34 To build a user shadow stack enabled kernel, Binutils v2.29 or LLVM v6 or later
38 CET. "user_shstk" means that userspace shadow stack is supported on the current
98 ARCH_SHSTK_SHSTK - Shadow stack
[all …]
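The shstk.rst excerpt above describes the ARCH_SHSTK_* arch_prctl() interface for userspace shadow stack. A minimal sketch of how that interface might be exercised follows, assuming an x86_64 CPU and kernel with user_shstk support and uapi headers that expose the ARCH_SHSTK_* constants (Linux 6.6 or later); in practice the dynamic loader usually enables the feature itself, so the direct call here is illustrative only.

	/* Sketch only: enable and query the userspace shadow stack via arch_prctl().
	 * Assumes <asm/prctl.h> defines ARCH_SHSTK_ENABLE, ARCH_SHSTK_STATUS and
	 * ARCH_SHSTK_SHSTK on a shadow-stack capable kernel (see shstk.rst). */
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <asm/prctl.h>

	int main(void)
	{
		unsigned long features = 0;

		/* Ask the kernel to enable the shadow stack feature for this task. */
		if (syscall(SYS_arch_prctl, ARCH_SHSTK_ENABLE, ARCH_SHSTK_SHSTK) != 0)
			perror("ARCH_SHSTK_ENABLE");

		/* Read back which shadow stack features are currently enabled. */
		if (syscall(SYS_arch_prctl, ARCH_SHSTK_STATUS, &features) == 0)
			printf("shadow stack features: %#lx\n", features);

		return 0;
	}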
kernel-stacks.rst
14 Like all other architectures, x86_64 has a kernel stack for every
17 zombie. While the thread is in user space the kernel stack is empty
25 * Interrupt stack. IRQ_STACK_SIZE
29 kernel switches from the current task to the interrupt stack. Like
32 of every per thread stack.
34 The interrupt stack is also used when processing a softirq.
36 Switching to the kernel interrupt stack is done by software based on a
41 to automatically switch to a new stack for designated events such as
46 point to dedicated stacks; each stack can be a different size.
50 loads such a descriptor, the hardware automatically sets the new stack
[all …]
/linux-6.12.1/arch/s390/kernel/
dumpstack.c
44 enum stack_type type, unsigned long stack) in in_stack() argument
46 if (sp < stack || sp >= stack + THREAD_SIZE) in in_stack()
49 info->begin = stack; in in_stack()
50 info->end = stack + THREAD_SIZE; in in_stack()
57 unsigned long stack = (unsigned long)task_stack_page(task); in in_task_stack() local
59 return in_stack(sp, info, STACK_TYPE_TASK, stack); in in_task_stack()
64 unsigned long stack = get_lowcore()->async_stack - STACK_INIT_OFFSET; in in_irq_stack() local
66 return in_stack(sp, info, STACK_TYPE_IRQ, stack); in in_irq_stack()
71 unsigned long stack = get_lowcore()->nodat_stack - STACK_INIT_OFFSET; in in_nodat_stack() local
73 return in_stack(sp, info, STACK_TYPE_NODAT, stack); in in_nodat_stack()
[all …]
/linux-6.12.1/arch/um/kernel/skas/
mmu.c
25 unsigned long stack = 0; in init_new_context() local
28 stack = __get_free_pages(GFP_KERNEL | __GFP_ZERO, ilog2(STUB_DATA_PAGES)); in init_new_context()
29 if (stack == 0) in init_new_context()
32 new_id->stack = stack; in init_new_context()
35 new_id->pid = start_userspace(stack); in init_new_context()
70 if (new_id->stack != 0) in init_new_context()
71 free_pages(new_id->stack, ilog2(STUB_DATA_PAGES)); in init_new_context()
93 free_pages(mmu->id.stack, ilog2(STUB_DATA_PAGES)); in destroy_context()
/linux-6.12.1/Documentation/mm/
vmalloced-kernel-stacks.rst
21 Kernel stack overflows are often hard to debug and make the kernel
25 Virtually mapped kernel stacks with guard pages cause kernel stack
31 causes reliable faults when the stack overflows. The usability of
32 the stack trace after overflow and response to the overflow itself
49 needs to work while the stack points to a virtual address with
51 most likely) needs to ensure that the stack's page table entries
52 are populated before running on a possibly unpopulated stack.
53 - If the stack overflows into a guard page, something reasonable
64 with guard pages. This causes kernel stack overflows to be caught
75 VMAP_STACK is enabled, it is not possible to run DMA on stack
[all …]
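The vmalloced-kernel-stacks.rst excerpt notes that with VMAP_STACK the task stack lives in vmalloc space, so stack buffers cannot be handed to the DMA API directly. Below is a minimal kernel-flavoured sketch of the usual workaround, bouncing such a buffer through a kmalloc'd copy; dma_safe_copy() is a hypothetical helper name, not a kernel API.

	/* Sketch only: copy a buffer into linear-mapped memory if it lives on the
	 * (possibly vmalloc'd) stack, so it can safely be used for DMA. The caller
	 * owns and must kfree() the returned copy when it differs from buf. */
	#include <linux/mm.h>			/* is_vmalloc_addr() */
	#include <linux/slab.h>			/* gfp_t, kfree() */
	#include <linux/string.h>		/* kmemdup() */
	#include <linux/sched/task_stack.h>	/* object_is_on_stack() */

	static void *dma_safe_copy(const void *buf, size_t len, gfp_t gfp)
	{
		/* Stack and vmalloc memory are not part of the linear map and may
		 * not be physically contiguous; bounce them into a kmalloc copy. */
		if (object_is_on_stack(buf) || is_vmalloc_addr(buf))
			return kmemdup(buf, len, gfp);

		/* Already DMA-capable linear-mapped memory; use it as is. */
		return (void *)buf;
	}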
/linux-6.12.1/arch/nios2/kernel/
traps.c
60 void show_stack(struct task_struct *task, unsigned long *stack, in show_stack() argument
66 if (!stack) { in show_stack()
68 stack = (unsigned long *)task->thread.ksp; in show_stack()
70 stack = (unsigned long *)&stack; in show_stack()
73 addr = (unsigned long) stack; in show_stack()
76 printk("%sStack from %08lx:", loglvl, (unsigned long)stack); in show_stack()
78 if (stack + 1 > endstack) in show_stack()
82 printk("%s %08lx", loglvl, *stack++); in show_stack()
87 while (stack + 1 <= endstack) { in show_stack()
88 addr = *stack++; in show_stack()
/linux-6.12.1/arch/um/os-Linux/
helper.c
46 unsigned long stack, sp; in run_helper() local
49 stack = alloc_stack(0, __uml_cant_sleep()); in run_helper()
50 if (stack == 0) in run_helper()
68 sp = stack + UM_KERN_PAGE_SIZE; in run_helper()
114 free_stack(stack, 0); in run_helper()
121 unsigned long stack, sp; in run_helper_thread() local
124 stack = alloc_stack(0, __uml_cant_sleep()); in run_helper_thread()
125 if (stack == 0) in run_helper_thread()
128 sp = stack + UM_KERN_PAGE_SIZE; in run_helper_thread()
147 free_stack(stack, 0); in run_helper_thread()
[all …]
/linux-6.12.1/arch/openrisc/kernel/
unwinder.c
60 void unwind_stack(void *data, unsigned long *stack, in unwind_stack() argument
67 while (!kstack_end(stack)) { in unwind_stack()
68 frameinfo = container_of(stack, in unwind_stack()
83 stack++; in unwind_stack()
93 void unwind_stack(void *data, unsigned long *stack, in unwind_stack() argument
98 while (!kstack_end(stack)) { in unwind_stack()
99 addr = *stack++; in unwind_stack()
/linux-6.12.1/mm/kmsan/
init.c
150 static void smallstack_push(struct smallstack *stack, struct page *pages) in smallstack_push() argument
152 KMSAN_WARN_ON(stack->index == MAX_BLOCKS); in smallstack_push()
153 stack->items[stack->index] = pages; in smallstack_push()
154 stack->index++; in smallstack_push()
158 static struct page *smallstack_pop(struct smallstack *stack) in smallstack_pop() argument
162 KMSAN_WARN_ON(stack->index == 0); in smallstack_pop()
163 stack->index--; in smallstack_pop()
164 ret = stack->items[stack->index]; in smallstack_pop()
165 stack->items[stack->index] = NULL; in smallstack_pop()
/linux-6.12.1/tools/testing/selftests/vDSO/
vdso_standalone_test_x86.c
87 void c_main(void **stack) in c_main() argument
90 long argc = (long)*stack; in c_main()
91 stack += argc + 2; in c_main()
94 while(*stack) in c_main()
95 stack++; in c_main()
96 stack++; in c_main()
99 vdso_init_from_auxv((void *)stack); in c_main()
/linux-6.12.1/include/linux/sched/
task_stack.h
23 return task->stack; in task_stack_page()
31 return (unsigned long *)((unsigned long)task->stack + THREAD_SIZE) - 1; in end_of_stack()
33 return task->stack; in end_of_stack()
39 #define task_stack_page(task) ((void *)(task)->stack)
91 void *stack = task_stack_page(current); in object_is_on_stack() local
94 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); in object_is_on_stack()
/linux-6.12.1/arch/um/kernel/
sysrq.c
29 void show_stack(struct task_struct *task, unsigned long *stack, in show_stack() argument
41 if (!stack) in show_stack()
42 stack = get_stack_pointer(task, segv_regs); in show_stack()
46 if (kstack_end(stack)) in show_stack()
50 pr_cont(" %08lx", READ_ONCE_NOCHECK(*stack)); in show_stack()
51 stack++; in show_stack()
/linux-6.12.1/mm/
page_owner.c
39 struct stack { struct
41 struct stack *next; argument
43 static struct stack dummy_stack;
44 static struct stack failure_stack;
45 static struct stack *stack_list;
169 struct stack *stack; in add_stack_record_to_list() local
172 stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask)); in add_stack_record_to_list()
173 if (!stack) { in add_stack_record_to_list()
179 stack->stack_record = stack_record; in add_stack_record_to_list()
180 stack->next = NULL; in add_stack_record_to_list()
[all …]
/linux-6.12.1/tools/perf/scripts/python/
stackcollapse.py
97 stack = list()
103 stack.append(tidy_function_name(entry['sym']['name'],
108 stack.append(tidy_function_name(param_dict['symbol'],
119 stack.append(comm)
121 stack_string = ';'.join(reversed(stack))
126 for stack in list:
127 print("%s %d" % (stack, lines[stack]))
/linux-6.12.1/tools/testing/selftests/mm/
pkey_sighandler_tests.c
134 stack_t *stack = ptr; in thread_segv_maperr_ptr() local
142 syscall_raw(SYS_sigaltstack, (long)stack, 0, 0, 0, 0, 0); in thread_segv_maperr_ptr()
239 void *stack; in test_sigsegv_handler_with_different_pkey_for_stack() local
254 stack = mmap(0, STACK_SIZE, PROT_READ | PROT_WRITE, in test_sigsegv_handler_with_different_pkey_for_stack()
257 assert(stack != MAP_FAILED); in test_sigsegv_handler_with_different_pkey_for_stack()
264 pkey_mprotect(stack, STACK_SIZE, PROT_READ | PROT_WRITE, pkey); in test_sigsegv_handler_with_different_pkey_for_stack()
280 (long) ((char *)(stack) + STACK_SIZE), in test_sigsegv_handler_with_different_pkey_for_stack()
349 stack_t *stack = ptr; in thread_sigusr2_self() local
356 syscall(SYS_sigaltstack, (long)stack, 0, 0, 0, 0, 0); in thread_sigusr2_self()
383 void *stack; in test_pkru_sigreturn() local
[all …]
/linux-6.12.1/arch/x86/include/asm/
stacktrace.h
31 bool in_task_stack(unsigned long *stack, struct task_struct *task,
34 bool in_entry_stack(unsigned long *stack, struct stack_info *info);
36 int get_stack_info(unsigned long *stack, struct task_struct *task,
38 bool get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
42 bool get_stack_guard_info(unsigned long *stack, struct stack_info *info) in get_stack_guard_info() argument
45 if (get_stack_info_noinstr(stack, current, info)) in get_stack_guard_info()
48 return get_stack_info_noinstr((void *)stack + PAGE_SIZE, current, info); in get_stack_guard_info()
/linux-6.12.1/scripts/kconfig/
symbol.c
1054 static void dep_stack_insert(struct dep_stack *stack, struct symbol *sym) in dep_stack_insert() argument
1056 memset(stack, 0, sizeof(*stack)); in dep_stack_insert()
1058 check_top->next = stack; in dep_stack_insert()
1059 stack->prev = check_top; in dep_stack_insert()
1060 stack->sym = sym; in dep_stack_insert()
1061 check_top = stack; in dep_stack_insert()
1078 struct dep_stack *stack; in sym_check_print_recursive() local
1090 for (stack = check_top; stack != NULL; stack = stack->prev) in sym_check_print_recursive()
1091 if (stack->sym == last_sym) in sym_check_print_recursive()
1093 if (!stack) { in sym_check_print_recursive()
[all …]
/linux-6.12.1/arch/powerpc/kernel/
stacktrace.c
44 unsigned long *stack = (unsigned long *) sp; in arch_stack_walk() local
50 newsp = stack[0]; in arch_stack_walk()
51 ip = stack[STACK_FRAME_LR_SAVE]; in arch_stack_walk()
96 unsigned long *stack = (unsigned long *) sp; in arch_stack_walk_reliable() local
103 newsp = stack[0]; in arch_stack_walk_reliable()
123 stack[STACK_INT_FRAME_MARKER_LONGS] == STACK_FRAME_REGS_MARKER) { in arch_stack_walk_reliable()
128 ip = stack[STACK_FRAME_LR_SAVE]; in arch_stack_walk_reliable()
136 ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack); in arch_stack_walk_reliable()
