Lines Matching +full:idle +full:- +full:halt
1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/sched/idle.h>
21 #include <linux/user-return-notifier.h>
27 #include <linux/elf-randomize.h>
31 #include <linux/entry-common.h>
47 #include <asm/spec-ctrl.h>
59 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
60 * no more per-task TSS's. The TSS size is kept cacheline-aligned
62 * section. Since TSS's are completely CPU-local, we want them
63 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
73 .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
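
The .sp0 initializer above is a deliberate trap value, not a usable stack pointer. A minimal userspace sketch (assuming BITS_PER_LONG == 64, so this must be built as a 64-bit binary) of what the expression evaluates to:

    #include <stdio.h>

    int main(void)
    {
            /* (1UL << 63) + 1 == 0x8000000000000001: a non-canonical
             * address on x86-64, so any stray use of this "stack
             * pointer" faults immediately instead of silently
             * corrupting memory.
             */
            unsigned long sp0 = (1UL << (64 - 1)) + 1;

            printf("sp0 sentinel: %#lx\n", sp0);
            return 0;
    }
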
97 dst->thread.vm86 = NULL; in arch_dup_task_struct()
100 dst->thread.fpu.fpstate = NULL; in arch_dup_task_struct()
109 fpstate_free(&tsk->thread.fpu); in arch_release_task_struct()
118 struct thread_struct *t = &tsk->thread; in exit_thread()
119 struct fpu *fpu = &t->fpu; in exit_thread()
135 return do_set_thread_area(p, -1, utls, 0); in set_new_tls()
153 regs->ax = 0; in ret_from_fork()
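
Zeroing the child's regs->ax above is what makes fork() return 0 in the child: on x86, ax carries the syscall return value back to user mode. A small userspace illustration of the convention:

    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
            pid_t pid = fork();

            if (pid == 0) {
                    /* Child: the kernel zeroed our regs->ax. */
                    printf("child: fork() returned 0\n");
            } else if (pid > 0) {
                    /* Parent: ax holds the new child's PID. */
                    printf("parent: fork() returned %d\n", (int)pid);
                    wait(NULL);
            }
            return 0;
    }
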
161 unsigned long clone_flags = args->flags; in copy_thread()
162 unsigned long sp = args->stack; in copy_thread()
163 unsigned long tls = args->tls; in copy_thread()
172 frame = &fork_frame->frame; in copy_thread()
174 frame->bp = encode_frame_pointer(childregs); in copy_thread()
175 frame->ret_addr = (unsigned long) ret_from_fork_asm; in copy_thread()
176 p->thread.sp = (unsigned long) fork_frame; in copy_thread()
177 p->thread.io_bitmap = NULL; in copy_thread()
178 p->thread.iopl_warn = 0; in copy_thread()
179 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); in copy_thread()
183 p->thread.fsindex = current->thread.fsindex; in copy_thread()
184 p->thread.fsbase = current->thread.fsbase; in copy_thread()
185 p->thread.gsindex = current->thread.gsindex; in copy_thread()
186 p->thread.gsbase = current->thread.gsbase; in copy_thread()
188 savesegment(es, p->thread.es); in copy_thread()
189 savesegment(ds, p->thread.ds); in copy_thread()
191 if (p->mm && (clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM) in copy_thread()
192 set_bit(MM_CONTEXT_LOCK_LAM, &p->mm->context.flags); in copy_thread()
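
The mask-and-compare above is worth unpacking: LAM gets locked only when the new task shares the mm (CLONE_VM set) and is not a vfork() child (CLONE_VFORK clear), i.e. for a real thread. A standalone sketch of the idiom, with the flag values copied from the uapi headers:

    #include <stdbool.h>

    #define CLONE_VM        0x00000100      /* share the address space */
    #define CLONE_VFORK     0x00004000      /* parent blocks until child execs/exits */

    /* True only for a "real" thread: mm shared, and not vfork(). */
    static bool shares_mm_without_vfork(unsigned long clone_flags)
    {
            return (clone_flags & (CLONE_VM | CLONE_VFORK)) == CLONE_VM;
    }
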
194 p->thread.sp0 = (unsigned long) (childregs + 1); in copy_thread()
195 savesegment(gs, p->thread.gs); in copy_thread()
202 frame->flags = X86_EFLAGS_FIXED; in copy_thread()
210 new_ssp = shstk_alloc_thread_stack(p, clone_flags, args->stack_size); in copy_thread()
214 fpu_clone(p, clone_flags, args->fn, new_ssp); in copy_thread()
217 if (unlikely(p->flags & PF_KTHREAD)) { in copy_thread()
218 p->thread.pkru = pkru_get_init_value(); in copy_thread()
220 kthread_frame_init(frame, args->fn, args->fn_arg); in copy_thread()
225 * Clone current's PKRU value from hardware. tsk->thread.pkru in copy_thread()
228 p->thread.pkru = read_pkru(); in copy_thread()
230 frame->bx = 0; in copy_thread()
232 childregs->ax = 0; in copy_thread()
234 childregs->sp = sp; in copy_thread()
236 if (unlikely(args->fn)) { in copy_thread()
247 childregs->sp = 0; in copy_thread()
248 childregs->ip = 0; in copy_thread()
249 kthread_frame_init(frame, args->fn, args->fn_arg); in copy_thread()
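
Both the PF_KTHREAD branch and this args->fn branch hand the entry point to kthread_frame_init(). In outline (a sketch based on the x86 switch_to header; field placement may differ between kernel versions), it parks the callback and its argument in callee-saved slots of the inactive frame, where ret_from_fork_asm picks them up:

    /* Sketch: fn lands in bx, its argument in r12 (64-bit) or di
     * (32-bit); ret_from_fork then calls fn(arg) instead of
     * returning to user mode.
     */
    static inline void kthread_frame_init_sketch(struct inactive_task_frame *frame,
                                                 int (*fn)(void *), void *arg)
    {
            frame->bx = (unsigned long)fn;
    #ifdef CONFIG_X86_32
            frame->di = (unsigned long)arg;
    #else
            frame->r12 = (unsigned long)arg;
    #endif
    }
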
277 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); in flush_thread()
326 return -EINVAL; in set_tsc_mode()
378 return -ENODEV; in set_cpuid_mode()
393 /* If cpuid was previously disabled for this task, re-enable it. */ in arch_setup_new_exec()
409 mm_reset_untag_mask(current->mm); in arch_setup_new_exec()
436 memcpy(tss->io_bitmap.bitmap, iobm->bitmap, in tss_copy_io_bitmap()
437 max(tss->io_bitmap.prev_max, iobm->max)); in tss_copy_io_bitmap()
443 tss->io_bitmap.prev_max = iobm->max; in tss_copy_io_bitmap()
444 tss->io_bitmap.prev_sequence = iobm->sequence; in tss_copy_io_bitmap()
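
The prev_max/prev_sequence bookkeeping above is a small generation-counter cache: the copy into the TSS is skipped entirely when nothing changed since the last exit to user mode. A self-contained sketch of the same pattern (names here are illustrative, not the kernel's):

    #include <stdint.h>
    #include <string.h>

    struct source { uint64_t sequence; uint8_t bits[128]; };
    struct shadow { uint64_t prev_sequence; uint8_t bits[128]; };

    /* Copy only when the generation counter has moved on. */
    static void sync_bitmap(struct shadow *dst, const struct source *src)
    {
            if (dst->prev_sequence == src->sequence)
                    return;         /* cached copy is still current */

            memcpy(dst->bits, src->bits, sizeof(dst->bits));
            dst->prev_sequence = src->sequence;
    }

Note how the real copy spans max(prev_max, iobm->max) bytes: a bitmap that shrank since the last update still gets its stale tail overwritten.
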
448 * native_tss_update_io_bitmap - Update I/O bitmap before exiting to user mode
453 struct thread_struct *t = &current->thread; in native_tss_update_io_bitmap()
454 u16 *base = &tss->x86_tss.io_bitmap_base; in native_tss_update_io_bitmap()
461 if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) { in native_tss_update_io_bitmap()
464 struct io_bitmap *iobm = t->io_bitmap; in native_tss_update_io_bitmap()
470 if (tss->io_bitmap.prev_sequence != iobm->sequence) in native_tss_update_io_bitmap()
508 st->local_state = 0; in speculative_store_bypass_ht_init()
514 if (st->shared_state) in speculative_store_bypass_ht_init()
517 raw_spin_lock_init(&st->lock); in speculative_store_bypass_ht_init()
531 st->shared_state = per_cpu(ssb_state, cpu).shared_state; in speculative_store_bypass_ht_init()
541 st->shared_state = st; in speculative_store_bypass_ht_init()
567 if (__test_and_set_bit(LSTATE_SSB, &st->local_state)) in amd_set_core_ssb_state()
572 raw_spin_lock(&st->shared_state->lock); in amd_set_core_ssb_state()
574 if (!st->shared_state->disable_state) in amd_set_core_ssb_state()
576 st->shared_state->disable_state++; in amd_set_core_ssb_state()
577 raw_spin_unlock(&st->shared_state->lock); in amd_set_core_ssb_state()
579 if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state)) in amd_set_core_ssb_state()
582 raw_spin_lock(&st->shared_state->lock); in amd_set_core_ssb_state()
583 st->shared_state->disable_state--; in amd_set_core_ssb_state()
584 if (!st->shared_state->disable_state) in amd_set_core_ssb_state()
586 raw_spin_unlock(&st->shared_state->lock); in amd_set_core_ssb_state()
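
The disable_state dance above is a plain reference count under a per-core lock: the first sibling in turns the mitigation on, the last one out turns it off, and the __test_and_set_bit() on local_state keeps redundant transitions from touching the shared lock at all. Reduced to a userspace sketch with pthreads (names invented for illustration):

    #include <pthread.h>

    static pthread_mutex_t core_lock = PTHREAD_MUTEX_INITIALIZER;
    static int disable_count;

    static void apply_mitigation(int on)
    {
            (void)on;       /* stand-in for the MSR write on real hardware */
    }

    static void ssb_enter(void)
    {
            pthread_mutex_lock(&core_lock);
            if (!disable_count)
                    apply_mitigation(1);    /* 0 -> 1: first sibling enables */
            disable_count++;
            pthread_mutex_unlock(&core_lock);
    }

    static void ssb_exit(void)
    {
            pthread_mutex_lock(&core_lock);
            disable_count--;
            if (!disable_count)
                    apply_mitigation(0);    /* 1 -> 0: last sibling disables */
            pthread_mutex_unlock(&core_lock);
    }
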
732 * Idle related variables and functions
738 * We use this if we don't have any better idle routine.
775 * Called from the generic idle code.
818 * to SME active (or vice versa). The cache must be cleared so that in stop_this_cpu()
827 if (c->extended_cpuid_level >= 0x8000001f && (cpuid_eax(0x8000001f) & BIT(0))) in stop_this_cpu()
856 * Prefer MWAIT over HALT if MWAIT is supported, MWAIT_CPUID leaf
860 * Do not prefer MWAIT if the MONITOR instruction has a bug or idle=nomwait
868 /* If override is enforced on the command line, fall back to HALT. */ in prefer_mwait_c1_over_halt()
872 /* MWAIT is not supported on this platform. Fall back to HALT. */ in prefer_mwait_c1_over_halt()
876 /* The MONITOR instruction has a bug or the APIC stops in C1E. Fall back to HALT. */ in prefer_mwait_c1_over_halt()
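
Taken together, the comments above describe a short fallback ladder: every failed check means HALT. A condensed sketch of the control flow (simplified; not the exact function body):

    /* Sketch: reasons to stay on HALT instead of MWAIT-based C1. */
    static int prefer_mwait_c1_over_halt_sketch(const struct cpuinfo_x86 *c)
    {
            if (boot_option_idle_override != IDLE_NO_OVERRIDE)
                    return 0;       /* the command line already chose */

            if (!cpu_has(c, X86_FEATURE_MWAIT))
                    return 0;       /* no MWAIT on this CPU */

            if (boot_cpu_has_bug(X86_BUG_MONITOR) ||
                boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
                    return 0;       /* buggy MONITOR, or APIC stops in C1E */

            return 1;
    }
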
906 clflush((void *)&current_thread_info()->flags); in mwait_idle()
910 __monitor((void *)&current_thread_info()->flags, 0, 0); in mwait_idle()
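
These two lines are the core of the MONITOR/MWAIT idle idiom: arm the monitor on the thread-info flags word, re-check for pending work to close the wakeup race, then sleep until that cache line is written (a remote CPU setting TIF_NEED_RESCHED is enough to wake us). In outline, a kernel-style sketch simplified from the surrounding function:

    static void mwait_c1_sketch(void)
    {
            /* Arm the monitor on the word that remote wakeups write. */
            __monitor((void *)&current_thread_info()->flags, 0, 0);

            /* Re-check after arming: a wakeup that raced with us has
             * already written the line, so MWAIT would return at once
             * anyway, but skipping it entirely is cheaper.
             */
            if (!need_resched())
                    __mwait(0, 0);  /* doze in C1 until the line is written */
    }
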
923 pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n"); in select_idle_routine()
932 pr_info("using mwait in idle threads\n"); in select_idle_routine()
935 pr_info("using TDX aware idle routine\n"); in select_idle_routine()
971 mark_tsc_unstable("TSC halt in AMD C1E"); in arch_post_acpi_subsys_init()
981 return -EINVAL; in idle_setup()
984 pr_info("using polling idle threads\n"); in idle_setup()
987 } else if (!strcmp(str, "halt")) { in idle_setup()
988 /* 'idle=halt': use HALT for idle; C-states are disabled. */ in idle_setup()
991 /* 'idle=nomwait' disables MWAIT for idle */ in idle_setup()
994 return -EINVAL; in idle_setup()
999 early_param("idle", idle_setup);
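
The registration on the line above is the standard hook for very early boot options. A minimal sketch of the same pattern for a hypothetical parameter (all names invented):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static bool example_feature_enabled;

    /* Parses "example=on" / "example=off" from the kernel command
     * line. early_param() handlers run before most subsystems are
     * up, so they must not allocate or sleep; returning 0 means
     * the argument was consumed.
     */
    static int __init example_setup(char *str)
    {
            if (!str)
                    return -EINVAL;

            if (!strcmp(str, "on"))
                    example_feature_enabled = true;
            else if (!strcmp(str, "off"))
                    example_feature_enabled = false;
            else
                    return -EINVAL;

            return 0;
    }
    early_param("example", example_setup);
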
1003 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) in arch_align_stack()
1004 sp -= get_random_u32_below(8192); in arch_align_stack()
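
arch_align_stack() knocks up to 8 KiB off the starting stack pointer; the full function then rounds down with sp & ~0xf so the ABI's 16-byte stack alignment still holds. The same idiom as a userspace sketch:

    #include <stdint.h>
    #include <stdlib.h>

    /* Subtract a random 0..8191 byte offset, then round down to a
     * 16-byte boundary. rand() stands in for the kernel's
     * get_random_u32_below(); seed it appropriately in real use.
     */
    static uintptr_t align_stack_sketch(uintptr_t sp)
    {
            sp -= (uintptr_t)(rand() % 8192);
            return sp & ~(uintptr_t)0xf;
    }
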
1011 return randomize_page(mm->brk, SZ_32M); in arch_randomize_brk()
1013 return randomize_page(mm->brk, SZ_1G); in arch_randomize_brk()
1060 return -EINVAL; in do_arch_prctl_common()