// SPDX-License-Identifier: GPL-2.0-only
/*
 * Suspend support specific for i386/x86-64.
 */
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			rdmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}
static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}
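The two loops above are mirror images over one array of {msr_no, valid, value} records: the valid flag lets the restore path skip MSRs that could not be read at save time instead of faulting on them. A minimal userspace sketch of the same pattern, with struct rec and mock_rdmsr()/mock_wrmsr() as illustrative stand-ins for the kernel-only struct saved_msr and rdmsrl()/wrmsrl():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rec { uint32_t msr_no; bool valid; uint64_t q; };	/* ~saved_msr */

static uint64_t fake_msrs[2] = { 0x1234, 0x5678 };	/* mock "hardware" */

static void mock_rdmsr(uint32_t no, uint64_t *v) { *v = fake_msrs[no]; }
static void mock_wrmsr(uint32_t no, uint64_t v)  { fake_msrs[no] = v; }

int main(void)
{
	struct rec ctx[] = { { 0, true, 0 }, { 1, false, 0 } };
	struct rec *msr, *end = ctx + 2;

	for (msr = ctx; msr < end; msr++)	/* msr_save_context() shape */
		if (msr->valid)
			mock_rdmsr(msr->msr_no, &msr->q);

	fake_msrs[0] = 0;			/* pretend suspend clobbered it */

	for (msr = ctx; msr < end; msr++)	/* msr_restore_context() shape */
		if (msr->valid)
			mock_wrmsr(msr->msr_no, msr->q);

	printf("restored: %#llx\n", (unsigned long long)fake_msrs[0]);
	return 0;
}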
/**
 * __save_processor_state() - Save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (ie. the kernel used for loading the hibernation image)
 * might be harmful to the image kernel (ie. the one whose memory state is
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is saved in the image and
 * kernel B is used for loading the hibernation image into memory, the
 * registers kernel B may clobber must be saved here by kernel A and put
 * back by __restore_processor_state().
 */
static void __save_processor_state(struct saved_context *ctxt)
{
	store_idt(&ctxt->idt);
	/*
	 * We save the GDT descriptor here, but restore it only in the
	 * hibernate case. For ACPI S3 resume, this is loaded via
	 * 'early_gdt_desc' in 64-bit mode in "secondary_startup_64". In
	 * 32-bit mode it is done via 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());
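The "- 1" follows the x86 convention that a descriptor-table register holds a limit (the last valid byte offset), not a byte count. A small sketch of that pseudo-descriptor layout, assuming a hypothetical struct pseudo_descriptor and fake_gdt rather than the kernel's struct desc_ptr and real GDT:

#include <stdint.h>
#include <stdio.h>

struct pseudo_descriptor {		/* ~the kernel's struct desc_ptr */
	uint16_t size;			/* limit = size in bytes - 1 */
	uint64_t address;		/* linear base of the table */
} __attribute__((packed));

int main(void)
{
	enum { GDT_ENTRIES = 16, GDT_SIZE = GDT_ENTRIES * 8 };
	static uint64_t fake_gdt[GDT_ENTRIES];	/* stand-in for a real GDT */

	struct pseudo_descriptor gdtr = {
		.size = GDT_SIZE - 1,		/* same "- 1" convention */
		.address = (uint64_t)(uintptr_t)fake_gdt,
	};

	printf("limit=%u base=%#llx\n", gdtr.size,
	       (unsigned long long)gdtr.address);
	return 0;
}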
	store_tr(ctxt->tr);
	/* Segment registers. */
	savesegment(gs, ctxt->gs);
#ifdef CONFIG_X86_64
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
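rdmsrl_safe() returns nonzero when the MSR access faults, so misc_enable_saved records whether the read actually succeeded. The same probe-then-remember pattern can be sketched from userspace through Linux's msr driver (assumes x86, root, and 'modprobe msr'; rdmsr_safe_user() is an illustrative helper, not a kernel API):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int rdmsr_safe_user(int cpu, uint32_t msr, uint64_t *val)
{
	char path[64];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	n = pread(fd, val, sizeof(*val), msr);	/* file offset = MSR index */
	close(fd);
	return n == sizeof(*val) ? 0 : -1;	/* nonzero ~ rdmsrl_safe failure */
}

int main(void)
{
	uint64_t v = 0;
	int misc_enable_saved = !rdmsr_safe_user(0, 0x1a0, &v); /* IA32_MISC_ENABLE */

	if (misc_enable_saved)
		printf("MISC_ENABLE=%#llx\n", (unsigned long long)v);
	else
		puts("MSR not readable here");
	return 0;
}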
	/* In fix_processor_context(): */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

	tss.type = 0x9;	/* The available 64-bit TSS (see AMD vol 2, pg 91). */

	load_mm_ldt(current->active_mm);	/* This does lldt */
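Resetting the type to 0x9 matters because ltr marks a TSS descriptor busy (type 0xB), and loading TR from a busy descriptor faults. A toy illustration of that type nibble (the descriptor value below is made up, not read from hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up system descriptor qword; the type nibble is bits 40-43. */
	uint64_t desc = 0x0000890000000067ULL;	/* type 0x9: available TSS */

	desc |= 1ULL << 41;	/* what ltr does: type becomes 0xB (busy) */
	printf("after ltr: type %#x\n", (unsigned)((desc >> 40) & 0xf));

	desc &= ~(1ULL << 41);	/* the fix: back to 0x9 so ltr works again */
	printf("fixed:     type %#x\n", (unsigned)((desc >> 40) & 0xf));
	return 0;
}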
/**
 * __restore_processor_state() - Restore the contents of CPU registers saved
 *	by __save_processor_state()
 * @ctxt: Structure to load the registers contents from.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
	/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);
	/* Restore the IDT. */
	load_idt(&ctxt->idt);
	/*
	 * Restore percpu access. Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();
	/*
	 * With the descriptor tables restored and exception handling
	 * working, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->ds);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);
	/* Restore FSBASE and the usermode GSBASE after the segment loads. */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#else
	loadsegment(gs, ctxt->gs);
#endif
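On x86-64 the selector and the hidden base are separate state: writing a segment register does not bring the base back, which is why MSR_FS_BASE and MSR_KERNEL_GS_BASE are restored after the segment loads. A userspace peek at the current FS base through the real arch_prctl(2) interface (Linux x86-64 only):

#define _GNU_SOURCE
#include <asm/prctl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	uint64_t base = 0;

	/* ARCH_GET_FS reads the hidden FS base (glibc's TLS pointer). */
	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &base) == 0)
		printf("FS base = %#llx\n", (unsigned long long)base);
	return 0;
}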
	/*
	 * In hibernate_resume_nonboot_cpu_disable(): first wake up any SMT
	 * siblings parked in mwait/cpuidle sleep. Those will be put to proper
	 * (not interfering with hibernation resume) sleep afterward, and the
	 * resumed kernel will decide itself what to do with them.
	 */
		return -ENODEV;	/* In bsp_check(): CPU 0 is offline. */
	/*
	 * In bsp_pm_check_init(): register bsp_pm_callback with lower
	 * priority than cpu_hotplug_pm_callback, so the latter is called
	 * earlier to disable cpu hotplug before the bsp online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
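Notifier chains invoke callbacks in descending priority order, so registering at -INT_MAX pushes bsp_pm_callback behind everything else on the chain. A toy model of that ordering (struct notifier here is illustrative, not the kernel's notifier_block):

#include <limits.h>
#include <stdio.h>

struct notifier { int priority; const char *name; };

int main(void)
{
	/* The chain keeps callbacks sorted by descending priority. */
	struct notifier chain[] = {
		{ 0,        "cpu_hotplug_pm_callback" },  /* runs first */
		{ -INT_MAX, "bsp_pm_callback" },          /* runs last  */
	};

	for (unsigned i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
		printf("%u: %s (prio %d)\n", i, chain[i].name,
		       chain[i].priority);
	return 0;
}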
	/* In msr_build_context(): grow the saved-MSR array by 'num' entries. */
	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array)
		return -ENOMEM;

	if (saved_msrs->array) {
		/* Carry over MSR save requests from earlier invocations. */
		memcpy(msr_array, saved_msrs->array,
		       sizeof(struct saved_msr) * saved_msrs->num);
		kfree(saved_msrs->array);
	}

	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
		msr_array[i].info.msr_no = msr_id[j];
		msr_array[i].valid = false;
		msr_array[i].info.reg.q = 0;
	}

	saved_msrs->num = total_num;
	saved_msrs->array = msr_array;
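Because several quirk callbacks may each contribute MSR ids, the function reallocates and appends rather than overwriting. The same grow-and-append shape in plain C, with struct rec and grow_append() as stand-ins for struct saved_msr and msr_build_context():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rec { uint32_t id; uint64_t value; };	/* ~struct saved_msr */

static int grow_append(struct rec **arr, size_t *num,
		       const uint32_t *ids, size_t n)
{
	size_t total = *num + n;
	struct rec *tmp = malloc(total * sizeof(*tmp));

	if (!tmp)
		return -1;			/* -ENOMEM in the kernel */
	if (*arr) {
		memcpy(tmp, *arr, *num * sizeof(*tmp));	/* keep prior entries */
		free(*arr);
	}
	for (size_t i = *num, j = 0; i < total; i++, j++)
		tmp[i] = (struct rec){ .id = ids[j] };
	*num = total;
	*arr = tmp;
	return 0;
}

int main(void)
{
	struct rec *arr = NULL;
	size_t num = 0;
	const uint32_t a[] = { 0x1a0 }, b[] = { 0xe2, 0xe7 };

	grow_append(&arr, &num, a, 1);	/* first quirk registers one MSR */
	grow_append(&arr, &num, b, 2);	/* a later quirk appends two more */
	printf("%zu entries, last id %#x\n", num, arr[num - 1].id);
	free(arr);
	return 0;
}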
	/* In msr_initialize_bdw(): */
	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);

	/* The dmi_system_id entry that triggers it: */
	{
		.callback = msr_initialize_bdw,
		.ident = "BROADWELL BDX_EP",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
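The firmware strings that DMI_MATCH() compares are also exported to userspace, so the quirk's matching logic can be sketched against /sys/class/dmi/id/ (a real sysfs interface; read_id() is an illustrative helper):

#include <stdio.h>
#include <string.h>

static int read_id(const char *file, char *buf, size_t len)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/class/dmi/id/%s", file);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, (int)len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';	/* strip trailing newline */
	return 0;
}

int main(void)
{
	char name[64], version[64];

	if (!read_id("product_name", name, sizeof(name)) &&
	    !read_id("product_version", version, sizeof(version)) &&
	    !strcmp(name, "GRANTLEY") && !strcmp(version, "E63448-400"))
		puts("BROADWELL BDX_EP quirk would apply");
	return 0;
}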
	/* In msr_save_cpuid_features(): */
	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
		c->family);
	/* In pm_cpu_check(): the matched table entry carries the callback. */
	fn = (pm_cpu_match_t)m->driver_data;
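x86_match_cpu() returns the matching table entry, whose driver_data field smuggles the per-model callback as an integer; the cast above recovers it. A self-contained model of that dispatch (struct cpu_id and save_cpuid_msrs() are stand-ins; the integer/function-pointer round trip mirrors what the kernel does with kernel_ulong_t driver_data and is implementation-defined in ISO C):

#include <stdio.h>

struct cpu_id { int family; unsigned long driver_data; };	/* ~x86_cpu_id */
typedef int (*pm_cpu_match_t)(const struct cpu_id *);

static int save_cpuid_msrs(const struct cpu_id *c)
{
	printf("family %#x matched, queueing MSRs to save\n", c->family);
	return 0;
}

int main(void)
{
	struct cpu_id table[] = {
		{ .family = 0x17,
		  .driver_data = (unsigned long)save_cpuid_msrs },
		{ 0 }
	};
	const struct cpu_id *m = &table[0];	/* as if x86_match_cpu() hit */
	pm_cpu_match_t fn = (pm_cpu_match_t)m->driver_data;

	return fn(m);
}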