Excerpts from arch/x86/power/cpu.c — CPU register save/restore for suspend and hibernation on i386/x86-64.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Suspend support specific for i386/x86-64.
 */
In msr_save_context():

	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			rdmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
In msr_restore_context():

	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
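Both helpers walk the same array with a begin/end pointer pair and touch only entries whose valid flag was set when the context was built. A minimal userspace sketch of that walk, with stand-in types (struct fake_msr here only approximates the kernel's struct saved_msr):

	#include <stdint.h>
	#include <stdio.h>

	struct fake_msr {
		int valid;		/* set only if the probe read succeeded */
		uint32_t msr_no;	/* MSR index */
		uint64_t q;		/* 64-bit register image */
	};

	static void walk(struct fake_msr *msr, struct fake_msr *end)
	{
		while (msr < end) {
			if (msr->valid)
				printf("MSR %#x -> %#llx\n", msr->msr_no,
				       (unsigned long long)msr->q);
			msr++;
		}
	}

	int main(void)
	{
		struct fake_msr a[] = {
			{ 1, 0x10, 0x1234 },	/* valid: would be saved/restored */
			{ 0, 0xff, 0 },		/* probe failed: skipped */
		};

		walk(a, a + sizeof(a) / sizeof(a[0]));
		return 0;
	}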
/**
 * __save_processor_state() - Save CPU registers before creating a
 *                             hibernation image and before restoring
 *                             the memory state from it
 * @ctxt: Structure to store the registers contents in.
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (ie. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (ie. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, the
 * kernel A's __save_processor_state() must save all registers needed by
 * kernel A in order to function correctly after the resume.
 */
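The note boils down to a pairing constraint: everything saved in kernel A must be enough to undo whatever the boot kernel B later does to the CPU. A standalone sketch of that lifecycle with stubbed types (the kernel's real entry points are save_processor_state()/restore_processor_state() further down in this file, operating on a global saved_context):

	struct ctxt_stub { int regs; };	/* stands in for struct saved_context */

	static void save_stub(struct ctxt_stub *c)    { c->regs = 1; }
	static void restore_stub(struct ctxt_stub *c) { (void)c->regs; }

	static void hibernation_lifecycle(void)
	{
		struct ctxt_stub ctxt;

		save_stub(&ctxt);	/* kernel A, just before the image is made */
		/* ... power off; kernel B boots and copies the image back ... */
		restore_stub(&ctxt);	/* kernel A again, with kernel B's
					 * register state still live in the CPU */
	}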
In __save_processor_state():

	store_idt(&ctxt->idt);

	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);
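The "- 1" follows the lgdt/lidt convention: the pseudo-descriptor holds a limit (offset of the last valid byte), not a byte count. A sketch assuming a layout like the kernel's struct desc_ptr:

	#include <stdint.h>

	struct desc_ptr_like {
		uint16_t size;		/* limit: table size in bytes, minus 1 */
		uint64_t address;	/* linear address of the table */
	} __attribute__((packed));

	static void set_table(struct desc_ptr_like *dt, void *table,
			      unsigned int bytes)
	{
		dt->size = bytes - 1;	/* e.g. a 128-byte GDT gives limit 127 */
		dt->address = (uintptr_t)table;
	}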
Still in __save_processor_state():

	/*
	 * segment registers
	 */
	savesegment(gs, ctxt->gs);
#ifdef CONFIG_X86_64
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	/* ... */
	rdmsrl(MSR_EFER, ctxt->efer);
#endif
	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
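rdmsrl_safe() returns non-zero if the RDMSR faults, so the ! converts "read succeeded" into a flag that later gates the WRMSR on resume (see the misc_enable_saved test in __restore_processor_state() below). The same guard pattern in a standalone sketch, with a stand-in for the faultable read:

	#include <stdbool.h>
	#include <stdint.h>

	struct misc_ctx {
		bool saved;		/* did the probe read succeed? */
		uint64_t value;
	};

	/* stand-in for rdmsrl_safe(): 0 on success, nonzero on fault */
	static int read_safe(int present, uint64_t *val)
	{
		if (!present)
			return -1;
		*val = 0xabc;
		return 0;
	}

	static void save(struct misc_ctx *c, int reg_present)
	{
		c->saved = !read_safe(reg_present, &c->value);
	}

	static bool restore(const struct misc_ctx *c, uint64_t *out)
	{
		if (!c->saved)
			return false;	/* never write a register we never read */
		*out = c->value;
		return true;
	}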
In fix_processor_context():

	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
	/* ... */
	tss.type = 0x9;	/* The available 64-bit TSS (see AMD vol 2, pg 91) */
	/* ... */
	load_mm_ldt(current->active_mm);	/* This does lldt */
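The type is forced back to 0x9 because ltr marks the TSS descriptor busy (type 0xB), and loading TR from an already-busy descriptor raises #GP; on this path the GDT image still carries the busy type from before suspend. Sketch of the bit manipulation, assuming the standard system-descriptor layout (type field in bits 40-43 of the low quadword):

	#include <stdint.h>

	#define TSS_TYPE_SHIFT	40
	#define TSS_TYPE_MASK	(0xfULL << TSS_TYPE_SHIFT)
	#define TSS_AVAILABLE	0x9ULL	/* available 64-bit TSS */
	#define TSS_BUSY	0xbULL	/* what ltr turns the type into */

	static void make_tss_available(uint64_t *desc_lo)
	{
		*desc_lo &= ~TSS_TYPE_MASK;
		*desc_lo |= TSS_AVAILABLE << TSS_TYPE_SHIFT;
	}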
/**
 * __restore_processor_state() - Restore the contents of CPU registers saved
 *                               by __save_processor_state()
 * @ctxt: Structure to load the registers contents from.
 */

In __restore_processor_state():

	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);

	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);
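On the 32-bit side the saved CR4 value doubles as a presence flag: CR4 first appeared on Pentium-class CPUs, so ctxt->cr4 == 0 means "never touch CR4 on this machine". The pattern in isolation, as a sketch:

	#include <stdint.h>

	struct cr_ctx {
		uint32_t cr4;	/* 0 => CPU predates CR4, skip the write */
	};

	static void restore_cr4(const struct cr_ctx *c,
				void (*write_cr4)(uint32_t))
	{
		if (c->cr4)
			write_cr4(c->cr4);
	}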
Still in __restore_processor_state():

#ifdef CONFIG_X86_64
	/* Restore per-CPU access via the kernel GS base. */
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);

	/* ... as before hibernation. */
	if (ctxt->cr4 & X86_CR4_FRED) {
		/* ... re-establish FRED exception handling ... */
	}
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();
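The FRED check keys off the saved CR4 image rather than the live register: the image records whether the hibernated kernel had FRED enabled, regardless of what the boot kernel did. CR4.FRED is bit 32, the first CR4 bit outside the low word, so the saved value must be tested as a 64-bit quantity. A sketch with the bit position written out (real code uses the kernel's X86_CR4_FRED constant):

	#include <stdbool.h>
	#include <stdint.h>

	#define CR4_FRED	(1ULL << 32)	/* X86_CR4_FRED in the headers */

	static bool hibernated_kernel_used_fred(uint64_t saved_cr4)
	{
		return (saved_cr4 & CR4_FRED) != 0;
	}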
With descriptor tables and exception handling working again, the usermode segments follow:

#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->es);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases.  Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#else
	loadsegment(gs, ctxt->gs);
#endif
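MSR_KERNEL_GS_BASE is, as the comment says, misnamed: while the CPU runs in the kernel it holds the inactive (usermode) GS base, and SWAPGS exchanges it with MSR_GS_BASE on kernel entry and exit. A model of that swap, which is why the field feeding it above is called usermode_gs_base:

	#include <stdint.h>

	struct gs_bases {
		uint64_t gs_base;		/* MSR_GS_BASE: currently active */
		uint64_t kernel_gs_base;	/* MSR_KERNEL_GS_BASE: inactive */
	};

	/* what the SWAPGS instruction does on kernel entry and exit */
	static void swapgs(struct gs_bases *g)
	{
		uint64_t tmp = g->gs_base;

		g->gs_base = g->kernel_gs_base;
		g->kernel_gs_base = tmp;
	}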
In hibernate_resume_nonboot_cpu_disable():

	/*
	 * ... Those will be put to proper (not interfering with hibernation
	 * ...
	 */
In bsp_check():

		return -ENODEV;

In bsp_pm_check_init():

	/*
	 * Set this bsp_pm_callback as lower priority than
	 * cpu_hotplug_pm_callback, so cpu_hotplug_pm_callback will be
	 * called earlier to disable cpu hotplug before the bsp online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
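Notifier chains run callbacks in descending priority order (notifier_chain_register() keeps the list sorted), so registering bsp_pm_callback at -INT_MAX guarantees it runs after the CPU-hotplug callback has already disabled hotplug. A runnable model of the ordering rule:

	#include <limits.h>
	#include <stdio.h>

	struct notifier { int priority; const char *name; };

	int main(void)
	{
		/* kept sorted by descending priority, like the real chain */
		struct notifier chain[] = {
			{ 0,        "cpu_hotplug_pm_callback" },
			{ -INT_MAX, "bsp_pm_callback" },	/* always last */
		};

		for (unsigned int i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
			printf("%s (priority %d)\n",
			       chain[i].name, chain[i].priority);
		return 0;
	}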
In msr_build_context():

	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		/* ... */
		return -ENOMEM;
	}

	if (saved_msrs->array) {
		/* copy MSR save requests from previous invocations */
		memcpy(msr_array, saved_msrs->array,
		       sizeof(struct saved_msr) * saved_msrs->num);
		kfree(saved_msrs->array);
	}

	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
		/* ... probe msr_id[j] and record its validity ... */
	}
	saved_msrs->num = total_num;
	saved_msrs->array = msr_array;
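The function grows the saved-MSR array the manual way: allocate a larger array, copy the old entries, free the original. A userspace rendering of the same idiom (names are illustrative, not the kernel's):

	#include <stdlib.h>
	#include <string.h>

	struct saved { unsigned no; unsigned long long q; int valid; };

	static int grow(struct saved **arr, size_t *num, size_t extra)
	{
		size_t total = *num + extra;
		struct saved *n = calloc(total, sizeof(*n));

		if (!n)
			return -1;	/* -ENOMEM in the kernel version */
		if (*arr) {
			memcpy(n, *arr, sizeof(*n) * *num);
			free(*arr);
		}
		*arr = n;
		*num = total;
		return 0;
	}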
In msr_initialize_bdw():

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n",
		d->ident);

And the matching condition in the DMI table:

		DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
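That DMI_MATCH line is one condition inside a dmi_system_id entry tying the platform match to the msr_initialize_bdw() callback. A hedged reconstruction of the table's shape (kernel types; fields follow the usual dmi_system_id layout, and only the product-version match is taken from the fragment above):

	static const struct dmi_system_id msr_save_dmi_table[] = {
		{
			.callback = msr_initialize_bdw,
			.ident = "BROADWELL BDX_EP",
			.matches = {
				DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
			},
		},
		{}	/* zeroed terminator ends the table */
	};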
In msr_save_cpuid_features():

	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
		c->family);

In pm_cpu_check():

	fn = (pm_cpu_match_t)m->driver_data;
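The cast is the standard x86_match_cpu() dispatch idiom: the matched table entry's driver_data field smuggles a function pointer, which pm_cpu_check() then calls. The pattern with stand-in types:

	typedef int (*pm_cpu_match_t)(const void *id);

	struct cpu_id_like {
		unsigned long driver_data;	/* holds a pm_cpu_match_t */
	};

	static int dispatch(const struct cpu_id_like *m)
	{
		pm_cpu_match_t fn;

		if (!m)
			return 0;	/* no table entry matched this CPU */
		fn = (pm_cpu_match_t)m->driver_data;
		return fn(m);
	}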