> @@ -133,49 +150,36 @@ static noinline void hv_crash_clear_kernpt(void)
>   * available. We restore kernel GDT, and rest of the context, and continue
>   * to kexec.
>   */
> -static asmlinkage void __noreturn hv_crash_c_entry(void)
> +static void __naked hv_crash_c_entry(void)
>  {
> -	struct hv_crash_ctxt *ctxt = &hv_crash_ctxt;
> -	/* first thing, restore kernel gdt */
> -	native_load_gdt(&ctxt->gdtr);
> +	asm volatile("lgdt %0" : : "m" (hv_crash_ctxt.gdtr));
>
> -	asm volatile("movw %%ax, %%ss" : : "a"(ctxt->ss));
> -	asm volatile("movq %0, %%rsp" : : "m"(ctxt->rsp));
> +	asm volatile("movw %%ax, %%ss" : : "a"(hv_crash_ctxt.ss));
> +	asm volatile("movq %0, %%rsp" : : "m"(hv_crash_ctxt.rsp));
I know this is pre-existing, but the asm here is poor.
All segment register loads can take a memory operand, rather than
forcing the value through %eax, which in turn reduces the setup logic
the compiler needs to emit.
Something like this:
"movl %0, %%ss" : : "m"(hv_crash_ctxt.ss)
ought to do.
>
> -	asm volatile("movw %%ax, %%ds" : : "a"(ctxt->ds));
> -	asm volatile("movw %%ax, %%es" : : "a"(ctxt->es));
> -	asm volatile("movw %%ax, %%fs" : : "a"(ctxt->fs));
> -	asm volatile("movw %%ax, %%gs" : : "a"(ctxt->gs));
> +	asm volatile("movw %%ax, %%ds" : : "a"(hv_crash_ctxt.ds));
> +	asm volatile("movw %%ax, %%es" : : "a"(hv_crash_ctxt.es));
> +	asm volatile("movw %%ax, %%fs" : : "a"(hv_crash_ctxt.fs));
> +	asm volatile("movw %%ax, %%gs" : : "a"(hv_crash_ctxt.gs));
>
> -	native_wrmsrq(MSR_IA32_CR_PAT, ctxt->pat);
> -	asm volatile("movq %0, %%cr0" : : "r"(ctxt->cr0));
> +	hv_wrmsr(MSR_IA32_CR_PAT, hv_crash_ctxt.pat);
> +	asm volatile("movq %0, %%cr0" : : "r"(hv_crash_ctxt.cr0));
> -	asm volatile("movq %0, %%cr8" : : "r"(ctxt->cr8));
> -	asm volatile("movq %0, %%cr4" : : "r"(ctxt->cr4));
> -	asm volatile("movq %0, %%cr2" : : "r"(ctxt->cr4));
> +	asm volatile("movq %0, %%cr8" : : "r"(hv_crash_ctxt.cr8));
> +	asm volatile("movq %0, %%cr4" : : "r"(hv_crash_ctxt.cr4));
> +	asm volatile("movq %0, %%cr2" : : "r"(hv_crash_ctxt.cr4));
>
> -	native_load_idt(&ctxt->idtr);
> -	native_wrmsrq(MSR_GS_BASE, ctxt->gsbase);
> -	native_wrmsrq(MSR_EFER, ctxt->efer);
> +	asm volatile("lidt %0" : : "m" (hv_crash_ctxt.idtr));
> +	hv_wrmsr(MSR_GS_BASE, hv_crash_ctxt.gsbase);
> +	hv_wrmsr(MSR_EFER, hv_crash_ctxt.efer);
>  	/* restore the original kernel CS now via far return */
> -	asm volatile("movzwq %0, %%rax\n\t"
> -		     "pushq %%rax\n\t"
> -		     "pushq $1f\n\t"
> -		     "lretq\n\t"
> -		     "1:nop\n\t" : : "m"(ctxt->cs) : "rax");
> -
> -	/* We are in asmlinkage without stack frame, hence make C function
> -	 * calls which will buy stack frames.
> -	 */
> -	hv_crash_restore_tss();
> -	hv_crash_clear_kernpt();
> -
> -	/* we are now fully in devirtualized normal kernel mode */
> -	__crash_kexec(NULL);
> -
> -	hv_panic_timeout_reboot();
> +	asm volatile("pushq %q0 \n\t"
> +		     "leaq %c1(%%rip), %q0 \n\t"
> +		     "pushq %q0 \n\t"
> +		     "lretq \n\t"
> +		     :: "a"(hv_crash_ctxt.cs), "i"(hv_crash_handle));
As Uros notes, "a" is clobbered here but the compiler is not informed.
But it's not necessary to use "a" at all.
As this is a naked function you could even use three separate asm()
statements, but you can get the compiler to sort out the function
reference automatically with:
asm volatile ("push %q0\n\t"
"push %q1\n\t"
"lretq"
:: "r"(hv_crash_ctxt.cs), "r"(hv_crash_handle));
(Only tested in godbolt)
~Andrew