Commit b10d92a54dac ("KVM: x86: fix RSM into 64-bit protected mode")
reordered the rsm_load_seg_64() and rsm_enter_protected_mode() calls
relative to each other. The argument that said commit made was correct;
however, putting rsm_enter_protected_mode() first wholesale violated the
following (correct) invariant from em_rsm():
* Get back to real mode, to prepare a safe state in which to load
* CR0/CR3/CR4/EFER. Also this will ensure that addresses passed
* to read_std/write_std are not virtual.
Namely, rsm_enter_protected_mode() may re-enable paging, *after* which the
call chain

  rsm_load_seg_64()
    GET_SMSTATE()
      read_std()

will try to interpret the (smbase + offset) address as a virtual one. This
will result in unexpected page faults being injected into the guest in
response to the RSM instruction.
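For reference, GET_SMSTATE() funnels every such access through
ctxt->ops->read_std() using the flat (smbase + offset) address. A
paraphrased sketch of the macro follows (the exact body in emulate.c may
differ slightly):

  /* Paraphrase of GET_SMSTATE() from arch/x86/kvm/emulate.c; not verbatim. */
  #define GET_SMSTATE(type, smbase, offset)                             \
          ({                                                            \
           type __val;                                                  \
           int r = ctxt->ops->read_std(ctxt, smbase + offset, &__val,   \
                                       sizeof(__val), NULL);            \
           if (r != X86EMUL_CONTINUE)                                   \
                   return X86EMUL_UNHANDLEABLE;                         \
           __val;                                                       \
          })

Once paging is back on, read_std() translates that address through the
guest's page tables instead of treating it as a guest-physical SMRAM
address, which is where the spurious page faults come from.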
Split rsm_load_seg_64() into two parts:

- The first part, rsm_stash_seg_64(), shall call GET_SMSTATE() while in
  real mode, and save the relevant state from SMRAM into an array local to
  rsm_load_state_64().

- The second part, rsm_load_seg_64(), shall run after entering protected
  mode, but the segment details shall come from the local array, not the
  guest's SMRAM.
Fixes: b10d92a54dac ("KVM: x86: fix RSM into 64-bit protected mode")
Cc: Paolo Bonzini <[email protected]>
Cc: Radim Krčmář <[email protected]>
Cc: Jordan Justen <[email protected]>
Cc: Michael Kinney <[email protected]>
Cc: [email protected]
Signed-off-by: Laszlo Ersek <[email protected]>
---
arch/x86/kvm/emulate.c | 37 ++++++++++++++++++++++++++++++-------
1 file changed, 30 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 9da95b9..25e16b6 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2311,7 +2311,16 @@ static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
return X86EMUL_CONTINUE;
}
-static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
+struct rsm_stashed_seg_64 {
+ u16 selector;
+ struct desc_struct desc;
+ u32 base3;
+};
+
+static int rsm_stash_seg_64(struct x86_emulate_ctxt *ctxt,
+ struct rsm_stashed_seg_64 *stash,
+ u64 smbase,
+ int n)
{
struct desc_struct desc;
int offset;
@@ -2326,10 +2335,20 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
set_desc_base(&desc, GET_SMSTATE(u32, smbase, offset + 8));
base3 = GET_SMSTATE(u32, smbase, offset + 12);
- ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
+ stash[n].selector = selector;
+ stash[n].desc = desc;
+ stash[n].base3 = base3;
return X86EMUL_CONTINUE;
}
+static inline void rsm_load_seg_64(struct x86_emulate_ctxt *ctxt,
+ struct rsm_stashed_seg_64 *stash,
+ int n)
+{
+ ctxt->ops->set_segment(ctxt, stash[n].selector, &stash[n].desc,
+ stash[n].base3, n);
+}
+
static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
u64 cr0, u64 cr4)
{
@@ -2419,6 +2438,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
u32 base3;
u16 selector;
int i, r;
+ struct rsm_stashed_seg_64 stash[6];
for (i = 0; i < 16; i++)
*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
@@ -2460,15 +2480,18 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
ctxt->ops->set_gdt(ctxt, &dt);
+ for (i = 0; i < ARRAY_SIZE(stash); i++) {
+ r = rsm_stash_seg_64(ctxt, stash, smbase, i);
+ if (r != X86EMUL_CONTINUE)
+ return r;
+ }
+
r = rsm_enter_protected_mode(ctxt, cr0, cr4);
if (r != X86EMUL_CONTINUE)
return r;
- for (i = 0; i < 6; i++) {
- r = rsm_load_seg_64(ctxt, smbase, i);
- if (r != X86EMUL_CONTINUE)
- return r;
- }
+ for (i = 0; i < ARRAY_SIZE(stash); i++)
+ rsm_load_seg_64(ctxt, stash, i);
return X86EMUL_CONTINUE;
}
--
1.8.3.1