Test that
+ memory failure handling results in unmapping of bad memory from stage
  2 page tables, hence requiring faulting on next guest access
+ when the guest tries to fault in a poisoned page from guest_memfd, the
  userspace VMM is informed via EHWPOISON

Co-developed-by: Ackerley Tng <[email protected]>
Signed-off-by: Ackerley Tng <[email protected]>
Signed-off-by: Lisa Wang <[email protected]>
---
 .../testing/selftests/kvm/guest_memfd_test.c  | 65 +++++++++++++++++++
 1 file changed, 65 insertions(+)

diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index 7bcf8d2d5d4d..dc3398e22edd 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -539,6 +539,70 @@ static void test_guest_memfd_guest(void)
        kvm_vm_free(vm);
 }
 
+static void __guest_code_read(uint8_t *mem)
+{
+       READ_ONCE(*mem);
+       GUEST_DONE();
+}
+
+static void guest_read(struct kvm_vcpu *vcpu, uint64_t gpa, int expected_errno)
+{
+       vcpu_arch_set_entry_point(vcpu, __guest_code_read);
+       vcpu_args_set(vcpu, 1, gpa);
+
+       if (expected_errno) {
+               TEST_ASSERT_EQ(_vcpu_run(vcpu), -1);
+               TEST_ASSERT_EQ(errno, expected_errno);
+       } else {
+               vcpu_run(vcpu);
+               TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+       }
+}
+
+static void test_memory_failure_guest(void)
+{
+       const uint64_t gpa = SZ_4G;
+       const int slot = 1;
+
+       unsigned long memory_failure_pfn;
+       struct kvm_vcpu *vcpu;
+       struct kvm_vm *vm;
+       uint8_t *mem;
+       size_t size;
+       int fd;
+
+       if (!kvm_has_cap(KVM_CAP_GUEST_MEMFD_FLAGS))
+               return;
+
+       vm = __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, &vcpu, 1, __guest_code_read);
+
+       size = vm->page_size;
+       fd = vm_create_guest_memfd(vm, size, GUEST_MEMFD_FLAG_MMAP | GUEST_MEMFD_FLAG_INIT_SHARED);
+       vm_set_user_memory_region2(vm, slot, KVM_MEM_GUEST_MEMFD, gpa, size, NULL, fd, 0);
+
+       mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+       TEST_ASSERT(mem != MAP_FAILED, "mmap() for guest_memfd should succeed.");
+       virt_pg_map(vm, gpa, gpa);
+
+       /* Fault in page to read pfn, then unmap page for testing. */
+       READ_ONCE(*mem);
+       memory_failure_pfn = addr_to_pfn(mem);
+       munmap(mem, size);
+
+       /* Fault page into stage2 page tables. */
+       guest_read(vcpu, gpa, 0);
+
+       mark_memory_failure(memory_failure_pfn, 0);
+
+       guest_read(vcpu, gpa, EHWPOISON);
+       munmap(mem, size);
+
+       close(fd);
+       kvm_vm_free(vm);
+
+       unmark_memory_failure(memory_failure_pfn, 0);
+}
+
 int main(int argc, char *argv[])
 {
        unsigned long vm_types, vm_type;
@@ -559,4 +623,5 @@ int main(int argc, char *argv[])
                test_guest_memfd(vm_type);
 
        test_guest_memfd_guest();
+       test_memory_failure_guest();
 }
-- 
2.51.0.788.g6d19910ace-goog


Reply via email to