This commit separates the SEV, SEV-ES, and SEV-SNP ioctl calls from their positive test asserts. This is done so that negative tests can be introduced and both kinds of testing can be performed independently using the same base helpers of the ioctl.
This commit also adds additional parameters such as flags to improve testing coverage for the ioctls.
Cleanups performed with no functional change intended.
Signed-off-by: Pratik R. Sampat <pratikrajesh.sampat@amd.com> --- .../selftests/kvm/include/x86_64/sev.h | 20 +-- tools/testing/selftests/kvm/lib/x86_64/sev.c | 145 ++++++++++++------ 2 files changed, 108 insertions(+), 57 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/x86_64/sev.h b/tools/testing/selftests/kvm/include/x86_64/sev.h index 43b6c52831b2..ef99151e13a7 100644 --- a/tools/testing/selftests/kvm/include/x86_64/sev.h +++ b/tools/testing/selftests/kvm/include/x86_64/sev.h @@ -37,14 +37,16 @@ enum sev_guest_state { #define GHCB_MSR_TERM_REQ 0x100
void sev_vm_launch(struct kvm_vm *vm, uint32_t policy); -void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement); -void sev_vm_launch_finish(struct kvm_vm *vm); +int sev_vm_launch_start(struct kvm_vm *vm, uint32_t policy); +int sev_vm_launch_update(struct kvm_vm *vm, uint32_t policy); +int sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement); +int sev_vm_launch_finish(struct kvm_vm *vm);
bool is_kvm_snp_supported(void);
-void snp_vm_launch(struct kvm_vm *vm, uint32_t policy); -void snp_vm_launch_update(struct kvm_vm *vm); -void snp_vm_launch_finish(struct kvm_vm *vm); +int snp_vm_launch(struct kvm_vm *vm, uint32_t policy, uint8_t flags); +int snp_vm_launch_update(struct kvm_vm *vm, uint8_t page_type); +int snp_vm_launch_finish(struct kvm_vm *vm, uint16_t flags);
struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code, struct kvm_vcpu **cpu); @@ -98,7 +100,7 @@ static inline void sev_register_encrypted_memory(struct kvm_vm *vm, vm_ioctl(vm, KVM_MEMORY_ENCRYPT_REG_REGION, &range); }
-static inline void snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, +static inline int snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, uint64_t size, uint8_t type) { struct kvm_sev_snp_launch_update update_data = { @@ -108,10 +110,10 @@ static inline void snp_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, .type = type, };
- vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_UPDATE, &update_data); + return __vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_UPDATE, &update_data); }
-static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, +static inline int sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, uint64_t size) { struct kvm_sev_launch_update_data update_data = { @@ -119,7 +121,7 @@ static inline void sev_launch_update_data(struct kvm_vm *vm, vm_paddr_t gpa, .len = size, };
- vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data); + return __vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_DATA, &update_data); }
#endif /* SELFTEST_KVM_SEV_H */ diff --git a/tools/testing/selftests/kvm/lib/x86_64/sev.c b/tools/testing/selftests/kvm/lib/x86_64/sev.c index 90231c578aca..a931a321968f 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/sev.c +++ b/tools/testing/selftests/kvm/lib/x86_64/sev.c @@ -14,15 +14,18 @@ * and find the first range, but that's correct because the condition * expression would cause us to quit the loop. */ -static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *region) +static int encrypt_region(struct kvm_vm *vm, + struct userspace_mem_region *region, + uint8_t page_type) { const struct sparsebit *protected_phy_pages = region->protected_phy_pages; const vm_paddr_t gpa_base = region->region.guest_phys_addr; const sparsebit_idx_t lowest_page_in_region = gpa_base >> vm->page_shift; sparsebit_idx_t i, j; + int ret;
if (!sparsebit_any_set(protected_phy_pages)) - return; + return 0;
if (vm->type == KVM_X86_SEV_VM || vm->type == KVM_X86_SEV_ES_VM) sev_register_encrypted_memory(vm, region); @@ -33,12 +36,18 @@ static void encrypt_region(struct kvm_vm *vm, struct userspace_mem_region *regio
if (vm->type == KVM_X86_SNP_VM) { vm_mem_set_private(vm, gpa_base + offset, size); - snp_launch_update_data(vm, gpa_base + offset, size, - KVM_SEV_SNP_PAGE_TYPE_NORMAL); + ret = snp_launch_update_data(vm, gpa_base + offset, size, + page_type); + if (ret) + return ret; continue; } - sev_launch_update_data(vm, gpa_base + offset, size); + ret = sev_launch_update_data(vm, gpa_base + offset, size); + if (ret) + return ret; } + + return 0; }
void sev_vm_init(struct kvm_vm *vm) @@ -75,83 +84,97 @@ void snp_vm_init(struct kvm_vm *vm) vm_sev_ioctl(vm, KVM_SEV_INIT2, &init); }
-void sev_vm_launch(struct kvm_vm *vm, uint32_t policy) +int sev_vm_launch_start(struct kvm_vm *vm, uint32_t policy) { struct kvm_sev_launch_start launch_start = { .policy = policy, }; - struct userspace_mem_region *region; - struct kvm_sev_guest_status status; - int ctr; - - vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start); - vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
- TEST_ASSERT_EQ(status.policy, policy); - TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE); + return __vm_sev_ioctl(vm, KVM_SEV_LAUNCH_START, &launch_start); +}
- hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) - encrypt_region(vm, region); +int sev_vm_launch_update(struct kvm_vm *vm, uint32_t policy) +{ + struct userspace_mem_region *region; + int ctr, ret;
+ hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { + ret = encrypt_region(vm, region, 0); + if (ret) + return ret; + } if (policy & SEV_POLICY_ES) vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
vm->arch.is_pt_protected = true; + + return 0; }
-void sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement) +void sev_vm_launch(struct kvm_vm *vm, uint32_t policy) { - struct kvm_sev_launch_measure launch_measure; - struct kvm_sev_guest_status guest_status; + struct kvm_sev_guest_status status; + int ret;
- launch_measure.len = 256; - launch_measure.uaddr = (__u64)measurement; - vm_sev_ioctl(vm, KVM_SEV_LAUNCH_MEASURE, &launch_measure); + ret = sev_vm_launch_start(vm, policy); + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SEV_LAUNCH_START, ret)); + + vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); + TEST_ASSERT_EQ(status.policy, policy); + TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE);
- vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &guest_status); - TEST_ASSERT_EQ(guest_status.state, SEV_GUEST_STATE_LAUNCH_SECRET); + ret = sev_vm_launch_update(vm, policy); + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SEV_LAUNCH_UPDATE_DATA, ret)); }
-void sev_vm_launch_finish(struct kvm_vm *vm) +int sev_vm_launch_measure(struct kvm_vm *vm, uint8_t *measurement) { - struct kvm_sev_guest_status status; + struct kvm_sev_launch_measure launch_measure;
- vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); - TEST_ASSERT(status.state == SEV_GUEST_STATE_LAUNCH_UPDATE || - status.state == SEV_GUEST_STATE_LAUNCH_SECRET, - "Unexpected guest state: %d", status.state); + launch_measure.len = 256; + launch_measure.uaddr = (__u64)measurement;
- vm_sev_ioctl(vm, KVM_SEV_LAUNCH_FINISH, NULL); + return __vm_sev_ioctl(vm, KVM_SEV_LAUNCH_MEASURE, &launch_measure); +}
- vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); - TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING); +int sev_vm_launch_finish(struct kvm_vm *vm) +{ + return __vm_sev_ioctl(vm, KVM_SEV_LAUNCH_FINISH, NULL); }
-void snp_vm_launch(struct kvm_vm *vm, uint32_t policy) +int snp_vm_launch(struct kvm_vm *vm, uint32_t policy, uint8_t flags) { struct kvm_sev_snp_launch_start launch_start = { .policy = policy, + .flags = flags, };
- vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_START, &launch_start); + return __vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_START, &launch_start); }
-void snp_vm_launch_update(struct kvm_vm *vm) +int snp_vm_launch_update(struct kvm_vm *vm, uint8_t page_type) { struct userspace_mem_region *region; - int ctr; + int ctr, ret;
- hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) - encrypt_region(vm, region); + hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { + ret = encrypt_region(vm, region, page_type); + if (ret) + return ret; + }
vm->arch.is_pt_protected = true; + + return 0; }
-void snp_vm_launch_finish(struct kvm_vm *vm) +int snp_vm_launch_finish(struct kvm_vm *vm, uint16_t flags) { - struct kvm_sev_snp_launch_finish launch_finish = { 0 }; + struct kvm_sev_snp_launch_finish launch_finish = { + .flags = flags, + };
- vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish); + return __vm_sev_ioctl(vm, KVM_SEV_SNP_LAUNCH_FINISH, &launch_finish); }
bool is_kvm_snp_supported(void) @@ -190,20 +213,46 @@ struct kvm_vm *vm_sev_create_with_one_vcpu(uint32_t type, void *guest_code,
void vm_sev_launch(struct kvm_vm *vm, uint32_t policy, uint8_t *measurement) { + struct kvm_sev_guest_status status; + int ret; + if (vm->type == KVM_X86_SNP_VM) { vm_enable_cap(vm, KVM_CAP_EXIT_HYPERCALL, (1 << KVM_HC_MAP_GPA_RANGE)); - snp_vm_launch(vm, policy); - snp_vm_launch_update(vm); - snp_vm_launch_finish(vm); + ret = snp_vm_launch(vm, policy, 0); + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SEV_SNP_LAUNCH_START, ret)); + + ret = snp_vm_launch_update(vm, KVM_SEV_SNP_PAGE_TYPE_NORMAL); + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SEV_SNP_LAUNCH_UPDATE, ret)); + + ret = snp_vm_launch_finish(vm, 0); + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SEV_SNP_LAUNCH_FINISH, ret)); return; }
- sev_vm_launch(vm, policy); + ret = sev_vm_launch_start(vm, policy); + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SEV_LAUNCH_START, ret)); + + vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); + TEST_ASSERT_EQ(status.policy, policy); + TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_LAUNCH_UPDATE); + + ret = sev_vm_launch_update(vm, policy); + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SEV_LAUNCH_UPDATE_DATA, ret));
if (!measurement) measurement = alloca(256);
- sev_vm_launch_measure(vm, measurement); + ret = sev_vm_launch_measure(vm, measurement); + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SEV_LAUNCH_MEASURE, ret));
- sev_vm_launch_finish(vm); + vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); + TEST_ASSERT(status.state == SEV_GUEST_STATE_LAUNCH_UPDATE || + status.state == SEV_GUEST_STATE_LAUNCH_SECRET, + "Unexpected guest state: %d", status.state); + + ret = sev_vm_launch_finish(vm); + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SEV_LAUNCH_FINISH, ret)); + + vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status); + TEST_ASSERT_EQ(status.state, SEV_GUEST_STATE_RUNNING); }