This patch series makes the KVM selftests work with NV (nested virtualization) enabled. The guest code is run in vEL2 instead of EL1. We add a command line option to enable testing of NV; the NV tests are disabled by default.
Around 12 selftests are modified in this series.
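As an illustration, once the series is applied NV mode is selected per test with the new -g option (disabled by default), e.g.:

    ./arch_timer -g 1         # validate the HVTIMER/HPTIMER from vEL2
    ./arm64/vgic_irq -g 1     # run the vGIC IRQ tests with the guest as a guest hypervisor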
Changes since v1:
- Updated the NV helper functions as per comments [1].
- Modified existing test cases to run guest code in vEL2.
[1] https://lkml.iu.edu/hypermail/linux/kernel/2502.0/07001.html
Ganapatrao Kulkarni (9):
  KVM: arm64: nv: selftests: Add support to run guest code in vEL2.
  KVM: arm64: nv: selftests: Add simple test to run guest code in vEL2
  KVM: arm64: nv: selftests: Enable hypervisor timer tests to run in vEL2
  KVM: arm64: nv: selftests: enable aarch32_id_regs test to run in vEL2
  KVM: arm64: nv: selftests: Enable vgic tests to run in vEL2
  KVM: arm64: nv: selftests: Enable set_id_regs test to run in vEL2
  KVM: arm64: nv: selftests: Enable test to run in vEL2
  KVM: selftests: arm64: Extend kvm_page_table_test to run guest code in vEL2
  KVM: arm64: nv: selftests: Enable page_fault_test test to run in vEL2
 tools/testing/selftests/kvm/Makefile.kvm      |   2 +
 tools/testing/selftests/kvm/arch_timer.c      |   8 +-
 .../selftests/kvm/arm64/aarch32_id_regs.c     |  34 ++++-
 .../testing/selftests/kvm/arm64/arch_timer.c  | 118 +++++++++++++++---
 .../selftests/kvm/arm64/nv_guest_hypervisor.c |  68 ++++++++++
 .../selftests/kvm/arm64/page_fault_test.c     |  35 +++++-
 .../testing/selftests/kvm/arm64/set_id_regs.c |  57 ++++++++-
 tools/testing/selftests/kvm/arm64/vgic_init.c |  54 +++++++-
 tools/testing/selftests/kvm/arm64/vgic_irq.c  |  27 ++--
 .../selftests/kvm/arm64/vgic_lpi_stress.c     |  19 ++-
 .../testing/selftests/kvm/guest_print_test.c  |  32 +++++
 .../selftests/kvm/include/arm64/arch_timer.h  |  16 +++
 .../kvm/include/arm64/kvm_util_arch.h         |   3 +
 .../selftests/kvm/include/arm64/nv_util.h     |  45 +++++++
 .../selftests/kvm/include/arm64/vgic.h        |   1 +
 .../testing/selftests/kvm/include/kvm_util.h  |   3 +
 .../selftests/kvm/include/timer_test.h        |   1 +
 .../selftests/kvm/kvm_page_table_test.c       |  30 ++++-
 tools/testing/selftests/kvm/lib/arm64/nv.c    |  46 +++++++
 .../selftests/kvm/lib/arm64/processor.c       |  61 ++++++---
 tools/testing/selftests/kvm/lib/arm64/vgic.c  |   8 ++
 21 files changed, 604 insertions(+), 64 deletions(-)
 create mode 100644 tools/testing/selftests/kvm/arm64/nv_guest_hypervisor.c
 create mode 100644 tools/testing/selftests/kvm/include/arm64/nv_util.h
 create mode 100644 tools/testing/selftests/kvm/lib/arm64/nv.c
This patch adds the changes required in vCPU init to run guest code in a vEL2 context, and also adds NV-specific helper functions.
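As a minimal usage sketch (based on the helpers added below; ucall handling and error checks omitted), a test asks for a vEL2 guest by creating the VM through the new NV wrapper, and the guest code can verify the exception level it runs at:

    static void guest_code(void)
    {
            /* With KVM_ARM_VCPU_HAS_EL2 set, the guest starts at vEL2 */
            GUEST_ASSERT(read_sysreg(CurrentEL) == CurrentEL_EL2);
            GUEST_DONE();
    }

    static void example(void)
    {
            struct kvm_vcpu *vcpu;
            struct kvm_vm *vm;
            int gic_fd;

            /* Create the VM, request EL2 for the vCPU and set up a GICv3 */
            vm = nv_vm_create_with_vcpus_gic(1, &vcpu, &gic_fd, guest_code);

            vcpu_run(vcpu);

            vgic_v3_close(gic_fd);
            kvm_vm_free(vm);
    }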
Signed-off-by: Ganapatrao Kulkarni gankulkarni@os.amperecomputing.com --- tools/testing/selftests/kvm/Makefile.kvm | 2 + .../kvm/include/arm64/kvm_util_arch.h | 3 + .../selftests/kvm/include/arm64/nv_util.h | 45 ++++++++++++++ .../selftests/kvm/include/arm64/vgic.h | 1 + .../testing/selftests/kvm/include/kvm_util.h | 3 + tools/testing/selftests/kvm/lib/arm64/nv.c | 46 ++++++++++++++ .../selftests/kvm/lib/arm64/processor.c | 61 ++++++++++++++----- tools/testing/selftests/kvm/lib/arm64/vgic.c | 8 +++ 8 files changed, 155 insertions(+), 14 deletions(-) create mode 100644 tools/testing/selftests/kvm/include/arm64/nv_util.h create mode 100644 tools/testing/selftests/kvm/lib/arm64/nv.c
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm index f773f8f99249..3348f729d3b2 100644 --- a/tools/testing/selftests/kvm/Makefile.kvm +++ b/tools/testing/selftests/kvm/Makefile.kvm @@ -37,6 +37,7 @@ LIBKVM_arm64 += lib/arm64/processor.c LIBKVM_arm64 += lib/arm64/spinlock.c LIBKVM_arm64 += lib/arm64/ucall.c LIBKVM_arm64 += lib/arm64/vgic.c +LIBKVM_arm64 += lib/arm64/nv.c
LIBKVM_s390 += lib/s390/diag318_test_handler.c LIBKVM_s390 += lib/s390/processor.c @@ -155,6 +156,7 @@ TEST_GEN_PROGS_arm64 += arm64/vgic_irq TEST_GEN_PROGS_arm64 += arm64/vgic_lpi_stress TEST_GEN_PROGS_arm64 += arm64/vpmu_counter_access TEST_GEN_PROGS_arm64 += arm64/no-vgic-v3 +TEST_GEN_PROGS_arm64 += arm64/nv_guest_hypervisor TEST_GEN_PROGS_arm64 += access_tracking_perf_test TEST_GEN_PROGS_arm64 += arch_timer TEST_GEN_PROGS_arm64 += coalesced_io_test diff --git a/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h b/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h index e43a57d99b56..ab5279c24413 100644 --- a/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h +++ b/tools/testing/selftests/kvm/include/arm64/kvm_util_arch.h @@ -2,6 +2,9 @@ #ifndef SELFTEST_KVM_UTIL_ARCH_H #define SELFTEST_KVM_UTIL_ARCH_H
+#define CurrentEL_EL1 (1 << 2) +#define CurrentEL_EL2 (2 << 2) + struct kvm_vm_arch {};
#endif // SELFTEST_KVM_UTIL_ARCH_H diff --git a/tools/testing/selftests/kvm/include/arm64/nv_util.h b/tools/testing/selftests/kvm/include/arm64/nv_util.h new file mode 100644 index 000000000000..622a17c9d142 --- /dev/null +++ b/tools/testing/selftests/kvm/include/arm64/nv_util.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2025 Ampere Computing + */ +#ifndef SELFTEST_NV_UTIL_H +#define SELFTEST_NV_UTIL_H + +#include <linux/bitmap.h> +#include <vgic.h> + +#define HCR_NV2 (UL(1) << 45) +#define HCR_AT (UL(1) << 44) +#define HCR_NV (UL(1) << 42) +#define HCR_E2H (UL(1) << 34) +#define HCR_TTLB (UL(1) << 25) + +/* Enable NV2 and guest in VHE mode */ +#define HCR_EL2_NV_EANBLE (HCR_E2H | HCR_NV | HCR_NV2 | HCR_AT | HCR_TTLB) + +struct kvm_vm *nv_vm_create_with_vcpus_gic(uint32_t nr_vcpus, + struct kvm_vcpu **vcpus, int *gic_fd, void *guest_code); + +struct kvm_vm *__nv_vm_create_with_vcpus_gic(struct vm_shape shape, + uint32_t nr_vcpus, struct kvm_vcpu **vcpus, + uint64_t extra_mem_pages, int *gic_fd, void *guest_code); + +/* NV helpers */ +static inline void init_vcpu_nested(struct kvm_vcpu_init *init) +{ + init->features[0] |= (1 << KVM_ARM_VCPU_HAS_EL2); +} + +static inline bool kvm_arm_vcpu_has_el2(struct kvm_vcpu_init *init) +{ + unsigned long features = init->features[0]; + + return test_bit(KVM_ARM_VCPU_HAS_EL2, &features); +} + +static inline bool is_vcpu_nested(struct kvm_vcpu *vcpu) +{ + return vcpu->nested; +} + +#endif /* SELFTEST_NV_UTIL_H */ diff --git a/tools/testing/selftests/kvm/include/arm64/vgic.h b/tools/testing/selftests/kvm/include/arm64/vgic.h index c481d0c00a5d..46142fa36199 100644 --- a/tools/testing/selftests/kvm/include/arm64/vgic.h +++ b/tools/testing/selftests/kvm/include/arm64/vgic.h @@ -17,6 +17,7 @@ index)
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs); +void vgic_v3_close(int gic_fd);
#define VGIC_MAX_RESERVED 1023
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 373912464fb4..9b26b9124dc3 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -65,6 +65,9 @@ struct kvm_vcpu { struct kvm_dirty_gfn *dirty_gfns; uint32_t fetch_index; uint32_t dirty_gfns_count; +#ifdef __aarch64__ + bool nested; +#endif };
struct userspace_mem_regions { diff --git a/tools/testing/selftests/kvm/lib/arm64/nv.c b/tools/testing/selftests/kvm/lib/arm64/nv.c new file mode 100644 index 000000000000..e930808a7ed4 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/arm64/nv.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2025 Ampere Computing LLC + */ + +#include <linux/compiler.h> +#include <assert.h> + +#include "guest_modes.h" +#include "kvm_util.h" +#include "nv_util.h" +#include "processor.h" + +struct kvm_vm *__nv_vm_create_with_vcpus_gic(struct vm_shape shape, uint32_t nr_vcpus, + struct kvm_vcpu **vcpus, uint64_t extra_mem_pages, int *gic_fd, void *guest_code) +{ + struct kvm_vcpu_init init; + struct kvm_vm *vm; + int i; + + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_EL2)); + + vm = __vm_create(shape, nr_vcpus, extra_mem_pages); + vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init); + init_vcpu_nested(&init); + + for (i = 0; i < nr_vcpus; ++i) { + vcpus[i] = aarch64_vcpu_add(vm, i, &init, guest_code); + __TEST_REQUIRE(is_vcpu_nested(vcpus[i]), "Failed to Enable NV"); + } + + /* vgic is not created, If gic_fd argument is NULL */ + if (gic_fd) { + *gic_fd = vgic_v3_setup(vm, nr_vcpus, 64); + __TEST_REQUIRE(*gic_fd >= 0, "Failed to create vgic-v3"); + } + + return vm; +} + +struct kvm_vm *nv_vm_create_with_vcpus_gic(uint32_t nr_vcpus, + struct kvm_vcpu **vcpus, int *gic_fd, void *guest_code) +{ + return __nv_vm_create_with_vcpus_gic(VM_SHAPE_DEFAULT, + nr_vcpus, vcpus, 0, gic_fd, guest_code); +} diff --git a/tools/testing/selftests/kvm/lib/arm64/processor.c b/tools/testing/selftests/kvm/lib/arm64/processor.c index 7ba3aa3755f3..6e759981bf9e 100644 --- a/tools/testing/selftests/kvm/lib/arm64/processor.c +++ b/tools/testing/selftests/kvm/lib/arm64/processor.c @@ -10,6 +10,7 @@
#include "guest_modes.h" #include "kvm_util.h" +#include "nv_util.h" #include "processor.h" #include "ucall_common.h"
@@ -258,14 +259,49 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) } }
+static void aarch64_vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t sctlr_el1, + uint64_t tcr_el1, uint64_t ttbr0_el1) +{ + uint64_t fpen; + + /* + * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15 + * registers, which the variable argument list macros do. + */ + fpen = 3 << 20; + + if (is_vcpu_nested(vcpu)) { + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPTR_EL2), fpen); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL2), sctlr_el1); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL2), tcr_el1); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL2), DEFAULT_MAIR_EL1); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL2), ttbr0_el1); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_HCR_EL2), HCR_EL2_NV_EANBLE); + } else { + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), fpen); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1); + + } + + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id); +} + void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) { struct kvm_vcpu_init default_init = { .target = -1, }; struct kvm_vm *vm = vcpu->vm; uint64_t sctlr_el1, tcr_el1, ttbr0_el1;
- if (!init) + if (!init) { init = &default_init; + } else { + /* Is this vcpu a Guest-Hypervisor? */ + if (kvm_arm_vcpu_has_el2(init)) + vcpu->nested = true; + }
if (init->target == -1) { struct kvm_vcpu_init preferred; @@ -275,12 +311,6 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init)
vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init);
- /* - * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15 - * registers, which the variable argument list macros do. - */ - vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20); - sctlr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1)); tcr_el1 = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1));
@@ -349,11 +379,7 @@ void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) if (use_lpa2_pte_format(vm)) tcr_el1 |= (1ul << 59) /* DS */;
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1); - vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1); - vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1); - vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1); - vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id); + aarch64_vcpu_set_reg(vcpu, sctlr_el1, tcr_el1, ttbr0_el1); }
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) @@ -387,7 +413,11 @@ static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
aarch64_vcpu_setup(vcpu, init);
- vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size); + if (is_vcpu_nested(vcpu)) + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SP_EL2), stack_vaddr + stack_size); + else + vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size); + return vcpu; }
@@ -457,7 +487,10 @@ void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu) { extern char vectors;
- vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors); + if (is_vcpu_nested(vcpu)) + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL2), (uint64_t)&vectors); + else + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors); }
void route_exception(struct ex_regs *regs, int vector) diff --git a/tools/testing/selftests/kvm/lib/arm64/vgic.c b/tools/testing/selftests/kvm/lib/arm64/vgic.c index 4427f43f73ea..67822b803d0f 100644 --- a/tools/testing/selftests/kvm/lib/arm64/vgic.c +++ b/tools/testing/selftests/kvm/lib/arm64/vgic.c @@ -79,6 +79,14 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs) return gic_fd; }
+void vgic_v3_close(int gic_fd) +{ + if (gic_fd < 0) + return; + + close(gic_fd); +} + /* should only work for level sensitive interrupts */ int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) {
Add a simple test that runs guest code with NV enabled. With NV enabled, the guest code runs in a vEL2 context.
Signed-off-by: Ganapatrao Kulkarni gankulkarni@os.amperecomputing.com --- .../selftests/kvm/arm64/nv_guest_hypervisor.c | 68 +++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 tools/testing/selftests/kvm/arm64/nv_guest_hypervisor.c
diff --git a/tools/testing/selftests/kvm/arm64/nv_guest_hypervisor.c b/tools/testing/selftests/kvm/arm64/nv_guest_hypervisor.c new file mode 100644 index 000000000000..7d7b3944e229 --- /dev/null +++ b/tools/testing/selftests/kvm/arm64/nv_guest_hypervisor.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2025 Ampere Computing LLC + */ +#include <kvm_util.h> +#include <nv_util.h> +#include <processor.h> + +static void guest_code(void) +{ + if (read_sysreg(CurrentEL) == CurrentEL_EL2) + GUEST_PRINTF("Test PASS\n"); + else + GUEST_FAIL("Fail to run in vEL2\n"); + + GUEST_DONE(); +} + +static void guest_undef_handler(struct ex_regs *regs) +{ + GUEST_FAIL("Unexpected exception far_el1 = 0x%lx", read_sysreg(far_el1)); +} + +static void test_run_vcpu(struct kvm_vcpu *vcpu) +{ + struct ucall uc; + + do { + vcpu_run(vcpu); + + switch (get_ucall(vcpu, &uc)) { + case UCALL_ABORT: + REPORT_GUEST_ASSERT(uc); + break; + case UCALL_PRINTF: + printf("%s", uc.buffer); + break; + case UCALL_DONE: + break; + default: + TEST_FAIL("Unknown ucall %lu", uc.cmd); + } + } while (uc.cmd != UCALL_DONE); +} + +static void test_nv_guest_hypervisor(void) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + int gic_fd = -1; + + vm = nv_vm_create_with_vcpus_gic(1, &vcpu, &gic_fd, guest_code); + vm_init_descriptor_tables(vm); + vcpu_init_descriptor_tables(vcpu); + vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, + ESR_ELx_EC_UNKNOWN, guest_undef_handler); + + test_run_vcpu(vcpu); + + vgic_v3_close(gic_fd); + kvm_vm_free(vm); +} + +int main(int argc, char *argv[]) +{ + test_nv_guest_hypervisor(); + return 0; +}
Add the changes required to enable and test the HVTIMER and HPTIMER in vEL2. In the default case, the PTIMER and VTIMER are validated; with NV enabled (argument "-g 1"), the HPTIMER and HVTIMER are validated instead by injecting the respective timer interrupts. Since the guest hypervisor runs with HCR_EL2.E2H set, accesses to the CNT{V,P}_*_EL0 registers from vEL2 are redirected to the EL2 timers, so the HVIRTUAL/HPHYSICAL accessors can reuse them.
Signed-off-by: Ganapatrao Kulkarni gankulkarni@os.amperecomputing.com --- tools/testing/selftests/kvm/arch_timer.c | 8 +- .../testing/selftests/kvm/arm64/arch_timer.c | 118 +++++++++++++++--- .../selftests/kvm/include/arm64/arch_timer.h | 16 +++ .../selftests/kvm/include/timer_test.h | 1 + 4 files changed, 123 insertions(+), 20 deletions(-)
diff --git a/tools/testing/selftests/kvm/arch_timer.c b/tools/testing/selftests/kvm/arch_timer.c index acb2cb596332..5c30bda0462b 100644 --- a/tools/testing/selftests/kvm/arch_timer.c +++ b/tools/testing/selftests/kvm/arch_timer.c @@ -35,6 +35,7 @@ struct test_args test_args = { .migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS, .timer_err_margin_us = TIMER_TEST_ERR_MARGIN_US, .reserved = 1, + .is_nested = false, };
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; @@ -43,6 +44,7 @@ struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS]; static pthread_t pt_vcpu_run[KVM_MAX_VCPUS]; static unsigned long *vcpu_done_map; static pthread_mutex_t vcpu_done_map_lock; +bool is_nested;
static void *test_vcpu_run(void *arg) { @@ -193,6 +195,7 @@ static void test_print_help(char *name) pr_info("\t-o: Counter offset (in counter cycles, default: 0) [aarch64-only]\n"); pr_info("\t-e: Interrupt arrival error margin (in us) of the guest timer (default: %u)\n", TIMER_TEST_ERR_MARGIN_US); + pr_info("\t-g: Enable Nested Virtualization, run guest code as guest hypervisor (default: Disabled)\n"); pr_info("\t-h: print this help screen\n"); }
@@ -200,7 +203,7 @@ static bool parse_args(int argc, char *argv[]) { int opt;
- while ((opt = getopt(argc, argv, "hn:i:p:m:o:e:")) != -1) { + while ((opt = getopt(argc, argv, "hn:i:p:m:o:e:g:")) != -1) { switch (opt) { case 'n': test_args.nr_vcpus = atoi_positive("Number of vCPUs", optarg); @@ -226,6 +229,9 @@ static bool parse_args(int argc, char *argv[]) test_args.counter_offset = strtol(optarg, NULL, 0); test_args.reserved = 0; break; + case 'g': + test_args.is_nested = atoi_non_negative("Is Nested", optarg); + break; case 'h': default: goto err; diff --git a/tools/testing/selftests/kvm/arm64/arch_timer.c b/tools/testing/selftests/kvm/arm64/arch_timer.c index eeba1cc87ff8..50bf05bb6f85 100644 --- a/tools/testing/selftests/kvm/arm64/arch_timer.c +++ b/tools/testing/selftests/kvm/arm64/arch_timer.c @@ -12,16 +12,22 @@ #include "timer_test.h" #include "ucall_common.h" #include "vgic.h" +#include <nv_util.h>
enum guest_stage { GUEST_STAGE_VTIMER_CVAL = 1, GUEST_STAGE_VTIMER_TVAL, GUEST_STAGE_PTIMER_CVAL, GUEST_STAGE_PTIMER_TVAL, + GUEST_STAGE_HVTIMER_CVAL, + GUEST_STAGE_HVTIMER_TVAL, + GUEST_STAGE_HPTIMER_CVAL, + GUEST_STAGE_HPTIMER_TVAL, GUEST_STAGE_MAX, };
static int vtimer_irq, ptimer_irq; +static int hvtimer_irq, hptimer_irq;
static void guest_configure_timer_action(struct test_vcpu_shared_data *shared_data) @@ -47,6 +53,26 @@ guest_configure_timer_action(struct test_vcpu_shared_data *shared_data) shared_data->xcnt = timer_get_cntct(PHYSICAL); timer_set_ctl(PHYSICAL, CTL_ENABLE); break; + case GUEST_STAGE_HVTIMER_CVAL: + timer_set_next_cval_ms(HVIRTUAL, test_args.timer_period_ms); + shared_data->xcnt = timer_get_cntct(HVIRTUAL); + timer_set_ctl(HVIRTUAL, CTL_ENABLE); + break; + case GUEST_STAGE_HVTIMER_TVAL: + timer_set_next_tval_ms(HVIRTUAL, test_args.timer_period_ms); + shared_data->xcnt = timer_get_cntct(HVIRTUAL); + timer_set_ctl(HVIRTUAL, CTL_ENABLE); + break; + case GUEST_STAGE_HPTIMER_CVAL: + timer_set_next_cval_ms(HPHYSICAL, test_args.timer_period_ms); + shared_data->xcnt = timer_get_cntct(HPHYSICAL); + timer_set_ctl(HPHYSICAL, CTL_ENABLE); + break; + case GUEST_STAGE_HPTIMER_TVAL: + timer_set_next_tval_ms(HPHYSICAL, test_args.timer_period_ms); + shared_data->xcnt = timer_get_cntct(HPHYSICAL); + timer_set_ctl(HPHYSICAL, CTL_ENABLE); + break; default: GUEST_ASSERT(0); } @@ -75,6 +101,16 @@ static void guest_validate_irq(unsigned int intid, accessor = PHYSICAL; timer_irq = ptimer_irq; break; + case GUEST_STAGE_HVTIMER_CVAL: + case GUEST_STAGE_HVTIMER_TVAL: + accessor = HVIRTUAL; + timer_irq = hvtimer_irq; + break; + case GUEST_STAGE_HPTIMER_CVAL: + case GUEST_STAGE_HPTIMER_TVAL: + accessor = HPHYSICAL; + timer_irq = hptimer_irq; + break; default: GUEST_ASSERT(0); return; @@ -142,38 +178,79 @@ static void guest_code(void) { uint32_t cpu = guest_get_vcpuid(); struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; + bool is_nested = false; + enum arch_timer vtimer, ptimer; + int vtmr_irq, ptmr_irq; + enum guest_stage stage_vtimer_cval, stage_vtimer_tval; + enum guest_stage stage_ptimer_cval, stage_ptimer_tval;
- local_irq_disable(); + if (read_sysreg(CurrentEL) == CurrentEL_EL2) + is_nested = true;
+ local_irq_disable(); gic_init(GIC_V3, test_args.nr_vcpus);
- timer_set_ctl(VIRTUAL, CTL_IMASK); - timer_set_ctl(PHYSICAL, CTL_IMASK); + if (is_nested) { + + vtimer = HVIRTUAL; + ptimer = HPHYSICAL; + vtmr_irq = hvtimer_irq; + ptmr_irq = hptimer_irq; + stage_vtimer_cval = GUEST_STAGE_HVTIMER_CVAL; + stage_vtimer_tval = GUEST_STAGE_HVTIMER_TVAL; + stage_ptimer_cval = GUEST_STAGE_HPTIMER_CVAL; + stage_ptimer_tval = GUEST_STAGE_HPTIMER_TVAL; + } else { + vtimer = VIRTUAL; + ptimer = PHYSICAL; + vtmr_irq = vtimer_irq; + ptmr_irq = ptimer_irq; + stage_vtimer_cval = GUEST_STAGE_VTIMER_CVAL; + stage_vtimer_tval = GUEST_STAGE_VTIMER_TVAL; + stage_ptimer_cval = GUEST_STAGE_PTIMER_CVAL; + stage_ptimer_tval = GUEST_STAGE_PTIMER_TVAL; + } + + timer_set_ctl(vtimer, CTL_IMASK); + timer_set_ctl(ptimer, CTL_IMASK); + gic_irq_enable(vtmr_irq); + gic_irq_enable(ptmr_irq);
- gic_irq_enable(vtimer_irq); - gic_irq_enable(ptimer_irq); local_irq_enable();
- guest_run_stage(shared_data, GUEST_STAGE_VTIMER_CVAL); - guest_run_stage(shared_data, GUEST_STAGE_VTIMER_TVAL); - guest_run_stage(shared_data, GUEST_STAGE_PTIMER_CVAL); - guest_run_stage(shared_data, GUEST_STAGE_PTIMER_TVAL); + guest_run_stage(shared_data, stage_vtimer_cval); + guest_run_stage(shared_data, stage_vtimer_tval); + guest_run_stage(shared_data, stage_ptimer_cval); + guest_run_stage(shared_data, stage_ptimer_tval);
GUEST_DONE(); }
static void test_init_timer_irq(struct kvm_vm *vm) { - /* Timer initid should be same for all the vCPUs, so query only vCPU-0 */ - vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL, - KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq); - vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL, - KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq);
- sync_global_to_guest(vm, ptimer_irq); - sync_global_to_guest(vm, vtimer_irq); - - pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq); + /* Timer initid should be same for all the vCPUs, so query only vCPU-0 */ + if (is_vcpu_nested(vcpus[0])) { + vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL, + KVM_ARM_VCPU_TIMER_IRQ_HPTIMER, &hptimer_irq); + vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL, + KVM_ARM_VCPU_TIMER_IRQ_HVTIMER, &hvtimer_irq); + + sync_global_to_guest(vm, hptimer_irq); + sync_global_to_guest(vm, hvtimer_irq); + + pr_debug("hptimer_irq: %d; hvtimer_irq: %d\n", hptimer_irq, hvtimer_irq); + } else { + vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL, + KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq); + vcpu_device_attr_get(vcpus[0], KVM_ARM_VCPU_TIMER_CTRL, + KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq); + + sync_global_to_guest(vm, ptimer_irq); + sync_global_to_guest(vm, vtimer_irq); + + pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq); + } }
static int gic_fd; @@ -184,7 +261,10 @@ struct kvm_vm *test_vm_create(void) unsigned int i; int nr_vcpus = test_args.nr_vcpus;
- vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus); + if (test_args.is_nested) + vm = nv_vm_create_with_vcpus_gic(nr_vcpus, vcpus, NULL, guest_code); + else + vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus);
vm_init_descriptor_tables(vm); vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler); diff --git a/tools/testing/selftests/kvm/include/arm64/arch_timer.h b/tools/testing/selftests/kvm/include/arm64/arch_timer.h index bf461de34785..82ba908fba8b 100644 --- a/tools/testing/selftests/kvm/include/arm64/arch_timer.h +++ b/tools/testing/selftests/kvm/include/arm64/arch_timer.h @@ -11,6 +11,8 @@ enum arch_timer { VIRTUAL, PHYSICAL, + HVIRTUAL, + HPHYSICAL, };
#define CTL_ENABLE (1 << 0) @@ -37,8 +39,10 @@ static inline uint64_t timer_get_cntct(enum arch_timer timer)
switch (timer) { case VIRTUAL: + case HVIRTUAL: return read_sysreg(cntvct_el0); case PHYSICAL: + case HPHYSICAL: return read_sysreg(cntpct_el0); default: GUEST_FAIL("Unexpected timer type = %u", timer); @@ -52,9 +56,11 @@ static inline void timer_set_cval(enum arch_timer timer, uint64_t cval) { switch (timer) { case VIRTUAL: + case HVIRTUAL: write_sysreg(cval, cntv_cval_el0); break; case PHYSICAL: + case HPHYSICAL: write_sysreg(cval, cntp_cval_el0); break; default: @@ -68,8 +74,10 @@ static inline uint64_t timer_get_cval(enum arch_timer timer) { switch (timer) { case VIRTUAL: + case HVIRTUAL: return read_sysreg(cntv_cval_el0); case PHYSICAL: + case HPHYSICAL: return read_sysreg(cntp_cval_el0); default: GUEST_FAIL("Unexpected timer type = %u", timer); @@ -83,9 +91,11 @@ static inline void timer_set_tval(enum arch_timer timer, int32_t tval) { switch (timer) { case VIRTUAL: + case HVIRTUAL: write_sysreg(tval, cntv_tval_el0); break; case PHYSICAL: + case HPHYSICAL: write_sysreg(tval, cntp_tval_el0); break; default: @@ -100,8 +110,10 @@ static inline int32_t timer_get_tval(enum arch_timer timer) isb(); switch (timer) { case VIRTUAL: + case HVIRTUAL: return read_sysreg(cntv_tval_el0); case PHYSICAL: + case HPHYSICAL: return read_sysreg(cntp_tval_el0); default: GUEST_FAIL("Could not get timer %d\n", timer); @@ -115,9 +127,11 @@ static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl) { switch (timer) { case VIRTUAL: + case HVIRTUAL: write_sysreg(ctl, cntv_ctl_el0); break; case PHYSICAL: + case HPHYSICAL: write_sysreg(ctl, cntp_ctl_el0); break; default: @@ -131,8 +145,10 @@ static inline uint32_t timer_get_ctl(enum arch_timer timer) { switch (timer) { case VIRTUAL: + case HVIRTUAL: return read_sysreg(cntv_ctl_el0); case PHYSICAL: + case HPHYSICAL: return read_sysreg(cntp_ctl_el0); default: GUEST_FAIL("Unexpected timer type = %u", timer); diff --git a/tools/testing/selftests/kvm/include/timer_test.h b/tools/testing/selftests/kvm/include/timer_test.h index 9b6edaafe6d4..95f61c4a8a80 100644 --- a/tools/testing/selftests/kvm/include/timer_test.h +++ b/tools/testing/selftests/kvm/include/timer_test.h @@ -26,6 +26,7 @@ struct test_args { /* Members of struct kvm_arm_counter_offset */ uint64_t counter_offset; uint64_t reserved; + bool is_nested; };
/* Shared variables between host and guest */
Extend the AArch32 ID register test to run its guest code with NV enabled.
NV is enabled using a command line argument and is disabled by default.
Signed-off-by: Ganapatrao Kulkarni gankulkarni@os.amperecomputing.com --- .../selftests/kvm/arm64/aarch32_id_regs.c | 34 +++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c index cef8f7323ceb..8a9afda8a3e2 100644 --- a/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c +++ b/tools/testing/selftests/kvm/arm64/aarch32_id_regs.c @@ -11,6 +11,7 @@ #include <stdint.h>
#include "kvm_util.h" +#include "nv_util.h" #include "processor.h" #include "test_util.h" #include <linux/bitfield.h> @@ -150,12 +151,38 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu) return el0 == ID_AA64PFR0_EL1_EL0_IMP; }
-int main(void) +static void help(const char *name) +{ + pr_info("Usage: %s [-g enable]\n", name); + pr_info("\t-g: Enable Nested Virtualization, run guest code as guest hypervisor (default: Disabled)\n"); + exit(1); +} + +int main(int argc, char **argv) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; + int gic_fd; + int opt; + bool is_nested = false; + + + while ((opt = getopt(argc, argv, "h:g:")) != -1) { + switch (opt) { + case 'g': + is_nested = atoi_non_negative("Is Nested", optarg); + break; + case 'h': + default: + help(argv[0]); + break; + } + }
- vm = vm_create_with_one_vcpu(&vcpu, guest_main); + if (is_nested) + vm = nv_vm_create_with_vcpus_gic(1, &vcpu, &gic_fd, guest_main); + else + vm = vm_create_with_one_vcpu(&vcpu, guest_main);
TEST_REQUIRE(vcpu_aarch64_only(vcpu));
@@ -163,5 +190,8 @@ int main(void) test_user_raz_invariant(vcpu); test_guest_raz(vcpu);
+ if (is_nested) + close(gic_fd); + kvm_vm_free(vm); }
Extend the vgic_init, vgic_irq and vgic_lpi_stress tests to run with NV enabled (vEL2). NV is enabled using a command line argument and is disabled by default. NV mode is applicable to the GICv3 tests only.
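The vgic tests create the GIC device themselves, so rather than going through nv_vm_create_with_vcpus_gic() they request vEL2 per vCPU. Roughly, the pattern used in the hunks below is (a sketch, error handling omitted):

    struct kvm_vcpu_init init;

    vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
    if (is_nested)
            init_vcpu_nested(&init);        /* set KVM_ARM_VCPU_HAS_EL2 */

    for (i = 0; i < nr_vcpus; i++)
            vcpus[i] = aarch64_vcpu_add(vm, i, &init, guest_code);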
Signed-off-by: Ganapatrao Kulkarni gankulkarni@os.amperecomputing.com --- tools/testing/selftests/kvm/arm64/vgic_init.c | 54 +++++++++++++++++-- tools/testing/selftests/kvm/arm64/vgic_irq.c | 27 ++++++---- .../selftests/kvm/arm64/vgic_lpi_stress.c | 19 +++++-- 3 files changed, 83 insertions(+), 17 deletions(-)
diff --git a/tools/testing/selftests/kvm/arm64/vgic_init.c b/tools/testing/selftests/kvm/arm64/vgic_init.c index b3b5fb0ff0a9..174350291c96 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_init.c +++ b/tools/testing/selftests/kvm/arm64/vgic_init.c @@ -13,6 +13,7 @@ #include "kvm_util.h" #include "processor.h" #include "vgic.h" +#include "nv_util.h"
#define NR_VCPUS 4
@@ -29,6 +30,7 @@ struct vm_gic { uint32_t gic_dev_type; };
+static bool is_nested; static uint64_t max_phys_size;
/* @@ -75,9 +77,19 @@ static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type, struct kvm_vcpu *vcpus[]) { struct vm_gic v; + struct kvm_vcpu_init init; + int i;
v.gic_dev_type = gic_dev_type; - v.vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus); + + v.vm = vm_create(nr_vcpus); + vm_ioctl(v.vm, KVM_ARM_PREFERRED_TARGET, &init); + if (is_nested) + init_vcpu_nested(&init); + + for (i = 0; i < nr_vcpus; i++) + vcpus[i] = aarch64_vcpu_add(v.vm, i, &init, guest_code); + v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
return v; @@ -336,14 +348,19 @@ static void test_vgic_then_vcpus(uint32_t gic_dev_type) struct kvm_vcpu *vcpus[NR_VCPUS]; struct vm_gic v; int ret, i; + struct kvm_vcpu_init init;
v = vm_gic_create_with_vcpus(gic_dev_type, 1, vcpus);
subtest_dist_rdist(&v);
/* Add the rest of the VCPUs */ + vm_ioctl(v.vm, KVM_ARM_PREFERRED_TARGET, &init); + if (is_nested) + init_vcpu_nested(&init); + for (i = 1; i < NR_VCPUS; ++i) - vcpus[i] = vm_vcpu_add(v.vm, i, guest_code); + vcpus[i] = aarch64_vcpu_add(v.vm, i, &init, guest_code);
ret = run_vcpu(vcpus[3]); TEST_ASSERT(ret == -EINVAL, "dist/rdist overlap detected on 1st vcpu run"); @@ -606,6 +623,7 @@ static void test_v3_redist_ipa_range_check_at_vcpu_run(void) struct vm_gic v; int ret, i; uint64_t addr; + struct kvm_vcpu_init init;
v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, 1, vcpus);
@@ -619,8 +637,12 @@ static void test_v3_redist_ipa_range_check_at_vcpu_run(void) KVM_VGIC_V3_ADDR_TYPE_DIST, &addr);
/* Add the rest of the VCPUs */ - for (i = 1; i < NR_VCPUS; ++i) - vcpus[i] = vm_vcpu_add(v.vm, i, guest_code); + vm_ioctl(v.vm, KVM_ARM_PREFERRED_TARGET, &init); + if (is_nested) + init_vcpu_nested(&init); + + for (i = 1; i < NR_VCPUS; i++) + vcpus[i] = aarch64_vcpu_add(v.vm, i, &init, guest_code);
kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, KVM_DEV_ARM_VGIC_CTRL_INIT, NULL); @@ -733,11 +755,33 @@ void run_tests(uint32_t gic_dev_type) } }
-int main(int ac, char **av) +static void pr_usage(const char *name) +{ + pr_info("%s [-g nv] -h\n", name); + pr_info(" -g:\tEnable Nested Virtualization, run guest code as guest hypervisor (default: Disabled)\n"); +} + +int main(int argc, char **argv) { int ret; int pa_bits; int cnt_impl = 0; + int opt; + + while ((opt = getopt(argc, argv, "g:")) != -1) { + switch (opt) { + case 'g': + is_nested = atoi_non_negative("Is Nested", optarg); + break; + case 'h': + default: + pr_usage(argv[0]); + return 1; + } + } + + if (is_nested) + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_EL2));
pa_bits = vm_guest_mode_params[VM_MODE_DEFAULT].pa_bits; max_phys_size = 1ULL << pa_bits; diff --git a/tools/testing/selftests/kvm/arm64/vgic_irq.c b/tools/testing/selftests/kvm/arm64/vgic_irq.c index f4ac28d53747..e4319f91f7cd 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_irq.c +++ b/tools/testing/selftests/kvm/arm64/vgic_irq.c @@ -15,6 +15,7 @@ #include "processor.h" #include "test_util.h" #include "kvm_util.h" +#include "nv_util.h" #include "gic.h" #include "gic_v3.h" #include "vgic.h" @@ -728,7 +729,7 @@ static void print_args(struct test_args *args) args->eoi_split); }
-static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split) +static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split, bool is_nested) { struct ucall uc; int gic_fd; @@ -747,7 +748,10 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
print_args(&args);
- vm = vm_create_with_one_vcpu(&vcpu, guest_code); + if (is_nested) + vm = nv_vm_create_with_vcpus_gic(1, &vcpu, NULL, guest_code); + else + vm = vm_create_with_one_vcpu(&vcpu, guest_code);
vm_init_descriptor_tables(vm); vcpu_init_descriptor_tables(vcpu); @@ -795,7 +799,8 @@ static void help(const char *name) "It has to be a multiple of 32 and between 64 and 1024.\n"); printf(" -e: if 1 then EOI is split into a write to DIR on top " "of writing EOI.\n"); - printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0)."); + printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).\n"); + printf(" -g: Enable Nested Virtualization, run guest code as guest hypervisor (default: Disabled)\n"); puts(""); exit(1); } @@ -807,8 +812,9 @@ int main(int argc, char **argv) bool level_sensitive = false; int opt; bool eoi_split = false; + bool is_nested = false;
- while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) { + while ((opt = getopt(argc, argv, "hn:e:l:g:")) != -1) { switch (opt) { case 'n': nr_irqs = atoi_non_negative("Number of IRQs", optarg); @@ -823,6 +829,9 @@ int main(int argc, char **argv) level_sensitive = (bool)atoi_paranoid(optarg); default_args = false; break; + case 'g': + is_nested = atoi_non_negative("Is Nested", optarg); + break; case 'h': default: help(argv[0]); @@ -835,12 +844,12 @@ int main(int argc, char **argv) * combinations. */ if (default_args) { - test_vgic(nr_irqs, false /* level */, false /* eoi_split */); - test_vgic(nr_irqs, false /* level */, true /* eoi_split */); - test_vgic(nr_irqs, true /* level */, false /* eoi_split */); - test_vgic(nr_irqs, true /* level */, true /* eoi_split */); + test_vgic(nr_irqs, false /* level */, false /* eoi_split */, is_nested); + test_vgic(nr_irqs, false /* level */, true /* eoi_split */, is_nested); + test_vgic(nr_irqs, true /* level */, false /* eoi_split */, is_nested); + test_vgic(nr_irqs, true /* level */, true /* eoi_split */, is_nested); } else { - test_vgic(nr_irqs, level_sensitive, eoi_split); + test_vgic(nr_irqs, level_sensitive, eoi_split, is_nested); }
return 0; diff --git a/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c b/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c index fc4fe52fb6f8..63de3903b2c8 100644 --- a/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c +++ b/tools/testing/selftests/kvm/arm64/vgic_lpi_stress.c @@ -11,6 +11,7 @@ #include <sys/sysinfo.h>
#include "kvm_util.h" +#include "nv_util.h" #include "gic.h" #include "gic_v3.h" #include "gic_v3_its.h" @@ -43,10 +44,12 @@ static struct test_data {
vm_paddr_t lpi_prop_table; vm_paddr_t lpi_pend_tables; + bool is_nested; } test_data = { .nr_cpus = 1, .nr_devices = 1, .nr_event_ids = 16, + .is_nested = false, };
static void guest_irq_handler(struct ex_regs *regs) @@ -333,14 +336,20 @@ static void run_test(void) static void setup_vm(void) { int i; + bool is_nested = test_data.is_nested; + u32 nr_cpus = test_data.nr_cpus;
vcpus = malloc(test_data.nr_cpus * sizeof(struct kvm_vcpu)); TEST_ASSERT(vcpus, "Failed to allocate vCPU array");
- vm = vm_create_with_vcpus(test_data.nr_cpus, guest_code, vcpus); + + if (is_nested) + vm = nv_vm_create_with_vcpus_gic(nr_cpus, vcpus, NULL, guest_code); + else + vm = vm_create_with_vcpus(nr_cpus, guest_code, vcpus);
vm_init_descriptor_tables(vm); - for (i = 0; i < test_data.nr_cpus; i++) + for (i = 0; i < nr_cpus; i++) vcpu_init_descriptor_tables(vcpus[i]);
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler); @@ -367,6 +376,7 @@ static void pr_usage(const char *name) pr_info(" -d:\tnumber of devices (default: %u)\n", test_data.nr_devices); pr_info(" -e:\tnumber of event IDs per device (default: %u)\n", test_data.nr_event_ids); pr_info(" -i:\tnumber of iterations (default: %lu)\n", nr_iterations); + pr_info(" -g:\tEnable Nested Virtualization, run guest code as guest hypervisor (default: Disabled)\n"); }
int main(int argc, char **argv) @@ -374,7 +384,7 @@ int main(int argc, char **argv) u32 nr_threads; int c;
- while ((c = getopt(argc, argv, "hv:d:e:i:")) != -1) { + while ((c = getopt(argc, argv, "hv:d:e:i:g:")) != -1) { switch (c) { case 'v': test_data.nr_cpus = atoi(optarg); @@ -388,6 +398,9 @@ int main(int argc, char **argv) case 'i': nr_iterations = strtoul(optarg, NULL, 0); break; + case 'g': + test_data.is_nested = atoi_non_negative("Is Nested", optarg); + break; case 'h': default: pr_usage(argv[0]);
Extend the set_id_regs test to run guest code with NV enabled. Also add a check to skip writes to the TGRAN*_2 fields when NV is enabled, since they are not writable for NV guests.
NV is enabled using a command line argument and is disabled by default.
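For reference, the shifts skipped below correspond to the stage-2 granule fields of ID_AA64MMFR0_EL1 (sys_reg(3, 0, 0, 7, 0)); the field positions assumed here are:

    TGran4_2    bits [43:40]  ->  shift 40
    TGran64_2   bits [39:36]  ->  shift 36
    TGran16_2   bits [35:32]  ->  shift 32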
Signed-off-by: Ganapatrao Kulkarni gankulkarni@os.amperecomputing.com --- .../testing/selftests/kvm/arm64/set_id_regs.c | 57 ++++++++++++++++++- 1 file changed, 54 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/kvm/arm64/set_id_regs.c b/tools/testing/selftests/kvm/arm64/set_id_regs.c index 322b9d3b0125..86f69ec7ac0f 100644 --- a/tools/testing/selftests/kvm/arm64/set_id_regs.c +++ b/tools/testing/selftests/kvm/arm64/set_id_regs.c @@ -13,6 +13,7 @@ #include "kvm_util.h" #include "processor.h" #include "test_util.h" +#include "nv_util.h" #include <linux/bitfield.h>
enum ftr_type { @@ -67,6 +68,9 @@ struct test_feature_reg { .type = FTR_END, \ }
+static bool is_nested; +struct kvm_vcpu_init init; + static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = { S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DoubleLock, 0), REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, WRPs, 0), @@ -435,6 +439,24 @@ static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only) continue; }
+ if (is_nested) { + /* For NV, the ID_AA64MMFR0_EL1.TGran4/16/64_2 + * fields are not writable. + */ + if (reg_id == sys_reg(3, 0, 0, 7, 0)) { + switch (ftr_bits[j].shift) { + case 40: + case 36: + case 32: + ksft_test_result_skip("%s For NV guests\n", + ftr_bits[j].name); + continue; + default: + break; + } + } + } + /* Make sure the feature field is writable */ TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);
@@ -658,7 +680,7 @@ static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu) * Calls KVM_ARM_VCPU_INIT behind the scenes, which will do an * architectural reset of the vCPU. */ - aarch64_vcpu_setup(vcpu, NULL); + aarch64_vcpu_setup(vcpu, &init);
for (int i = 0; i < ARRAY_SIZE(test_regs); i++) test_assert_id_reg_unchanged(vcpu, test_regs[i].reg); @@ -673,20 +695,47 @@ static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu) ksft_test_result_pass("%s\n", __func__); }
-int main(void) +static void pr_usage(const char *name) +{ + pr_info("%s [-g nv] -h\n", name); + pr_info(" -g:\tEnable Nested Virtualization, run guest code as guest hypervisor (default: Disabled)\n"); +} + +int main(int argc, char **argv) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; bool aarch64_only; uint64_t val, el0; int test_cnt; + int opt, gic_fd; + + while ((opt = getopt(argc, argv, "g:")) != -1) { + switch (opt) { + case 'g': + is_nested = atoi_non_negative("Is Nested", optarg); + break; + case 'h': + default: + pr_usage(argv[0]); + return 1; + } + }
TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES)); TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_WRITABLE_IMP_ID_REGS)); + if (is_nested) + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_EL2));
vm = vm_create(1); + vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init); vm_enable_cap(vm, KVM_CAP_ARM_WRITABLE_IMP_ID_REGS, 0); - vcpu = vm_vcpu_add(vm, 0, guest_code); + + if (is_nested) + init_vcpu_nested(&init); + + vcpu = aarch64_vcpu_add(vm, 0, &init, guest_code); + gic_fd = vgic_v3_setup(vm, 1, 64);
/* Check for AARCH64 only system */ val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1)); @@ -714,6 +763,8 @@ int main(void)
test_reset_preserves_id_regs(vcpu);
+ if (is_nested) + close(gic_fd); kvm_vm_free(vm);
ksft_finished();
Modify the test to run the guest code with NV enabled. The added code is applicable to arm64 only.
NV is enabled using a command line argument and is disabled by default.
Signed-off-by: Ganapatrao Kulkarni gankulkarni@os.amperecomputing.com --- .../testing/selftests/kvm/guest_print_test.c | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+)
diff --git a/tools/testing/selftests/kvm/guest_print_test.c b/tools/testing/selftests/kvm/guest_print_test.c index bcf582852db9..4f786b88fdbe 100644 --- a/tools/testing/selftests/kvm/guest_print_test.c +++ b/tools/testing/selftests/kvm/guest_print_test.c @@ -15,6 +15,15 @@ #include "processor.h" #include "ucall_common.h"
+#ifdef __aarch64__ +#include "nv_util.h" +static void pr_usage(const char *name) +{ + pr_info("%s [-g nv] -h\n", name); + pr_info(" -g:\tEnable Nested Virtualization, run guest code as guest hypervisor (default: Disabled)\n"); +} +#endif + struct guest_vals { uint64_t a; uint64_t b; @@ -192,7 +201,30 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_vm *vm;
+#ifdef __aarch64__ + int opt; + bool is_nested = false; + int gic_fd; + + while ((opt = getopt(argc, argv, "g:")) != -1) { + switch (opt) { + case 'g': + is_nested = atoi_non_negative("Is Nested", optarg); + break; + case 'h': + default: + pr_usage(argv[0]); + return 1; + } + } + + if (is_nested) + vm = nv_vm_create_with_vcpus_gic(1, &vcpu, &gic_fd, guest_code); + else + vm = vm_create_with_one_vcpu(&vcpu, guest_code); +#else vm = vm_create_with_one_vcpu(&vcpu, guest_code); +#endif
test_type_i64(vcpu, -1, -1); test_type_i64(vcpu, -1, 1);
Add code to run guest_code in vEL2. NV is enabled using a command line argument and is disabled by default.
NV is only supported on arm64; on other architectures the test will exit with an assertion failure if it is run with NV enabled.
Signed-off-by: Ganapatrao Kulkarni gankulkarni@os.amperecomputing.com --- .../selftests/kvm/kvm_page_table_test.c | 30 +++++++++++++++++-- 1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c index dd8b12f626d3..383f9d134ecb 100644 --- a/tools/testing/selftests/kvm/kvm_page_table_test.c +++ b/tools/testing/selftests/kvm/kvm_page_table_test.c @@ -20,6 +20,10 @@ #include "guest_modes.h" #include "ucall_common.h"
+#ifdef __aarch64__ +#include <nv_util.h> +#endif + #define TEST_MEM_SLOT_INDEX 1
/* Default size(1GB) of the memory for testing */ @@ -229,7 +233,9 @@ static void *vcpu_worker(void *data) struct test_params { uint64_t phys_offset; uint64_t test_mem_size; + bool is_nested; enum vm_mem_backing_src_type src_type; + int fd; };
static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg) @@ -252,8 +258,17 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
/* Create a VM with enough guest pages */ guest_num_pages = test_mem_size / guest_page_size; - vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus, guest_num_pages, + if (p->is_nested) { +#ifdef __aarch64__ + vm = __nv_vm_create_with_vcpus_gic(VM_SHAPE(mode), nr_vcpus, + test_args.vcpus, guest_num_pages, &p->fd, guest_code); +#else + TEST_FAIL("Nested Not Supported"); +#endif + } else { + vm = __vm_create_with_vcpus(VM_SHAPE(mode), nr_vcpus, guest_num_pages, guest_code, test_args.vcpus); + }
/* Align down GPA of the testing memslot */ if (!p->phys_offset) @@ -345,6 +360,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) struct timespec start; struct timespec ts_diff; int ret, i; + struct test_params *p = (struct test_params *)arg;
/* Create VM with vCPUs and make some pre-initialization */ vm = pre_init_before_test(mode, arg); @@ -414,6 +430,8 @@ static void run_test(enum vm_guest_mode mode, void *arg) TEST_ASSERT(ret == 0, "Error in sem_destroy");
free(vcpu_threads); + if (p->is_nested) + close(p->fd); kvm_vm_free(vm); }
@@ -421,7 +439,7 @@ static void help(char *name) { puts(""); printf("usage: %s [-h] [-p offset] [-m mode] " - "[-b mem-size] [-v vcpus] [-s mem-type]\n", name); + "[-b mem-size] [-v vcpus] [-s mem-type] [-g nv]\n", name); puts(""); printf(" -p: specify guest physical test memory offset\n" " Warning: a low offset can conflict with the loaded test code.\n"); @@ -430,6 +448,8 @@ static void help(char *name) " (default: 1G)\n"); printf(" -v: specify the number of vCPUs to run\n" " (default: 1)\n"); + printf(" -g: Enable Nested Virtualization, run guest code as guest hypervisor.\n" + " (default: Disabled)\n"); backing_src_help("-s"); puts(""); } @@ -440,12 +460,13 @@ int main(int argc, char *argv[]) struct test_params p = { .test_mem_size = DEFAULT_TEST_MEM_SIZE, .src_type = DEFAULT_VM_MEM_SRC, + .is_nested = false, }; int opt;
guest_modes_append_default();
- while ((opt = getopt(argc, argv, "hp:m:b:v:s:")) != -1) { + while ((opt = getopt(argc, argv, "hp:m:b:v:s:g:")) != -1) { switch (opt) { case 'p': p.phys_offset = strtoull(optarg, NULL, 0); @@ -464,6 +485,9 @@ int main(int argc, char *argv[]) case 's': p.src_type = parse_backing_src_type(optarg); break; + case 'g': + p.is_nested = atoi_non_negative("Is Nested", optarg); + break; case 'h': default: help(argv[0]);
Extend page_fault_test to run guest code with NV enabled. NV is enabled using a command line argument and is disabled by default.
Signed-off-by: Ganapatrao Kulkarni gankulkarni@os.amperecomputing.com --- .../selftests/kvm/arm64/page_fault_test.c | 35 ++++++++++++++++--- 1 file changed, 30 insertions(+), 5 deletions(-)
diff --git a/tools/testing/selftests/kvm/arm64/page_fault_test.c b/tools/testing/selftests/kvm/arm64/page_fault_test.c index ec33a8f9c908..e3d91cb68a71 100644 --- a/tools/testing/selftests/kvm/arm64/page_fault_test.c +++ b/tools/testing/selftests/kvm/arm64/page_fault_test.c @@ -11,6 +11,7 @@ #include <fcntl.h> #include <test_util.h> #include <kvm_util.h> +#include <nv_util.h> #include <processor.h> #include <asm/sysreg.h> #include <linux/bitfield.h> @@ -70,6 +71,8 @@ struct test_params { struct test_desc *test_desc; };
+static bool is_nested; + static inline void flush_tlb_page(uint64_t vaddr) { uint64_t page = vaddr >> 12; @@ -701,14 +704,27 @@ static void run_test(enum vm_guest_mode mode, void *arg) struct kvm_vm *vm; struct kvm_vcpu *vcpu; struct uffd_desc *pt_uffd, *data_uffd; + int gic_fd = -1;
print_test_banner(mode, p);
- vm = ____vm_create(VM_SHAPE(mode)); - setup_memslots(vm, p); + if (is_nested) { + struct kvm_vcpu_init init; + + vm = ____vm_create(VM_SHAPE(mode)); + setup_memslots(vm, p); + vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init); + init_vcpu_nested(&init); + vcpu = aarch64_vcpu_add(vm, 0, &init, guest_code); + gic_fd = vgic_v3_setup(vm, 1, 64); + } else { + vm = ____vm_create(VM_SHAPE(mode)); + setup_memslots(vm, p); + vcpu = vm_vcpu_add(vm, 0, guest_code); + } + kvm_vm_elf_load(vm, program_invocation_name); setup_ucall(vm); - vcpu = vm_vcpu_add(vm, 0, guest_code);
setup_gva_maps(vm);
@@ -728,6 +744,9 @@ static void run_test(enum vm_guest_mode mode, void *arg)
vcpu_run_loop(vm, vcpu, test);
+ if (is_nested) + close(gic_fd); + kvm_vm_free(vm); free_uffd(test, pt_uffd, data_uffd);
@@ -742,7 +761,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) static void help(char *name) { puts(""); - printf("usage: %s [-h] [-s mem-type]\n", name); + printf("usage: %s [-h] [-s mem-type] [-g nested]\n", name); puts(""); guest_modes_help(); backing_src_help("-s"); @@ -1115,7 +1134,7 @@ int main(int argc, char *argv[])
src_type = DEFAULT_VM_MEM_SRC;
- while ((opt = getopt(argc, argv, "hm:s:")) != -1) { + while ((opt = getopt(argc, argv, "hm:s:g:")) != -1) { switch (opt) { case 'm': guest_modes_cmdline(optarg); @@ -1123,6 +1142,9 @@ int main(int argc, char *argv[]) case 's': src_type = parse_backing_src_type(optarg); break; + case 'g': + is_nested = atoi_non_negative("Is Nested", optarg); + break; case 'h': default: help(argv[0]); @@ -1130,6 +1152,9 @@ int main(int argc, char *argv[]) } }
+ if (is_nested) + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_EL2)); + for_each_test_and_guest_mode(src_type); return 0; }