The test is inspired by the pmu_event_filter_test implemented for x86. On the arm64 platform, there is the same ability to set the pmu_event_filter through the KVM_ARM_VCPU_PMU_V3_FILTER attribute. So add the test for arm64.
The series first moves some common PMU code from vpmu_counter_access to lib/aarch64/vpmu.c and include/aarch64/vpmu.h, which can then be used by pmu_event_filter_test. It then fixes a bug related to [enable|disable]_counter, and finally implements the test itself.
Changelog: ---------- v1->v2: - Improve the commit message. [Eric] - Fix the bug in [enable|disable]_counter. [Raghavendra & Marc] - Add a check for whether KVM has the KVM_ARM_VCPU_PMU_V3_FILTER attribute. - Add a check for whether the host PMU supports the test events, through PMCEID0. - Split test_invalid_filter() out into another patch. [Eric]
v1: https://lore.kernel.org/all/20231123063750.2176250-1-shahuang@redhat.com/
Shaoqin Huang (5): KVM: selftests: aarch64: Make the [create|destroy]_vpmu_vm() public KVM: selftests: aarch64: Move pmu helper functions into vpmu.h KVM: selftests: aarch64: Fix the buggy [enable|disable]_counter KVM: selftests: aarch64: Introduce pmu_event_filter_test KVM: selftests: aarch64: Add invalid filter test in pmu_event_filter_test
tools/testing/selftests/kvm/Makefile | 2 + .../kvm/aarch64/pmu_event_filter_test.c | 267 ++++++++++++++++++ .../kvm/aarch64/vpmu_counter_access.c | 218 ++------------ .../selftests/kvm/include/aarch64/vpmu.h | 135 +++++++++ .../testing/selftests/kvm/lib/aarch64/vpmu.c | 74 +++++ 5 files changed, 502 insertions(+), 194 deletions(-) create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c create mode 100644 tools/testing/selftests/kvm/include/aarch64/vpmu.h create mode 100644 tools/testing/selftests/kvm/lib/aarch64/vpmu.c
Move the implementation of [create|destroy]_vpmu_vm() into lib/aarch64/vpmu.c and export their declarations in a header so they can be reused by other tests.
The sync exception handler installation is test-specific, so move it out of the helper function.
No functional change intended.
Reviewed-by: Eric Auger eric.auger@redhat.com Signed-off-by: Shaoqin Huang shahuang@redhat.com --- tools/testing/selftests/kvm/Makefile | 1 + .../kvm/aarch64/vpmu_counter_access.c | 100 +++++------------- .../selftests/kvm/include/aarch64/vpmu.h | 16 +++ .../testing/selftests/kvm/lib/aarch64/vpmu.c | 64 +++++++++++ 4 files changed, 105 insertions(+), 76 deletions(-) create mode 100644 tools/testing/selftests/kvm/include/aarch64/vpmu.h create mode 100644 tools/testing/selftests/kvm/lib/aarch64/vpmu.c
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index a5963ab9215b..b60852c222ac 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -57,6 +57,7 @@ LIBKVM_aarch64 += lib/aarch64/processor.c LIBKVM_aarch64 += lib/aarch64/spinlock.c LIBKVM_aarch64 += lib/aarch64/ucall.c LIBKVM_aarch64 += lib/aarch64/vgic.c +LIBKVM_aarch64 += lib/aarch64/vpmu.c
LIBKVM_s390x += lib/s390x/diag318_test_handler.c LIBKVM_s390x += lib/s390x/processor.c diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c index 5ea78986e665..17305408a334 100644 --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c @@ -16,6 +16,7 @@ #include <processor.h> #include <test_util.h> #include <vgic.h> +#include <vpmu.h> #include <perf/arm_pmuv3.h> #include <linux/bitfield.h>
@@ -25,13 +26,7 @@ /* The cycle counter bit position that's common among the PMU registers */ #define ARMV8_PMU_CYCLE_IDX 31
-struct vpmu_vm { - struct kvm_vm *vm; - struct kvm_vcpu *vcpu; - int gic_fd; -}; - -static struct vpmu_vm vpmu_vm; +static struct vpmu_vm *vpmu_vm;
struct pmreg_sets { uint64_t set_reg_id; @@ -421,64 +416,6 @@ static void guest_code(uint64_t expected_pmcr_n) GUEST_DONE(); }
-#define GICD_BASE_GPA 0x8000000ULL -#define GICR_BASE_GPA 0x80A0000ULL - -/* Create a VM that has one vCPU with PMUv3 configured. */ -static void create_vpmu_vm(void *guest_code) -{ - struct kvm_vcpu_init init; - uint8_t pmuver, ec; - uint64_t dfr0, irq = 23; - struct kvm_device_attr irq_attr = { - .group = KVM_ARM_VCPU_PMU_V3_CTRL, - .attr = KVM_ARM_VCPU_PMU_V3_IRQ, - .addr = (uint64_t)&irq, - }; - struct kvm_device_attr init_attr = { - .group = KVM_ARM_VCPU_PMU_V3_CTRL, - .attr = KVM_ARM_VCPU_PMU_V3_INIT, - }; - - /* The test creates the vpmu_vm multiple times. Ensure a clean state */ - memset(&vpmu_vm, 0, sizeof(vpmu_vm)); - - vpmu_vm.vm = vm_create(1); - vm_init_descriptor_tables(vpmu_vm.vm); - for (ec = 0; ec < ESR_EC_NUM; ec++) { - vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec, - guest_sync_handler); - } - - /* Create vCPU with PMUv3 */ - vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init); - init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3); - vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code); - vcpu_init_descriptor_tables(vpmu_vm.vcpu); - vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64, - GICD_BASE_GPA, GICR_BASE_GPA); - __TEST_REQUIRE(vpmu_vm.gic_fd >= 0, - "Failed to create vgic-v3, skipping"); - - /* Make sure that PMUv3 support is indicated in the ID register */ - vcpu_get_reg(vpmu_vm.vcpu, - KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0); - pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0); - TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && - pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP, - "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver); - - /* Initialize vPMU */ - vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr); - vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr); -} - -static void destroy_vpmu_vm(void) -{ - close(vpmu_vm.gic_fd); - kvm_vm_free(vpmu_vm.vm); -} - static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n) { struct ucall uc; @@ -497,13 +434,24 @@ static void 
run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n) } }
+static void create_vpmu_vm_with_handler(void *guest_code) +{ + uint8_t ec; + vpmu_vm = create_vpmu_vm(guest_code); + + for (ec = 0; ec < ESR_EC_NUM; ec++) { + vm_install_sync_handler(vpmu_vm->vm, VECTOR_SYNC_CURRENT, ec, + guest_sync_handler); + } +} + static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail) { struct kvm_vcpu *vcpu; uint64_t pmcr, pmcr_orig;
- create_vpmu_vm(guest_code); - vcpu = vpmu_vm.vcpu; + create_vpmu_vm_with_handler(guest_code); + vcpu = vpmu_vm->vcpu;
vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig); pmcr = pmcr_orig; @@ -539,7 +487,7 @@ static void run_access_test(uint64_t pmcr_n) pr_debug("Test with pmcr_n %lu\n", pmcr_n);
test_create_vpmu_vm_with_pmcr_n(pmcr_n, false); - vcpu = vpmu_vm.vcpu; + vcpu = vpmu_vm->vcpu;
/* Save the initial sp to restore them later to run the guest again */ vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp); @@ -550,7 +498,7 @@ static void run_access_test(uint64_t pmcr_n) * Reset and re-initialize the vCPU, and run the guest code again to * check if PMCR_EL0.N is preserved. */ - vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init); + vm_ioctl(vpmu_vm->vm, KVM_ARM_PREFERRED_TARGET, &init); init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3); aarch64_vcpu_setup(vcpu, &init); vcpu_init_descriptor_tables(vcpu); @@ -559,7 +507,7 @@ static void run_access_test(uint64_t pmcr_n)
run_vcpu(vcpu, pmcr_n);
- destroy_vpmu_vm(); + destroy_vpmu_vm(vpmu_vm); }
static struct pmreg_sets validity_check_reg_sets[] = { @@ -580,7 +528,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n) uint64_t valid_counters_mask, max_counters_mask;
test_create_vpmu_vm_with_pmcr_n(pmcr_n, false); - vcpu = vpmu_vm.vcpu; + vcpu = vpmu_vm->vcpu;
valid_counters_mask = get_counters_mask(pmcr_n); max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS); @@ -621,7 +569,7 @@ static void run_pmregs_validity_test(uint64_t pmcr_n) KVM_ARM64_SYS_REG(clr_reg_id), reg_val); }
- destroy_vpmu_vm(); + destroy_vpmu_vm(vpmu_vm); }
/* @@ -634,7 +582,7 @@ static void run_error_test(uint64_t pmcr_n) pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);
test_create_vpmu_vm_with_pmcr_n(pmcr_n, true); - destroy_vpmu_vm(); + destroy_vpmu_vm(vpmu_vm); }
/* @@ -645,9 +593,9 @@ static uint64_t get_pmcr_n_limit(void) { uint64_t pmcr;
- create_vpmu_vm(guest_code); - vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr); - destroy_vpmu_vm(); + create_vpmu_vm_with_handler(guest_code); + vcpu_get_reg(vpmu_vm->vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr); + destroy_vpmu_vm(vpmu_vm); return get_pmcr_n(pmcr); }
diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h new file mode 100644 index 000000000000..0a56183644ee --- /dev/null +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include <kvm_util.h> + +#define GICD_BASE_GPA 0x8000000ULL +#define GICR_BASE_GPA 0x80A0000ULL + +struct vpmu_vm { + struct kvm_vm *vm; + struct kvm_vcpu *vcpu; + int gic_fd; +}; + +struct vpmu_vm *create_vpmu_vm(void *guest_code); + +void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm); diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c new file mode 100644 index 000000000000..b3de8fdc555e --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <kvm_util.h> +#include <processor.h> +#include <test_util.h> +#include <vgic.h> +#include <vpmu.h> +#include <perf/arm_pmuv3.h> + +/* Create a VM that has one vCPU with PMUv3 configured. 
*/ +struct vpmu_vm *create_vpmu_vm(void *guest_code) +{ + struct kvm_vcpu_init init; + uint8_t pmuver; + uint64_t dfr0, irq = 23; + struct kvm_device_attr irq_attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .attr = KVM_ARM_VCPU_PMU_V3_IRQ, + .addr = (uint64_t)&irq, + }; + struct kvm_device_attr init_attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .attr = KVM_ARM_VCPU_PMU_V3_INIT, + }; + struct vpmu_vm *vpmu_vm; + + vpmu_vm = calloc(1, sizeof(*vpmu_vm)); + TEST_ASSERT(vpmu_vm != NULL, "Insufficient Memory"); + memset(vpmu_vm, 0, sizeof(vpmu_vm)); + + vpmu_vm->vm = vm_create(1); + vm_init_descriptor_tables(vpmu_vm->vm); + + /* Create vCPU with PMUv3 */ + vm_ioctl(vpmu_vm->vm, KVM_ARM_PREFERRED_TARGET, &init); + init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3); + vpmu_vm->vcpu = aarch64_vcpu_add(vpmu_vm->vm, 0, &init, guest_code); + vcpu_init_descriptor_tables(vpmu_vm->vcpu); + vpmu_vm->gic_fd = vgic_v3_setup(vpmu_vm->vm, 1, 64, + GICD_BASE_GPA, GICR_BASE_GPA); + __TEST_REQUIRE(vpmu_vm->gic_fd >= 0, + "Failed to create vgic-v3, skipping"); + + /* Make sure that PMUv3 support is indicated in the ID register */ + vcpu_get_reg(vpmu_vm->vcpu, + KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0); + pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0); + TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && + pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP, + "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver); + + /* Initialize vPMU */ + vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr); + vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr); + + return vpmu_vm; +} + +void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm) +{ + close(vpmu_vm->gic_fd); + kvm_vm_free(vpmu_vm->vm); + free(vpmu_vm); +}
Move those PMU helper functions into include/aarch64/vpmu.h so they can be used by other PMU tests.
No functional change intended.
Reviewed-by: Eric Auger eric.auger@redhat.com Signed-off-by: Shaoqin Huang shahuang@redhat.com --- .../kvm/aarch64/vpmu_counter_access.c | 118 ----------------- .../selftests/kvm/include/aarch64/vpmu.h | 119 ++++++++++++++++++ 2 files changed, 119 insertions(+), 118 deletions(-)
diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c index 17305408a334..62d6315790ab 100644 --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c @@ -20,12 +20,6 @@ #include <perf/arm_pmuv3.h> #include <linux/bitfield.h>
-/* The max number of the PMU event counters (excluding the cycle counter) */ -#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1) - -/* The cycle counter bit position that's common among the PMU registers */ -#define ARMV8_PMU_CYCLE_IDX 31 - static struct vpmu_vm *vpmu_vm;
struct pmreg_sets { @@ -35,118 +29,6 @@ struct pmreg_sets {
#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr}
-static uint64_t get_pmcr_n(uint64_t pmcr) -{ - return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; -} - -static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n) -{ - *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT); - *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT); -} - -static uint64_t get_counters_mask(uint64_t n) -{ - uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX); - - if (n) - mask |= GENMASK(n - 1, 0); - return mask; -} - -/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */ -static inline unsigned long read_sel_evcntr(int sel) -{ - write_sysreg(sel, pmselr_el0); - isb(); - return read_sysreg(pmxevcntr_el0); -} - -/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */ -static inline void write_sel_evcntr(int sel, unsigned long val) -{ - write_sysreg(sel, pmselr_el0); - isb(); - write_sysreg(val, pmxevcntr_el0); - isb(); -} - -/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */ -static inline unsigned long read_sel_evtyper(int sel) -{ - write_sysreg(sel, pmselr_el0); - isb(); - return read_sysreg(pmxevtyper_el0); -} - -/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */ -static inline void write_sel_evtyper(int sel, unsigned long val) -{ - write_sysreg(sel, pmselr_el0); - isb(); - write_sysreg(val, pmxevtyper_el0); - isb(); -} - -static inline void enable_counter(int idx) -{ - uint64_t v = read_sysreg(pmcntenset_el0); - - write_sysreg(BIT(idx) | v, pmcntenset_el0); - isb(); -} - -static inline void disable_counter(int idx) -{ - uint64_t v = read_sysreg(pmcntenset_el0); - - write_sysreg(BIT(idx) | v, pmcntenclr_el0); - isb(); -} - -static void pmu_disable_reset(void) -{ - uint64_t pmcr = read_sysreg(pmcr_el0); - - /* Reset all counters, disabling them */ - pmcr &= ~ARMV8_PMU_PMCR_E; - write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0); - isb(); -} - -#define RETURN_READ_PMEVCNTRN(n) \ - return read_sysreg(pmevcntr##n##_el0) -static unsigned long read_pmevcntrn(int n) -{ - PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); - return 0; -} - -#define 
WRITE_PMEVCNTRN(n) \ - write_sysreg(val, pmevcntr##n##_el0) -static void write_pmevcntrn(int n, unsigned long val) -{ - PMEVN_SWITCH(n, WRITE_PMEVCNTRN); - isb(); -} - -#define READ_PMEVTYPERN(n) \ - return read_sysreg(pmevtyper##n##_el0) -static unsigned long read_pmevtypern(int n) -{ - PMEVN_SWITCH(n, READ_PMEVTYPERN); - return 0; -} - -#define WRITE_PMEVTYPERN(n) \ - write_sysreg(val, pmevtyper##n##_el0) -static void write_pmevtypern(int n, unsigned long val) -{ - PMEVN_SWITCH(n, WRITE_PMEVTYPERN); - isb(); -} - /* * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0 * accessors that test cases will use. Each of the accessors will diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h index 0a56183644ee..e0cc1ca1c4b7 100644 --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h @@ -1,10 +1,17 @@ /* SPDX-License-Identifier: GPL-2.0 */
#include <kvm_util.h> +#include <perf/arm_pmuv3.h>
#define GICD_BASE_GPA 0x8000000ULL #define GICR_BASE_GPA 0x80A0000ULL
+/* The max number of the PMU event counters (excluding the cycle counter) */ +#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1) + +/* The cycle counter bit position that's common among the PMU registers */ +#define ARMV8_PMU_CYCLE_IDX 31 + struct vpmu_vm { struct kvm_vm *vm; struct kvm_vcpu *vcpu; @@ -14,3 +21,115 @@ struct vpmu_vm { struct vpmu_vm *create_vpmu_vm(void *guest_code);
void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm); + +static inline uint64_t get_pmcr_n(uint64_t pmcr) +{ + return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; +} + +static inline void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n) +{ + *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT); + *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT); +} + +static inline uint64_t get_counters_mask(uint64_t n) +{ + uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX); + + if (n) + mask |= GENMASK(n - 1, 0); + return mask; +} + +/* Read PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */ +static inline unsigned long read_sel_evcntr(int sel) +{ + write_sysreg(sel, pmselr_el0); + isb(); + return read_sysreg(pmxevcntr_el0); +} + +/* Write PMEVTCNTR<n>_EL0 through PMXEVCNTR_EL0 */ +static inline void write_sel_evcntr(int sel, unsigned long val) +{ + write_sysreg(sel, pmselr_el0); + isb(); + write_sysreg(val, pmxevcntr_el0); + isb(); +} + +/* Read PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */ +static inline unsigned long read_sel_evtyper(int sel) +{ + write_sysreg(sel, pmselr_el0); + isb(); + return read_sysreg(pmxevtyper_el0); +} + +/* Write PMEVTYPER<n>_EL0 through PMXEVTYPER_EL0 */ +static inline void write_sel_evtyper(int sel, unsigned long val) +{ + write_sysreg(sel, pmselr_el0); + isb(); + write_sysreg(val, pmxevtyper_el0); + isb(); +} + +static inline void enable_counter(int idx) +{ + uint64_t v = read_sysreg(pmcntenset_el0); + + write_sysreg(BIT(idx) | v, pmcntenset_el0); + isb(); +} + +static inline void disable_counter(int idx) +{ + uint64_t v = read_sysreg(pmcntenset_el0); + + write_sysreg(BIT(idx) | v, pmcntenclr_el0); + isb(); +} + +static inline void pmu_disable_reset(void) +{ + uint64_t pmcr = read_sysreg(pmcr_el0); + + /* Reset all counters, disabling them */ + pmcr &= ~ARMV8_PMU_PMCR_E; + write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0); + isb(); +} + +#define RETURN_READ_PMEVCNTRN(n) \ + return read_sysreg(pmevcntr##n##_el0) +static inline unsigned long read_pmevcntrn(int n) 
+{ + PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); + return 0; +} + +#define WRITE_PMEVCNTRN(n) \ + write_sysreg(val, pmevcntr##n##_el0) +static inline void write_pmevcntrn(int n, unsigned long val) +{ + PMEVN_SWITCH(n, WRITE_PMEVCNTRN); + isb(); +} + +#define READ_PMEVTYPERN(n) \ + return read_sysreg(pmevtyper##n##_el0) +static inline unsigned long read_pmevtypern(int n) +{ + PMEVN_SWITCH(n, READ_PMEVTYPERN); + return 0; +} + +#define WRITE_PMEVTYPERN(n) \ + write_sysreg(val, pmevtyper##n##_el0) +static inline void write_pmevtypern(int n, unsigned long val) +{ + PMEVN_SWITCH(n, WRITE_PMEVTYPERN); + isb(); +}
In general, the set/clr registers should always be used in their write form, never in a RMW form (imagine an interrupt disabling a counter between the read and the write...).
The current implementation of [enable|disable]_counter both use the RMW form; fix them by directly writing to the set/clr registers.
At the same time, this also fixes the buggy disable_counter(), which would end up enabling all the other counters instead of disabling just one.
Signed-off-by: Shaoqin Huang shahuang@redhat.com --- tools/testing/selftests/kvm/include/aarch64/vpmu.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h index e0cc1ca1c4b7..644dae3814b5 100644 --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h @@ -78,17 +78,13 @@ static inline void write_sel_evtyper(int sel, unsigned long val)
static inline void enable_counter(int idx) { - uint64_t v = read_sysreg(pmcntenset_el0); - - write_sysreg(BIT(idx) | v, pmcntenset_el0); + write_sysreg(BIT(idx), pmcntenset_el0); isb(); }
static inline void disable_counter(int idx) { - uint64_t v = read_sysreg(pmcntenset_el0); - - write_sysreg(BIT(idx) | v, pmcntenclr_el0); + write_sysreg(BIT(idx), pmcntenclr_el0); isb(); }
Hi Shaoqin,
On 11/29/23 08:27, Shaoqin Huang wrote:
In general, the set/clr registers should always be used in their write form, never in a RMW form (imagine an interrupt disabling a counter between the read and the write...).
The current implementation of [enable|disable]_counter both use the RMW form, fix them by directly write to the set/clr registers.
At the same time, it also fix the buggy disable_counter() which would end up disabling all the counters.
Signed-off-by: Shaoqin Huang shahuang@redhat.com
Reviewed-by: Eric Auger eric.auger@redhat.com
Eric
tools/testing/selftests/kvm/include/aarch64/vpmu.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h index e0cc1ca1c4b7..644dae3814b5 100644 --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h @@ -78,17 +78,13 @@ static inline void write_sel_evtyper(int sel, unsigned long val) static inline void enable_counter(int idx) {
- uint64_t v = read_sysreg(pmcntenset_el0);
- write_sysreg(BIT(idx) | v, pmcntenset_el0);
- write_sysreg(BIT(idx), pmcntenset_el0); isb();
} static inline void disable_counter(int idx) {
- uint64_t v = read_sysreg(pmcntenset_el0);
- write_sysreg(BIT(idx) | v, pmcntenclr_el0);
- write_sysreg(BIT(idx), pmcntenclr_el0); isb();
}
Introduce pmu_event_filter_test for arm64 platforms. The test configures PMUv3 for a vCPU, sets different PMU event filters for the vCPU, and checks whether the guest can use the events the user allows and cannot use the events the user denies.
This test refactors create_vpmu_vm() and makes it a wrapper for __create_vpmu_vm(), which allows some extra init code to run before KVM_ARM_VCPU_PMU_V3_INIT.
The test uses the KVM_ARM_VCPU_PMU_V3_FILTER attribute to set the PMU event filter in KVM. It chooses to filter two common events, branches_retired and instructions_retired, lets the guest use the two events in the PMU, and checks whether the result is as expected.
Signed-off-by: Shaoqin Huang shahuang@redhat.com --- tools/testing/selftests/kvm/Makefile | 1 + .../kvm/aarch64/pmu_event_filter_test.c | 231 ++++++++++++++++++ .../selftests/kvm/include/aarch64/vpmu.h | 4 + .../testing/selftests/kvm/lib/aarch64/vpmu.c | 14 +- 4 files changed, 248 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index b60852c222ac..5f126e1a1dbf 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions TEST_GEN_PROGS_aarch64 += aarch64/hypercalls TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test +TEST_GEN_PROGS_aarch64 += aarch64/pmu_event_filter_test TEST_GEN_PROGS_aarch64 += aarch64/psci_test TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c new file mode 100644 index 000000000000..0e652fbdb37a --- /dev/null +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * pmu_event_filter_test - Test user limit pmu event for guest. + * + * Copyright (c) 2023 Red Hat, Inc. + * + * This test checks if the guest only see the limited pmu event that userspace + * sets, if the guest can use those events which user allow, and if the guest + * can't use those events which user deny. + * This test runs only when KVM_CAP_ARM_PMU_V3, KVM_ARM_VCPU_PMU_V3_FILTER + * is supported on the host. 
+ */ +#include <kvm_util.h> +#include <processor.h> +#include <vgic.h> +#include <vpmu.h> +#include <test_util.h> +#include <perf/arm_pmuv3.h> + +struct { + uint64_t branches_retired; + uint64_t instructions_retired; +} pmc_results; + +static struct vpmu_vm *vpmu_vm; +static uint64_t pmceid0; + +#define FILTER_NR 10 + +struct test_desc { + const char *name; + void (*check_result)(void); + struct kvm_pmu_event_filter filter[FILTER_NR]; +}; + +#define __DEFINE_FILTER(base, num, act) \ + ((struct kvm_pmu_event_filter) { \ + .base_event = base, \ + .nevents = num, \ + .action = act, \ + }) + +#define DEFINE_FILTER(base, act) __DEFINE_FILTER(base, 1, act) + +#define EMPTY_FILTER { 0 } + +#define SW_INCR 0x0 +#define INST_RETIRED 0x8 +#define BR_RETIRED 0x21 + +#define NUM_BRANCHES 10 + +static void run_and_measure_loop(void) +{ + asm volatile( + " mov x10, %[loop]\n" + "1: sub x10, x10, #1\n" + " cmp x10, #0x0\n" + " b.gt 1b\n" + : + : [loop] "r" (NUM_BRANCHES) + : "x10", "cc"); +} + +static void guest_code(void) +{ + uint64_t pmcr = read_sysreg(pmcr_el0); + + pmu_disable_reset(); + + write_pmevtypern(0, BR_RETIRED); + write_pmevtypern(1, INST_RETIRED); + enable_counter(0); + enable_counter(1); + write_sysreg(pmcr | ARMV8_PMU_PMCR_E, pmcr_el0); + + run_and_measure_loop(); + + write_sysreg(pmcr, pmcr_el0); + + pmc_results.branches_retired = read_sysreg(pmevcntr0_el0); + pmc_results.instructions_retired = read_sysreg(pmevcntr1_el0); + + GUEST_DONE(); +} + +static void guest_get_pmceid0(void) +{ + uint64_t pmceid0 = read_sysreg(pmceid0_el0); + + GUEST_PRINTF("%lx\n", pmceid0); + + GUEST_DONE(); +} + +static void pmu_event_filter_init(struct vpmu_vm *vm, void *arg) +{ + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .attr = KVM_ARM_VCPU_PMU_V3_FILTER, + }; + struct kvm_pmu_event_filter *filter = (struct kvm_pmu_event_filter *)arg; + + while (filter && filter->nevents != 0) { + attr.addr = (uint64_t)filter; + vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, 
&attr); + filter++; + } +} + +static void create_vpmu_vm_with_filter(void *guest_code, + struct kvm_pmu_event_filter *filter) +{ + vpmu_vm = __create_vpmu_vm(guest_code, pmu_event_filter_init, filter); +} + +static void run_vcpu(struct kvm_vcpu *vcpu) +{ + struct ucall uc; + + while (1) { + vcpu_run(vcpu); + switch (get_ucall(vcpu, &uc)) { + case UCALL_DONE: + return; + case UCALL_PRINTF: + pmceid0 = strtoll(uc.buffer, NULL, 16); + break; + default: + TEST_FAIL("Unknown ucall %lu", uc.cmd); + } + } +} + +static void check_pmc_counting(void) +{ + uint64_t br = pmc_results.branches_retired; + uint64_t ir = pmc_results.instructions_retired; + + TEST_ASSERT(br && br == NUM_BRANCHES, "Branch instructions retired = " + "%lu (expected %u)", br, NUM_BRANCHES); + TEST_ASSERT(ir, "Instructions retired = %lu (expected > 0)", ir); +} + +static void check_pmc_not_counting(void) +{ + uint64_t br = pmc_results.branches_retired; + uint64_t ir = pmc_results.instructions_retired; + + TEST_ASSERT(!br, "Branch instructions retired = %lu (expected 0)", br); + TEST_ASSERT(!ir, "Instructions retired = %lu (expected 0)", ir); +} + +static void run_vcpu_and_sync_pmc_results(void) +{ + memset(&pmc_results, 0, sizeof(pmc_results)); + sync_global_to_guest(vpmu_vm->vm, pmc_results); + + run_vcpu(vpmu_vm->vcpu); + + sync_global_from_guest(vpmu_vm->vm, pmc_results); +} + +static void run_test(struct test_desc *t) +{ + pr_debug("Test: %s\n", t->name); + + create_vpmu_vm_with_filter(guest_code, t->filter); + + run_vcpu_and_sync_pmc_results(); + + t->check_result(); + + destroy_vpmu_vm(vpmu_vm); +} + +static struct test_desc tests[] = { + {"without_filter", check_pmc_counting, { EMPTY_FILTER }}, + {"member_allow_filter", check_pmc_counting, + {DEFINE_FILTER(SW_INCR, 0), DEFINE_FILTER(INST_RETIRED, 0), + DEFINE_FILTER(BR_RETIRED, 0), EMPTY_FILTER}}, + {"member_deny_filter", check_pmc_not_counting, + {DEFINE_FILTER(SW_INCR, 1), DEFINE_FILTER(INST_RETIRED, 1), + DEFINE_FILTER(BR_RETIRED, 1), 
EMPTY_FILTER}}, + {"not_member_deny_filter", check_pmc_counting, + {DEFINE_FILTER(SW_INCR, 1), EMPTY_FILTER}}, + {"not_member_allow_filter", check_pmc_not_counting, + {DEFINE_FILTER(SW_INCR, 0), EMPTY_FILTER}}, + { 0 } +}; + +static void for_each_test(void) +{ + struct test_desc *t; + + for (t = &tests[0]; t->name; t++) + run_test(t); +} + +static bool kvm_supports_pmu_event_filter(void) +{ + int r; + + vpmu_vm = create_vpmu_vm(guest_code); + + r = __kvm_has_device_attr(vpmu_vm->vcpu->fd, KVM_ARM_VCPU_PMU_V3_CTRL, + KVM_ARM_VCPU_PMU_V3_FILTER); + + destroy_vpmu_vm(vpmu_vm); + return !r; +} + +static bool host_pmu_supports_events(void) +{ + vpmu_vm = create_vpmu_vm(guest_get_pmceid0); + + run_vcpu(vpmu_vm->vcpu); + + destroy_vpmu_vm(vpmu_vm); + + return pmceid0 & (BR_RETIRED | INST_RETIRED); +} + +int main(void) +{ + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3)); + TEST_REQUIRE(kvm_supports_pmu_event_filter()); + TEST_REQUIRE(host_pmu_supports_events()); + + for_each_test(); +} diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h index 644dae3814b5..f103d0824f8a 100644 --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h @@ -18,6 +18,10 @@ struct vpmu_vm { int gic_fd; };
+struct vpmu_vm *__create_vpmu_vm(void *guest_code, + void (*init_pmu)(struct vpmu_vm *vm, void *arg), + void *arg); + struct vpmu_vm *create_vpmu_vm(void *guest_code);
void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm); diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c index b3de8fdc555e..76ea03d607f1 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c +++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c @@ -7,8 +7,9 @@ #include <vpmu.h> #include <perf/arm_pmuv3.h>
-/* Create a VM that has one vCPU with PMUv3 configured. */ -struct vpmu_vm *create_vpmu_vm(void *guest_code) +struct vpmu_vm *__create_vpmu_vm(void *guest_code, + void (*init_pmu)(struct vpmu_vm *vm, void *arg), + void *arg) { struct kvm_vcpu_init init; uint8_t pmuver; @@ -50,12 +51,21 @@ struct vpmu_vm *create_vpmu_vm(void *guest_code) "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
/* Initialize vPMU */ + if (init_pmu) + init_pmu(vpmu_vm, arg); + vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr); vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
return vpmu_vm; }
+/* Create a VM that has one vCPU with PMUv3 configured. */ +struct vpmu_vm *create_vpmu_vm(void *guest_code) +{ + return __create_vpmu_vm(guest_code, NULL, NULL); +} + void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm) { close(vpmu_vm->gic_fd);
Hi Shaoqin,
On 11/29/23 08:27, Shaoqin Huang wrote:
Introduce pmu_event_filter_test for arm64 platforms. The test configures PMUv3 for a vCPU, sets different PMU event filters for the vCPU, and checks whether the guest can use the events the user allows and cannot use the events the user denies.
This test refactors create_vpmu_vm() and makes it a wrapper for __create_vpmu_vm(), which allows some extra init code to run before KVM_ARM_VCPU_PMU_V3_INIT.
The test uses the KVM_ARM_VCPU_PMU_V3_FILTER attribute to set the PMU event filter in KVM. It chooses to filter two common events, branches_retired and instructions_retired, lets the guest use the two events in the PMU, and checks whether the result is as expected.
Signed-off-by: Shaoqin Huang shahuang@redhat.com
tools/testing/selftests/kvm/Makefile | 1 + .../kvm/aarch64/pmu_event_filter_test.c | 231 ++++++++++++++++++ .../selftests/kvm/include/aarch64/vpmu.h | 4 + .../testing/selftests/kvm/lib/aarch64/vpmu.c | 14 +- 4 files changed, 248 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index b60852c222ac..5f126e1a1dbf 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions TEST_GEN_PROGS_aarch64 += aarch64/hypercalls TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test +TEST_GEN_PROGS_aarch64 += aarch64/pmu_event_filter_test TEST_GEN_PROGS_aarch64 += aarch64/psci_test TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c new file mode 100644 index 000000000000..0e652fbdb37a --- /dev/null +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/*
- pmu_event_filter_test - Test user limit pmu event for guest.
- Copyright (c) 2023 Red Hat, Inc.
- This test checks if the guest only see the limited pmu event that userspace
- sets, if the guest can use those events which user allow, and if the guest
- can't use those events which user deny.
- This test runs only when KVM_CAP_ARM_PMU_V3, KVM_ARM_VCPU_PMU_V3_FILTER
- is supported on the host.
- */
+#include <kvm_util.h> +#include <processor.h> +#include <vgic.h> +#include <vpmu.h> +#include <test_util.h> +#include <perf/arm_pmuv3.h>
+struct {
- uint64_t branches_retired;
- uint64_t instructions_retired;
+} pmc_results;
+static struct vpmu_vm *vpmu_vm; +static uint64_t pmceid0;
+#define FILTER_NR 10
+struct test_desc {
- const char *name;
- void (*check_result)(void);
- struct kvm_pmu_event_filter filter[FILTER_NR];
+}; +> +#define __DEFINE_FILTER(base, num, act) \
- ((struct kvm_pmu_event_filter) { \
.base_event = base, \
.nevents = num, \
.action = act, \
- })
+#define DEFINE_FILTER(base, act) __DEFINE_FILTER(base, 1, act)
+#define EMPTY_FILTER { 0 }
+#define SW_INCR 0x0 +#define INST_RETIRED 0x8 +#define BR_RETIRED 0x21
+#define NUM_BRANCHES 10
+static void run_and_measure_loop(void) +{
- asm volatile(
" mov x10, %[loop]\n"
"1: sub x10, x10, #1\n"
" cmp x10, #0x0\n"
" b.gt 1b\n"
:
: [loop] "r" (NUM_BRANCHES)
: "x10", "cc");
+}
+static void guest_code(void) +{
- uint64_t pmcr = read_sysreg(pmcr_el0);
- pmu_disable_reset();
- write_pmevtypern(0, BR_RETIRED);
- write_pmevtypern(1, INST_RETIRED);
- enable_counter(0);
- enable_counter(1);
- write_sysreg(pmcr | ARMV8_PMU_PMCR_E, pmcr_el0);
- run_and_measure_loop();
- write_sysreg(pmcr, pmcr_el0);
- pmc_results.branches_retired = read_sysreg(pmevcntr0_el0);
- pmc_results.instructions_retired = read_sysreg(pmevcntr1_el0);
- GUEST_DONE();
+}
+static void guest_get_pmceid0(void) +{
- uint64_t pmceid0 = read_sysreg(pmceid0_el0);
- GUEST_PRINTF("%lx\n", pmceid0);
- GUEST_DONE();
+}
+static void pmu_event_filter_init(struct vpmu_vm *vm, void *arg) +{
- struct kvm_device_attr attr = {
.group = KVM_ARM_VCPU_PMU_V3_CTRL,
.attr = KVM_ARM_VCPU_PMU_V3_FILTER,
- };
- struct kvm_pmu_event_filter *filter = (struct kvm_pmu_event_filter *)arg;
- while (filter && filter->nevents != 0) {
attr.addr = (uint64_t)filter;
vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
filter++;
- }
+}
+static void create_vpmu_vm_with_filter(void *guest_code,
struct kvm_pmu_event_filter *filter)
+{
- vpmu_vm = __create_vpmu_vm(guest_code, pmu_event_filter_init, filter);
+}
+static void run_vcpu(struct kvm_vcpu *vcpu) +{
- struct ucall uc;
- while (1) {
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE:
return;
case UCALL_PRINTF:
pmceid0 = strtoll(uc.buffer, NULL, 16);
break;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
- }
+}
+static void check_pmc_counting(void) +{
- uint64_t br = pmc_results.branches_retired;
- uint64_t ir = pmc_results.instructions_retired;
- TEST_ASSERT(br && br == NUM_BRANCHES, "Branch instructions retired = "
"%lu (expected %u)", br, NUM_BRANCHES);
have you tested on several machines? My experience with some events (MEM_ACCESS for instance) is that you have variance (sometimes significant) on some event count. I am a little bit scared that having this br == NUM_BRANCHES check without taking into account some margin will cause failures on some HW.
In v1 I suggested reading PMCEID* in guest code to check whether the event is supported. This method would also have the benefit of allowing more complex filter range combinations to be tested.
- TEST_ASSERT(ir, "Instructions retired = %lu (expected > 0)", ir);
+}
+static void check_pmc_not_counting(void) +{
- uint64_t br = pmc_results.branches_retired;
- uint64_t ir = pmc_results.instructions_retired;
- TEST_ASSERT(!br, "Branch instructions retired = %lu (expected 0)", br);
- TEST_ASSERT(!ir, "Instructions retired = %lu (expected 0)", ir);
+}
+static void run_vcpu_and_sync_pmc_results(void) +{
- memset(&pmc_results, 0, sizeof(pmc_results));
- sync_global_to_guest(vpmu_vm->vm, pmc_results);
- run_vcpu(vpmu_vm->vcpu);
- sync_global_from_guest(vpmu_vm->vm, pmc_results);
+}
+static void run_test(struct test_desc *t) +{
- pr_debug("Test: %s\n", t->name);
- create_vpmu_vm_with_filter(guest_code, t->filter);
- run_vcpu_and_sync_pmc_results();
- t->check_result();
- destroy_vpmu_vm(vpmu_vm);
+}
+static struct test_desc tests[] = {
- {"without_filter", check_pmc_counting, { EMPTY_FILTER }},
- {"member_allow_filter", check_pmc_counting,
{DEFINE_FILTER(SW_INCR, 0), DEFINE_FILTER(INST_RETIRED, 0),
Note the doc says that Event 0 (SW_INCR) is never filtered, as it doesn't count a hardware event
I would use the defines exposed in the uapi
+#define KVM_PMU_EVENT_ALLOW 0 +#define KVM_PMU_EVENT_DENY 1
DEFINE_FILTER(BR_RETIRED, 0), EMPTY_FILTER}},
- {"member_deny_filter", check_pmc_not_counting,
{DEFINE_FILTER(SW_INCR, 1), DEFINE_FILTER(INST_RETIRED, 1),
What is the purpose of SW_INCR? You do not seem to test it anyway.
DEFINE_FILTER(BR_RETIRED, 1), EMPTY_FILTER}},
- {"not_member_deny_filter", check_pmc_counting,
{DEFINE_FILTER(SW_INCR, 1), EMPTY_FILTER}},
- {"not_member_allow_filter", check_pmc_not_counting,
{DEFINE_FILTER(SW_INCR, 0), EMPTY_FILTER}},
- { 0 }
+};
+static void for_each_test(void) +{
- struct test_desc *t;
- for (t = &tests[0]; t->name; t++)
run_test(t);
+}
+static bool kvm_supports_pmu_event_filter(void) +{
- int r;
- vpmu_vm = create_vpmu_vm(guest_code);
- r = __kvm_has_device_attr(vpmu_vm->vcpu->fd, KVM_ARM_VCPU_PMU_V3_CTRL,
KVM_ARM_VCPU_PMU_V3_FILTER);
you can use __vcpu_has_device_attr directly
- destroy_vpmu_vm(vpmu_vm);
- return !r;
+}
+static bool host_pmu_supports_events(void) +{
- vpmu_vm = create_vpmu_vm(guest_get_pmceid0);
- run_vcpu(vpmu_vm->vcpu);
- destroy_vpmu_vm(vpmu_vm);
- return pmceid0 & (BR_RETIRED | INST_RETIRED);
this will return true if either event is supported. I suspect this is not what you want.
+}
+int main(void) +{
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
- TEST_REQUIRE(kvm_supports_pmu_event_filter());
- TEST_REQUIRE(host_pmu_supports_events());
- for_each_test();
+} diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h index 644dae3814b5..f103d0824f8a 100644 --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h @@ -18,6 +18,10 @@ struct vpmu_vm { int gic_fd; }; +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
void (*init_pmu)(struct vpmu_vm *vm, void *arg),
void *arg);
struct vpmu_vm *create_vpmu_vm(void *guest_code); void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm); diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c index b3de8fdc555e..76ea03d607f1 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c +++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c @@ -7,8 +7,9 @@ #include <vpmu.h> #include <perf/arm_pmuv3.h> -/* Create a VM that has one vCPU with PMUv3 configured. */ -struct vpmu_vm *create_vpmu_vm(void *guest_code) +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
void (*init_pmu)(struct vpmu_vm *vm, void *arg),
void *arg)
{ struct kvm_vcpu_init init; uint8_t pmuver; @@ -50,12 +51,21 @@ struct vpmu_vm *create_vpmu_vm(void *guest_code) "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver); /* Initialize vPMU */
- if (init_pmu)
init_pmu(vpmu_vm, arg);
- vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr); vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
return vpmu_vm; } +/* Create a VM that has one vCPU with PMUv3 configured. */ +struct vpmu_vm *create_vpmu_vm(void *guest_code) +{
- return __create_vpmu_vm(guest_code, NULL, NULL);
+}
void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm) { close(vpmu_vm->gic_fd);
Thanks
Eric
Hi Shaoqin,
On 12/14/23 14:45, Eric Auger wrote:
Hi Shaoqin,
On 11/29/23 08:27, Shaoqin Huang wrote:
Introduce pmu_event_filter_test for arm64 platforms. The test configures PMUv3 for a vCPU, sets different PMU event filters for the vCPU, and checks whether the guest can use the events the user allows and cannot use the events the user denies.
This test refactors create_vpmu_vm() and makes it a wrapper for __create_vpmu_vm(), which allows some extra init code to run before KVM_ARM_VCPU_PMU_V3_INIT.
The test uses the KVM_ARM_VCPU_PMU_V3_FILTER attribute to set the PMU event filter in KVM. It chooses to filter two common events, branches_retired and instructions_retired, lets the guest use the two events in the PMU, and checks whether the result is as expected.
Signed-off-by: Shaoqin Huang shahuang@redhat.com
tools/testing/selftests/kvm/Makefile | 1 + .../kvm/aarch64/pmu_event_filter_test.c | 231 ++++++++++++++++++ .../selftests/kvm/include/aarch64/vpmu.h | 4 + .../testing/selftests/kvm/lib/aarch64/vpmu.c | 14 +- 4 files changed, 248 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index b60852c222ac..5f126e1a1dbf 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions TEST_GEN_PROGS_aarch64 += aarch64/hypercalls TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test +TEST_GEN_PROGS_aarch64 += aarch64/pmu_event_filter_test TEST_GEN_PROGS_aarch64 += aarch64/psci_test TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c new file mode 100644 index 000000000000..0e652fbdb37a --- /dev/null +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/*
- pmu_event_filter_test - Test user limit pmu event for guest.
- Copyright (c) 2023 Red Hat, Inc.
- This test checks if the guest only see the limited pmu event that userspace
- sets, if the guest can use those events which user allow, and if the guest
- can't use those events which user deny.
- This test runs only when KVM_CAP_ARM_PMU_V3, KVM_ARM_VCPU_PMU_V3_FILTER
- is supported on the host.
- */
+#include <kvm_util.h> +#include <processor.h> +#include <vgic.h> +#include <vpmu.h> +#include <test_util.h> +#include <perf/arm_pmuv3.h>
+struct {
- uint64_t branches_retired;
- uint64_t instructions_retired;
+} pmc_results;
+static struct vpmu_vm *vpmu_vm; +static uint64_t pmceid0;
+#define FILTER_NR 10
+struct test_desc {
- const char *name;
- void (*check_result)(void);
- struct kvm_pmu_event_filter filter[FILTER_NR];
+}; +> +#define __DEFINE_FILTER(base, num, act) \
- ((struct kvm_pmu_event_filter) { \
.base_event = base, \
.nevents = num, \
.action = act, \
- })
+#define DEFINE_FILTER(base, act) __DEFINE_FILTER(base, 1, act)
+#define EMPTY_FILTER { 0 }
+#define SW_INCR 0x0 +#define INST_RETIRED 0x8 +#define BR_RETIRED 0x21
+#define NUM_BRANCHES 10
+static void run_and_measure_loop(void) +{
- asm volatile(
" mov x10, %[loop]\n"
"1: sub x10, x10, #1\n"
" cmp x10, #0x0\n"
" b.gt 1b\n"
:
: [loop] "r" (NUM_BRANCHES)
: "x10", "cc");
+}
+static void guest_code(void) +{
- uint64_t pmcr = read_sysreg(pmcr_el0);
- pmu_disable_reset();
- write_pmevtypern(0, BR_RETIRED);
- write_pmevtypern(1, INST_RETIRED);
- enable_counter(0);
- enable_counter(1);
- write_sysreg(pmcr | ARMV8_PMU_PMCR_E, pmcr_el0);
- run_and_measure_loop();
- write_sysreg(pmcr, pmcr_el0);
- pmc_results.branches_retired = read_sysreg(pmevcntr0_el0);
- pmc_results.instructions_retired = read_sysreg(pmevcntr1_el0);
- GUEST_DONE();
+}
+static void guest_get_pmceid0(void) +{
- uint64_t pmceid0 = read_sysreg(pmceid0_el0);
- GUEST_PRINTF("%lx\n", pmceid0);
- GUEST_DONE();
+}
+static void pmu_event_filter_init(struct vpmu_vm *vm, void *arg) +{
- struct kvm_device_attr attr = {
.group = KVM_ARM_VCPU_PMU_V3_CTRL,
.attr = KVM_ARM_VCPU_PMU_V3_FILTER,
- };
- struct kvm_pmu_event_filter *filter = (struct kvm_pmu_event_filter *)arg;
- while (filter && filter->nevents != 0) {
attr.addr = (uint64_t)filter;
vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
filter++;
- }
+}
+static void create_vpmu_vm_with_filter(void *guest_code,
struct kvm_pmu_event_filter *filter)
+{
- vpmu_vm = __create_vpmu_vm(guest_code, pmu_event_filter_init, filter);
+}
+static void run_vcpu(struct kvm_vcpu *vcpu) +{
- struct ucall uc;
- while (1) {
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE:
return;
case UCALL_PRINTF:
pmceid0 = strtoll(uc.buffer, NULL, 16);
break;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
- }
+}
+static void check_pmc_counting(void) +{
- uint64_t br = pmc_results.branches_retired;
- uint64_t ir = pmc_results.instructions_retired;
- TEST_ASSERT(br && br == NUM_BRANCHES, "Branch instructions retired = "
"%lu (expected %u)", br, NUM_BRANCHES);
have you tested on several machines? My experience with some events (MEM_ACCESS for instance) is that you have variance (sometimes significant) on some event count. I am a little bit scared that having this br == NUM_BRANCHES check without taking into account some margin will cause failures on some HW.
I confirm the usual suspect, Amberwing, does not like this check ;-)
augere@qualcomm-amberwing-rep-06:~/UPSTREAM/linux/tools/testing/selftests/kvm/aarch64# ./pmu_event_filter_test ==== Test Assertion Failure ==== aarch64/pmu_event_filter_test.c:141: br && br == NUM_BRANCHES pid=7750 tid=7750 errno=4 - Interrupted system call 1 0x0000000000401d6b: check_pmc_counting at pmu_event_filter_test.c:141 2 0x0000000000401967: run_test at pmu_event_filter_test.c:173 3 (inlined by) for_each_test at pmu_event_filter_test.c:198 4 (inlined by) main at pmu_event_filter_test.c:264 5 0x0000ffffaaa6c79b: ?? ??:0 6 0x0000ffffaaa6c86b: ?? ??:0 7 0x0000000000401aaf: _start at ??:? Branch instructions retired = 15 (expected 10)
Eric
In v1 I suggested reading PMCEID* in guest code to check whether the event is supported. This method would also have the benefit of allowing more complex filter range combinations to be tested.
- TEST_ASSERT(ir, "Instructions retired = %lu (expected > 0)", ir);
+}
+static void check_pmc_not_counting(void) +{
- uint64_t br = pmc_results.branches_retired;
- uint64_t ir = pmc_results.instructions_retired;
- TEST_ASSERT(!br, "Branch instructions retired = %lu (expected 0)", br);
- TEST_ASSERT(!ir, "Instructions retired = %lu (expected 0)", ir);
+}
+static void run_vcpu_and_sync_pmc_results(void) +{
- memset(&pmc_results, 0, sizeof(pmc_results));
- sync_global_to_guest(vpmu_vm->vm, pmc_results);
- run_vcpu(vpmu_vm->vcpu);
- sync_global_from_guest(vpmu_vm->vm, pmc_results);
+}
+static void run_test(struct test_desc *t) +{
- pr_debug("Test: %s\n", t->name);
- create_vpmu_vm_with_filter(guest_code, t->filter);
- run_vcpu_and_sync_pmc_results();
- t->check_result();
- destroy_vpmu_vm(vpmu_vm);
+}
+static struct test_desc tests[] = {
- {"without_filter", check_pmc_counting, { EMPTY_FILTER }},
- {"member_allow_filter", check_pmc_counting,
{DEFINE_FILTER(SW_INCR, 0), DEFINE_FILTER(INST_RETIRED, 0),
Note the doc says that Event 0 (SW_INCR) is never filtered, as it doesn't count a hardware event
I would use the defines exposed in the uapi
+#define KVM_PMU_EVENT_ALLOW 0 +#define KVM_PMU_EVENT_DENY 1
DEFINE_FILTER(BR_RETIRED, 0), EMPTY_FILTER}},
- {"member_deny_filter", check_pmc_not_counting,
{DEFINE_FILTER(SW_INCR, 1), DEFINE_FILTER(INST_RETIRED, 1),
What is the purpose of SW_INCR? You do not seem to test it anyway.
DEFINE_FILTER(BR_RETIRED, 1), EMPTY_FILTER}},
- {"not_member_deny_filter", check_pmc_counting,
{DEFINE_FILTER(SW_INCR, 1), EMPTY_FILTER}},
- {"not_member_allow_filter", check_pmc_not_counting,
{DEFINE_FILTER(SW_INCR, 0), EMPTY_FILTER}},
- { 0 }
+};
+static void for_each_test(void) +{
- struct test_desc *t;
- for (t = &tests[0]; t->name; t++)
run_test(t);
+}
+static bool kvm_supports_pmu_event_filter(void) +{
- int r;
- vpmu_vm = create_vpmu_vm(guest_code);
- r = __kvm_has_device_attr(vpmu_vm->vcpu->fd, KVM_ARM_VCPU_PMU_V3_CTRL,
KVM_ARM_VCPU_PMU_V3_FILTER);
you can use __vcpu_has_device_attr directly
- destroy_vpmu_vm(vpmu_vm);
- return !r;
+}
+static bool host_pmu_supports_events(void) +{
- vpmu_vm = create_vpmu_vm(guest_get_pmceid0);
- run_vcpu(vpmu_vm->vcpu);
- destroy_vpmu_vm(vpmu_vm);
- return pmceid0 & (BR_RETIRED | INST_RETIRED);
this will return true if either event is supported. I suspect this is not what you want.
+}
+int main(void) +{
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
- TEST_REQUIRE(kvm_supports_pmu_event_filter());
- TEST_REQUIRE(host_pmu_supports_events());
- for_each_test();
+} diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h index 644dae3814b5..f103d0824f8a 100644 --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h @@ -18,6 +18,10 @@ struct vpmu_vm { int gic_fd; }; +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
void (*init_pmu)(struct vpmu_vm *vm, void *arg),
void *arg);
struct vpmu_vm *create_vpmu_vm(void *guest_code); void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm); diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c index b3de8fdc555e..76ea03d607f1 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c +++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c @@ -7,8 +7,9 @@ #include <vpmu.h> #include <perf/arm_pmuv3.h> -/* Create a VM that has one vCPU with PMUv3 configured. */ -struct vpmu_vm *create_vpmu_vm(void *guest_code) +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
void (*init_pmu)(struct vpmu_vm *vm, void *arg),
void *arg)
{ struct kvm_vcpu_init init; uint8_t pmuver; @@ -50,12 +51,21 @@ struct vpmu_vm *create_vpmu_vm(void *guest_code) "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver); /* Initialize vPMU */
- if (init_pmu)
init_pmu(vpmu_vm, arg);
- vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr); vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
return vpmu_vm; } +/* Create a VM that has one vCPU with PMUv3 configured. */ +struct vpmu_vm *create_vpmu_vm(void *guest_code) +{
- return __create_vpmu_vm(guest_code, NULL, NULL);
+}
void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm) { close(vpmu_vm->gic_fd);
Thanks
Eric
Hi Shaoqin,
On 11/29/23 08:27, Shaoqin Huang wrote:
Introduce pmu_event_filter_test for arm64 platforms. The test configures PMUv3 for a vCPU, sets different PMU event filters for the vCPU, and checks whether the guest can use the events the user allows and cannot use the events the user denies.
This test refactors create_vpmu_vm() and makes it a wrapper for __create_vpmu_vm(), which allows some extra init code to run before KVM_ARM_VCPU_PMU_V3_INIT.
The test uses the KVM_ARM_VCPU_PMU_V3_FILTER attribute to set the PMU event filter in KVM. It chooses to filter two common events, branches_retired and instructions_retired, lets the guest use the two events in the PMU, and checks whether the result is as expected.
Signed-off-by: Shaoqin Huang shahuang@redhat.com
tools/testing/selftests/kvm/Makefile | 1 + .../kvm/aarch64/pmu_event_filter_test.c | 231 ++++++++++++++++++ .../selftests/kvm/include/aarch64/vpmu.h | 4 + .../testing/selftests/kvm/lib/aarch64/vpmu.c | 14 +- 4 files changed, 248 insertions(+), 2 deletions(-) create mode 100644 tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index b60852c222ac..5f126e1a1dbf 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -155,6 +155,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions TEST_GEN_PROGS_aarch64 += aarch64/hypercalls TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test +TEST_GEN_PROGS_aarch64 += aarch64/pmu_event_filter_test TEST_GEN_PROGS_aarch64 += aarch64/psci_test TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c new file mode 100644 index 000000000000..0e652fbdb37a --- /dev/null +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/*
- pmu_event_filter_test - Test user limit pmu event for guest.
- Copyright (c) 2023 Red Hat, Inc.
- This test checks if the guest only see the limited pmu event that userspace
- sets, if the guest can use those events which user allow, and if the guest
- can't use those events which user deny.
- This test runs only when KVM_CAP_ARM_PMU_V3, KVM_ARM_VCPU_PMU_V3_FILTER
- is supported on the host.
- */
+#include <kvm_util.h> +#include <processor.h> +#include <vgic.h> +#include <vpmu.h> +#include <test_util.h> +#include <perf/arm_pmuv3.h>
+struct {
- uint64_t branches_retired;
- uint64_t instructions_retired;
+} pmc_results;
+static struct vpmu_vm *vpmu_vm; +static uint64_t pmceid0;
+#define FILTER_NR 10
+struct test_desc {
- const char *name;
- void (*check_result)(void);
- struct kvm_pmu_event_filter filter[FILTER_NR];
+};
+#define __DEFINE_FILTER(base, num, act) \
- ((struct kvm_pmu_event_filter) { \
.base_event = base, \
.nevents = num, \
.action = act, \
- })
+#define DEFINE_FILTER(base, act) __DEFINE_FILTER(base, 1, act)
+#define EMPTY_FILTER { 0 }
+#define SW_INCR 0x0 +#define INST_RETIRED 0x8 +#define BR_RETIRED 0x21
+#define NUM_BRANCHES 10
+static void run_and_measure_loop(void) +{
- asm volatile(
" mov x10, %[loop]\n"
"1: sub x10, x10, #1\n"
" cmp x10, #0x0\n"
" b.gt 1b\n"
:
: [loop] "r" (NUM_BRANCHES)
: "x10", "cc");
+}
+static void guest_code(void) +{
- uint64_t pmcr = read_sysreg(pmcr_el0);
- pmu_disable_reset();
- write_pmevtypern(0, BR_RETIRED);
- write_pmevtypern(1, INST_RETIRED);
- enable_counter(0);
- enable_counter(1);
- write_sysreg(pmcr | ARMV8_PMU_PMCR_E, pmcr_el0);
- run_and_measure_loop();
- write_sysreg(pmcr, pmcr_el0);
- pmc_results.branches_retired = read_sysreg(pmevcntr0_el0);
- pmc_results.instructions_retired = read_sysreg(pmevcntr1_el0);
- GUEST_DONE();
+}
+static void guest_get_pmceid0(void) +{
- uint64_t pmceid0 = read_sysreg(pmceid0_el0);
- GUEST_PRINTF("%lx\n", pmceid0);
- GUEST_DONE();
+}
+static void pmu_event_filter_init(struct vpmu_vm *vm, void *arg) +{
- struct kvm_device_attr attr = {
.group = KVM_ARM_VCPU_PMU_V3_CTRL,
.attr = KVM_ARM_VCPU_PMU_V3_FILTER,
- };
- struct kvm_pmu_event_filter *filter = (struct kvm_pmu_event_filter *)arg;
- while (filter && filter->nevents != 0) {
attr.addr = (uint64_t)filter;
vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
filter++;
- }
+}
+static void create_vpmu_vm_with_filter(void *guest_code,
struct kvm_pmu_event_filter *filter)
+{
- vpmu_vm = __create_vpmu_vm(guest_code, pmu_event_filter_init, filter);
+}
+static void run_vcpu(struct kvm_vcpu *vcpu) +{
- struct ucall uc;
- while (1) {
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_DONE:
return;
case UCALL_PRINTF:
pmceid0 = strtoll(uc.buffer, NULL, 16);
break;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
- }
+}
+static void check_pmc_counting(void) +{
- uint64_t br = pmc_results.branches_retired;
- uint64_t ir = pmc_results.instructions_retired;
- TEST_ASSERT(br && br == NUM_BRANCHES, "Branch instructions retired = "
"%lu (expected %u)", br, NUM_BRANCHES);
- TEST_ASSERT(ir, "Instructions retired = %lu (expected > 0)", ir);
+}
+static void check_pmc_not_counting(void) +{
- uint64_t br = pmc_results.branches_retired;
- uint64_t ir = pmc_results.instructions_retired;
- TEST_ASSERT(!br, "Branch instructions retired = %lu (expected 0)", br);
- TEST_ASSERT(!ir, "Instructions retired = %lu (expected 0)", ir);
+}
+static void run_vcpu_and_sync_pmc_results(void) +{
- memset(&pmc_results, 0, sizeof(pmc_results));
- sync_global_to_guest(vpmu_vm->vm, pmc_results);
- run_vcpu(vpmu_vm->vcpu);
- sync_global_from_guest(vpmu_vm->vm, pmc_results);
+}
+static void run_test(struct test_desc *t) +{
- pr_debug("Test: %s\n", t->name);
- create_vpmu_vm_with_filter(guest_code, t->filter);
- run_vcpu_and_sync_pmc_results();
- t->check_result();
- destroy_vpmu_vm(vpmu_vm);
+}
+static struct test_desc tests[] = {
- {"without_filter", check_pmc_counting, { EMPTY_FILTER }},
- {"member_allow_filter", check_pmc_counting,
{DEFINE_FILTER(SW_INCR, 0), DEFINE_FILTER(INST_RETIRED, 0),
DEFINE_FILTER(BR_RETIRED, 0), EMPTY_FILTER}},
- {"member_deny_filter", check_pmc_not_counting,
{DEFINE_FILTER(SW_INCR, 1), DEFINE_FILTER(INST_RETIRED, 1),
DEFINE_FILTER(BR_RETIRED, 1), EMPTY_FILTER}},
- {"not_member_deny_filter", check_pmc_counting,
{DEFINE_FILTER(SW_INCR, 1), EMPTY_FILTER}},
- {"not_member_allow_filter", check_pmc_not_counting,
{DEFINE_FILTER(SW_INCR, 0), EMPTY_FILTER}},
- { 0 }
+};
+static void for_each_test(void) +{
- struct test_desc *t;
- for (t = &tests[0]; t->name; t++)
run_test(t);
+}
+static bool kvm_supports_pmu_event_filter(void) +{
- int r;
- vpmu_vm = create_vpmu_vm(guest_code);
- r = __kvm_has_device_attr(vpmu_vm->vcpu->fd, KVM_ARM_VCPU_PMU_V3_CTRL,
KVM_ARM_VCPU_PMU_V3_FILTER);
- destroy_vpmu_vm(vpmu_vm);
- return !r;
+}
+static bool host_pmu_supports_events(void) +{
- vpmu_vm = create_vpmu_vm(guest_get_pmceid0);
- run_vcpu(vpmu_vm->vcpu);
- destroy_vpmu_vm(vpmu_vm);
- return pmceid0 & (BR_RETIRED | INST_RETIRED);
+}
+int main(void) +{
- TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
- TEST_REQUIRE(kvm_supports_pmu_event_filter());
- TEST_REQUIRE(host_pmu_supports_events());
- for_each_test();
+} diff --git a/tools/testing/selftests/kvm/include/aarch64/vpmu.h b/tools/testing/selftests/kvm/include/aarch64/vpmu.h index 644dae3814b5..f103d0824f8a 100644 --- a/tools/testing/selftests/kvm/include/aarch64/vpmu.h +++ b/tools/testing/selftests/kvm/include/aarch64/vpmu.h @@ -18,6 +18,10 @@ struct vpmu_vm { int gic_fd; }; +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
void (*init_pmu)(struct vpmu_vm *vm, void *arg),
void *arg);
struct vpmu_vm *create_vpmu_vm(void *guest_code); void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm); diff --git a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c index b3de8fdc555e..76ea03d607f1 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/vpmu.c +++ b/tools/testing/selftests/kvm/lib/aarch64/vpmu.c @@ -7,8 +7,9 @@ #include <vpmu.h> #include <perf/arm_pmuv3.h> -/* Create a VM that has one vCPU with PMUv3 configured. */ -struct vpmu_vm *create_vpmu_vm(void *guest_code) +struct vpmu_vm *__create_vpmu_vm(void *guest_code,
void (*init_pmu)(struct vpmu_vm *vm, void *arg),
void *arg)
{ struct kvm_vcpu_init init; uint8_t pmuver; @@ -50,12 +51,21 @@ struct vpmu_vm *create_vpmu_vm(void *guest_code) "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver); /* Initialize vPMU */
- if (init_pmu)
init_pmu(vpmu_vm, arg);
- vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &irq_attr); vcpu_ioctl(vpmu_vm->vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
return vpmu_vm; } +/* Create a VM that has one vCPU with PMUv3 configured. */ +struct vpmu_vm *create_vpmu_vm(void *guest_code) +{
- return __create_vpmu_vm(guest_code, NULL, NULL);
+}
void destroy_vpmu_vm(struct vpmu_vm *vpmu_vm) { close(vpmu_vm->gic_fd);
While reading the doc again I can see there would be other interesting scenarios to test, such as
"Note: "Cancelling" a filter by registering the opposite action for the same range doesn't change the default action. For example, installing an ALLOW filter for event range [0:10) as the first filter and then applying a DENY action for the same range will leave the whole range as disabled."
also filter ranges. Using PMCEID* would simplify your life I think.
However this is more work and may go beyond your original intent. Up to you ...
Eric
Add an invalid filter test to double-check that KVM_ARM_VCPU_PMU_V3_FILTER returns the expected error.
Signed-off-by: Shaoqin Huang shahuang@redhat.com --- .../kvm/aarch64/pmu_event_filter_test.c | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+)
diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c index 0e652fbdb37a..4c375417b194 100644 --- a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c @@ -7,6 +7,7 @@ * This test checks if the guest only see the limited pmu event that userspace * sets, if the guest can use those events which user allow, and if the guest * can't use those events which user deny. + * It also checks that setting invalid filter ranges return the expected error. * This test runs only when KVM_CAP_ARM_PMU_V3, KVM_ARM_VCPU_PMU_V3_FILTER * is supported on the host. */ @@ -197,6 +198,39 @@ static void for_each_test(void) run_test(t); }
+static void set_invalid_filter(struct vpmu_vm *vm, void *arg) +{ + struct kvm_pmu_event_filter invalid; + struct kvm_device_attr attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .attr = KVM_ARM_VCPU_PMU_V3_FILTER, + .addr = (uint64_t)&invalid, + }; + int ret = 0; + + /* The max event number is (1 << 16), set a range large than it. */ + invalid = __DEFINE_FILTER(BIT(15), BIT(15)+1, 0); + ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr); + TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter range " + "ret = %d, errno = %d (expected ret = -1, errno = EINVAL)", + ret, errno); + + ret = 0; + + /* Set the Invalid action. */ + invalid = __DEFINE_FILTER(0, 1, 3); + ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr); + TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter action " + "ret = %d, errno = %d (expected ret = -1, errno = EINVAL)", + ret, errno); +} + +static void test_invalid_filter(void) +{ + vpmu_vm = __create_vpmu_vm(guest_code, set_invalid_filter, NULL); + destroy_vpmu_vm(vpmu_vm); +} + static bool kvm_supports_pmu_event_filter(void) { int r; @@ -228,4 +262,6 @@ int main(void) TEST_REQUIRE(host_pmu_supports_events());
for_each_test(); + + test_invalid_filter(); }
Hi Shaoqin
On 11/29/23 08:27, Shaoqin Huang wrote:
Add an invalid filter test to double-check that KVM_ARM_VCPU_PMU_V3_FILTER returns the expected error.
... in which situations? A filter beyond the 16-bit event space, or an incorrect action?
Signed-off-by: Shaoqin Huang shahuang@redhat.com
.../kvm/aarch64/pmu_event_filter_test.c | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+)
diff --git a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c index 0e652fbdb37a..4c375417b194 100644 --- a/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/aarch64/pmu_event_filter_test.c @@ -7,6 +7,7 @@
- This test checks if the guest only see the limited pmu event that userspace
- sets, if the guest can use those events which user allow, and if the guest
- can't use those events which user deny.
*/
- It also checks that setting invalid filter ranges return the expected error.
- This test runs only when KVM_CAP_ARM_PMU_V3, KVM_ARM_VCPU_PMU_V3_FILTER
- is supported on the host.
@@ -197,6 +198,39 @@ static void for_each_test(void) run_test(t); } +static void set_invalid_filter(struct vpmu_vm *vm, void *arg) +{
- struct kvm_pmu_event_filter invalid;
- struct kvm_device_attr attr = {
.group = KVM_ARM_VCPU_PMU_V3_CTRL,
.attr = KVM_ARM_VCPU_PMU_V3_FILTER,
.addr = (uint64_t)&invalid,
- };
- int ret = 0;
- /* The max event number is (1 << 16), set a range large than it. */
larger
- invalid = __DEFINE_FILTER(BIT(15), BIT(15)+1, 0);
doc says must fit within the event space defined by the PMU architecture (10 bits on ARMv8.0, 16 bits from ARMv8.1 onwards).
- ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
- TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter range "
"ret = %d, errno = %d (expected ret = -1, errno = EINVAL)",
ret, errno);
- ret = 0;
- /* Set the Invalid action. */
- invalid = __DEFINE_FILTER(0, 1, 3);
- ret = __vcpu_ioctl(vm->vcpu, KVM_SET_DEVICE_ATTR, &attr);
- TEST_ASSERT(ret && errno == EINVAL, "Set Invalid filter action "
"ret = %d, errno = %d (expected ret = -1, errno = EINVAL)",
ret, errno);
+}
+static void test_invalid_filter(void) +{
- vpmu_vm = __create_vpmu_vm(guest_code, set_invalid_filter, NULL);
- destroy_vpmu_vm(vpmu_vm);
+}
static bool kvm_supports_pmu_event_filter(void) { int r; @@ -228,4 +262,6 @@ int main(void) TEST_REQUIRE(host_pmu_supports_events()); for_each_test();
- test_invalid_filter();
}
Eric
linux-kselftest-mirror@lists.linaro.org