When re-entering the VM after handling a PMU interrupt, determine whether any of the guest counters overflowed and, if so, inject an interrupt into the guest.
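In sketch form, the check mirrors the new kvm_pmu_part_overflow_status() below: an interrupt is due only when the PMU is globally enabled and some guest-owned counter has both overflowed and has its overflow interrupt enabled. All names here are as introduced by this series:

	/* Sketch of the condition evaluated on re-entry; see
	 * kvm_pmu_part_overflow_status() in this patch.
	 */
	pending = (read_pmcr() & ARMV8_PMU_PMCR_E) &&
		  (kvm_pmu_guest_counter_mask(pmu) &
		   __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &
		   read_pmintenset());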
Signed-off-by: Colton Lewis <coltonlewis@google.com>
---
 arch/arm64/include/asm/kvm_pmu.h |  2 ++
 arch/arm64/kvm/pmu-emul.c        |  4 ++--
 arch/arm64/kvm/pmu-part.c        | 22 +++++++++++++++++++++-
 arch/arm64/kvm/pmu.c             |  6 +++++-
 4 files changed, 30 insertions(+), 4 deletions(-)
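A note on the kvm_pmu_load() comment touched in the pmu-part.c hunk below: the interrupt-enable and overflow bits live in set/clear register pairs, so restoring only the guest's bits takes one write to each side. Roughly, as a sketch (write_pmintenset()/write_pmintenclr() accessors as used elsewhere in this series):

	/* Sketch: restore the guest's bits of a set/clear register pair,
	 * touching only the bits in mask (the guest counters).
	 */
	write_pmintenset(val & mask);	/* enable bits the guest had set */
	write_pmintenclr(~val & mask);	/* clear bits the guest had clear */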
diff --git a/arch/arm64/include/asm/kvm_pmu.h b/arch/arm64/include/asm/kvm_pmu.h
index 03e3bd318e4b..d047def897bc 100644
--- a/arch/arm64/include/asm/kvm_pmu.h
+++ b/arch/arm64/include/asm/kvm_pmu.h
@@ -86,6 +86,8 @@ bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu);
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
 void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 void kvm_vcpu_pmu_resync_el0(void);
+bool kvm_pmu_emul_overflow_status(struct kvm_vcpu *vcpu);
+bool kvm_pmu_part_overflow_status(struct kvm_vcpu *vcpu);
 
 #define kvm_vcpu_has_pmu(vcpu)				\
 	(vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3))
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index bcaa9f7a8ca2..6f41fc3e3f74 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -268,7 +268,7 @@ void kvm_pmu_reprogram_counter_mask(struct kvm_vcpu *vcpu, u64 val)
  * counter where the values of the global enable control, PMOVSSET_EL0[n], and
  * PMINTENSET_EL1[n] are all 1.
  */
-bool kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
+bool kvm_pmu_emul_overflow_status(struct kvm_vcpu *vcpu)
 {
 	u64 reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
 
@@ -405,7 +405,7 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
 					  ARMV8_PMUV3_PERFCTR_CHAIN);
 
-	if (kvm_pmu_overflow_status(vcpu)) {
+	if (kvm_pmu_emul_overflow_status(vcpu)) {
 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 
 		if (!in_nmi())
diff --git a/arch/arm64/kvm/pmu-part.c b/arch/arm64/kvm/pmu-part.c
index bd04c37b19b9..165d1eae2634 100644
--- a/arch/arm64/kvm/pmu-part.c
+++ b/arch/arm64/kvm/pmu-part.c
@@ -279,7 +279,7 @@ void kvm_pmu_load(struct kvm_vcpu *vcpu)
 	write_pmcr(val);
 
 	/*
-	 * Loading these registers is tricky because of
+	 * Loading these registers is more intricate because of
 	 * 1. Applying only the bits for guest counters (indicated by mask)
 	 * 2. Setting and clearing are different registers
 	 */
@@ -355,3 +355,23 @@ void kvm_pmu_handle_guest_irq(u64 govf)
 
 	__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, govf);
 }
+
+/**
+ * kvm_pmu_part_overflow_status() - Determine if any guest counters have overflowed
+ * @vcpu: Pointer to struct kvm_vcpu
+ *
+ * Determine if any guest counters have overflowed and therefore an
+ * IRQ needs to be injected into the guest.
+ *
+ * Return: True if there was an overflow, false otherwise
+ */
+bool kvm_pmu_part_overflow_status(struct kvm_vcpu *vcpu)
+{
+	struct arm_pmu *pmu = vcpu->kvm->arch.arm_pmu;
+	u64 mask = kvm_pmu_guest_counter_mask(pmu);
+	u64 pmovs = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
+	u64 pmint = read_pmintenset();
+	u64 pmcr = read_pmcr();
+
+	return (pmcr & ARMV8_PMU_PMCR_E) && (mask & pmovs & pmint);
+}
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 9469f1e0a0b6..6ab0d23f9251 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -407,7 +407,11 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = &vcpu->arch.pmu;
 	bool overflow;
 
-	overflow = kvm_pmu_overflow_status(vcpu);
+	if (kvm_vcpu_pmu_is_partitioned(vcpu))
+		overflow = kvm_pmu_part_overflow_status(vcpu);
+	else
+		overflow = kvm_pmu_emul_overflow_status(vcpu);
+
 	if (pmu->irq_level == overflow)
 		return;
 
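As an aside on the helpers used above: kvm_pmu_guest_counter_mask(), read_pmintenset() and read_pmcr() are introduced earlier in this series. A minimal sketch of the assumed mask semantics, where the guest owns the event counters below the MDCR_EL2.HPMN partition boundary plus the cycle counter (the helper name below is hypothetical, for illustration only; the real helper may differ):

	/* Sketch only, not part of the patch: assumed guest counter mask
	 * under partitioning -- event counters [0..hpmn) plus the cycle
	 * counter. The real helper is defined earlier in the series.
	 */
	static u64 guest_counter_mask_sketch(u8 hpmn)
	{
		u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);	/* cycle counter, bit 31 */

		if (hpmn)
			mask |= GENMASK(hpmn - 1, 0);	/* guest event counters */

		return mask;
	}

Past the final hunk, kvm_pmu_update_state() continues as before: it latches the new level in pmu->irq_level and forwards it to the vGIC, so the guest observes the overflow as its PMU interrupt.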