From: Raghavendra Rao Ananta <rananta@google.com>
When a guest takes a synchronous external abort on an instruction fetch, the VMM may need to inject an external instruction abort into the guest. However, KVM_SET_VCPU_EVENTS currently only allows injecting external data aborts.
Extend the KVM_SET_VCPU_EVENTS ioctl so that userspace can inject an external instruction abort into the guest.
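For reference, a minimal userspace sketch (illustrative only, not part of this patch) of how a VMM might use the new field. It assumes vm_fd and vcpu_fd were obtained via KVM_CREATE_VM/KVM_CREATE_VCPU and that the uapi headers carry this change; note that setting both ext_dabt_pending and ext_iabt_pending is rejected with -EINVAL.

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int inject_ext_iabt(int vm_fd, int vcpu_fd)
  {
  	struct kvm_vcpu_events events;

  	/* Make sure the kernel can inject external instruction aborts. */
  	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_INJECT_EXT_IABT) <= 0)
  		return -1;

  	memset(&events, 0, sizeof(events));
  	events.exception.ext_iabt_pending = 1;

  	/* KVM commits the abort to the vCPU state before the next KVM_RUN. */
  	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
  }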
Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
---
 arch/arm64/include/uapi/asm/kvm.h |  3 ++-
 arch/arm64/kvm/arm.c              |  1 +
 arch/arm64/kvm/guest.c            | 15 ++++++++++-----
 include/uapi/linux/kvm.h          |  1 +
 4 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index ed5f3892674c7..643e8c4825451 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -184,8 +184,9 @@ struct kvm_vcpu_events {
 		__u8 serror_pending;
 		__u8 serror_has_esr;
 		__u8 ext_dabt_pending;
+		__u8 ext_iabt_pending;
 		/* Align it to 8 bytes */
-		__u8 pad[5];
+		__u8 pad[4];
 		__u64 serror_esr;
 	} exception;
 	__u32 reserved[12];
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 7a1a8210ff918..3d86d0ae7898b 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -315,6 +315,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
 	case KVM_CAP_ARM_NISV_TO_USER:
 	case KVM_CAP_ARM_INJECT_EXT_DABT:
+	case KVM_CAP_ARM_INJECT_EXT_IABT:
 	case KVM_CAP_SET_GUEST_DEBUG:
 	case KVM_CAP_VCPU_ATTRIBUTES:
 	case KVM_CAP_PTP_KVM:
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 16ba5e9ac86c3..d3c7b5015f20e 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -826,9 +826,9 @@ int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
 		events->exception.serror_esr = vcpu_get_vsesr(vcpu);
 
 	/*
-	 * We never return a pending ext_dabt here because we deliver it to
-	 * the virtual CPU directly when setting the event and it's no longer
-	 * 'pending' at this point.
+	 * We never return a pending ext_dabt or ext_iabt here because we
+	 * deliver it to the virtual CPU directly when setting the event
+	 * and it's no longer 'pending' at this point.
 	 */
 
 	return 0;
@@ -853,16 +853,21 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 	bool serror_pending = events->exception.serror_pending;
 	bool has_esr = events->exception.serror_has_esr;
 	bool ext_dabt_pending = events->exception.ext_dabt_pending;
+	bool ext_iabt_pending = events->exception.ext_iabt_pending;
 	u64 esr = events->exception.serror_esr;
 	int ret = 0;
 
+	/* DABT and IABT cannot happen at the same time. */
+	if (ext_dabt_pending && ext_iabt_pending)
+		return -EINVAL;
 	/*
 	 * Immediately commit the pending SEA to the vCPU's architectural
 	 * state which is necessary since we do not return a pending SEA
 	 * to userspace via KVM_GET_VCPU_EVENTS.
 	 */
-	if (ext_dabt_pending) {
-		ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+	if (ext_dabt_pending || ext_iabt_pending) {
+		ret = kvm_inject_sea(vcpu, ext_iabt_pending,
+				     kvm_vcpu_get_hfar(vcpu));
 		commit_pending_events(vcpu);
 	}
 
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index e4e566ff348b0..a7b047f95887c 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -957,6 +957,7 @@ struct kvm_enable_cap {
 #define KVM_CAP_ARM_EL2_E2H0 241
 #define KVM_CAP_RISCV_MP_STATE_RESET 242
 #define KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED 243
+#define KVM_CAP_ARM_INJECT_EXT_IABT 245
 
 struct kvm_irq_routing_irqchip {
 	__u32 irqchip;