From: James Morse <james.morse@arm.com>
commit 6685f5d572c22e1003e7c0d089afe1c64340ab1f upstream.
commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to guests, but didn't add trap handling. A previous patch supplied the missing trap handling.
Existing VMs that have the MPAM field of ID_AA64PFR0_EL1 set need to be migratable, but there is little point enabling the MPAM CPU interface on new VMs until there is something a guest can do with it.
Clear the MPAM field from the guest's ID_AA64PFR0_EL1 and, on hardware that supports MPAM, politely ignore the VMM's attempts to set this bit.
Guests exposed to this bug have the sanitised value of the MPAM field, so only the correct value needs to be ignored. This means the field can continue to be used to block migration to incompatible hardware (between MPAM=1 and MPAM=5), and the VMM can't rely on the field being ignored.
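To make the rule concrete, here is a standalone sketch (not part of the patch; the helper name, plain-C types and MPAM_MASK definition are illustrative only) of the accept-then-clear behaviour described above, mirroring the handler added in the diff below:

/*
 * Illustrative only: MPAM is ID_AA64PFR0_EL1 bits [43:40]. A write that
 * carries the host's sanitised MPAM value is tolerated but the field is
 * quietly cleared; any other non-zero MPAM value keeps failing the usual
 * ID-register checks, so migration between MPAM=1 and MPAM=5 hosts is
 * still refused.
 */
#include <stdbool.h>
#include <stdint.h>

#define MPAM_MASK	(0xfULL << 40)

static bool pfr0_user_write(uint64_t hw_sanitised, uint64_t user_val,
			    uint64_t *stored)
{
	if ((user_val & MPAM_MASK) == (hw_sanitised & MPAM_MASK))
		user_val &= ~MPAM_MASK;		/* accept, then ignore */

	if (user_val & MPAM_MASK)
		return false;			/* unknown MPAM value: reject */

	*stored = user_val;
	return true;
}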
Signed-off-by: James Morse <james.morse@arm.com>
Co-developed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Joey Gouly <joey.gouly@arm.com>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20241030160317.2528209-7-joey.gouly@arm.com
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
[maz: adapted to lack of ID_FILTERED()]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: stable@vger.kernel.org
---
 arch/arm64/kvm/sys_regs.c | 55 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 52 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index ff8c4e1b847ed..fbed433283c9b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1535,6 +1535,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2);
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
 		break;
 	case SYS_ID_AA64PFR2_EL1:
 		/* We only expose FPMR */
@@ -1724,6 +1725,13 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 
 	val &= ~ID_AA64PFR0_EL1_AMU_MASK;
 
+	/*
+	 * MPAM is disabled by default as KVM also needs a set of PARTID to
+	 * program the MPAMVPMx_EL2 PARTID remapping registers with. But some
+	 * older kernels let the guest see the ID bit.
+	 */
+	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
+
 	return val;
 }
 
@@ -1834,6 +1842,42 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 	return set_id_reg(vcpu, rd, val);
 }
 
+static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
+			       const struct sys_reg_desc *rd, u64 user_val)
+{
+	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+	u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
+
+	/*
+	 * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
+	 * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
+	 * guests, but didn't add trap handling. KVM doesn't support MPAM and
+	 * always returns an UNDEF for these registers. The guest must see 0
+	 * for this field.
+	 *
+	 * But KVM must also accept values from user-space that were provided
+	 * by KVM. On CPUs that support MPAM, permit user-space to write
+	 * the sanitizied value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
+	 */
+	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
+		user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
+
+	return set_id_reg(vcpu, rd, user_val);
+}
+
+static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
+			       const struct sys_reg_desc *rd, u64 user_val)
+{
+	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+	u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
+
+	/* See set_id_aa64pfr0_el1 for comment about MPAM */
+	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
+		user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
+
+	return set_id_reg(vcpu, rd, user_val);
+}
+
 /*
  * cpufeature ID register user accessors
  *
@@ -2377,7 +2421,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
 	  .access = access_id_reg,
 	  .get_user = get_id_reg,
-	  .set_user = set_id_reg,
+	  .set_user = set_id_aa64pfr0_el1,
 	  .reset = read_sanitised_id_aa64pfr0_el1,
 	  .val = ~(ID_AA64PFR0_EL1_AMU |
 		   ID_AA64PFR0_EL1_MPAM |
@@ -2385,7 +2429,12 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 		   ID_AA64PFR0_EL1_RAS |
 		   ID_AA64PFR0_EL1_AdvSIMD |
 		   ID_AA64PFR0_EL1_FP), },
-	ID_WRITABLE(ID_AA64PFR1_EL1, ~(ID_AA64PFR1_EL1_PFAR |
+	{ SYS_DESC(SYS_ID_AA64PFR1_EL1),
+	  .access = access_id_reg,
+	  .get_user = get_id_reg,
+	  .set_user = set_id_aa64pfr1_el1,
+	  .reset = kvm_read_sanitised_id_reg,
+	  .val = ~(ID_AA64PFR1_EL1_PFAR |
 				       ID_AA64PFR1_EL1_DF2 |
 				       ID_AA64PFR1_EL1_MTEX |
 				       ID_AA64PFR1_EL1_THE |
@@ -2397,7 +2446,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 				       ID_AA64PFR1_EL1_RES0 |
 				       ID_AA64PFR1_EL1_MPAM_frac |
 				       ID_AA64PFR1_EL1_RAS_frac |
-				       ID_AA64PFR1_EL1_MTE)),
+				       ID_AA64PFR1_EL1_MTE), },
 	ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
 	ID_UNALLOCATED(4,3),
 	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
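For completeness, a hypothetical VMM-side snippet showing how the new set_user path is reached. The register encoding follows the KVM UAPI; the function name and file-descriptor handling are assumptions, and error handling is omitted:

/*
 * Hypothetical userspace (VMM) restore of ID_AA64PFR0_EL1, e.g. after
 * migration. Assumes vcpu_fd came from KVM_CREATE_VCPU.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>			/* ARM64_SYS_REG() */

/* ID_AA64PFR0_EL1 is op0=3, op1=0, CRn=0, CRm=4, op2=0 */
#define REG_ID_AA64PFR0_EL1	ARM64_SYS_REG(3, 0, 0, 4, 0)

static int restore_id_aa64pfr0(int vcpu_fd, uint64_t saved_val)
{
	struct kvm_one_reg reg = {
		.id   = REG_ID_AA64PFR0_EL1,
		.addr = (uintptr_t)&saved_val,
	};

	/*
	 * With this patch, a saved MPAM field equal to the host's sanitised
	 * value is accepted and silently cleared by set_id_aa64pfr0_el1();
	 * any other non-zero MPAM value is still rejected, which is what
	 * blocks migration between MPAM=1 and MPAM=5 hosts.
	 */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}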
[ Sasha's backport helper bot ]
Hi,
The upstream commit SHA1 provided is correct: 6685f5d572c22e1003e7c0d089afe1c64340ab1f
WARNING: Author mismatch between patch and upstream commit:
Backport author: Marc Zyngier <maz@kernel.org>
Commit author: James Morse <james.morse@arm.com>
Status in newer kernel trees:
6.12.y | Not found
Note: The patch differs from the upstream commit:
---
1:  6685f5d572c2 ! 1:  aab69d989e76 KVM: arm64: Disable MPAM visibility by default and ignore VMM writes
    @@ Metadata
     ## Commit message ##
     KVM: arm64: Disable MPAM visibility by default and ignore VMM writes
 
    +commit 6685f5d572c22e1003e7c0d089afe1c64340ab1f upstream.
    +
     commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
     in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
     guests, but didn't add trap handling. A previous patch supplied the missing trap
    @@ Commit message
     Reviewed-by: Marc Zyngier <maz@kernel.org>
     Link: https://lore.kernel.org/r/20241030160317.2528209-7-joey.gouly@arm.com
     Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
    +[maz: adapted to lack of ID_FILTERED()]
    +Signed-off-by: Marc Zyngier <maz@kernel.org>
    +Cc: stable@vger.kernel.org
     ## arch/arm64/kvm/sys_regs.c ##
    @@ arch/arm64/kvm/sys_regs.c: static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
    @@ arch/arm64/kvm/sys_regs.c: static u64 __kvm_read_sanitised_id_reg(const struct k
     		break;
     	case SYS_ID_AA64PFR2_EL1:
     		/* We only expose FPMR */
    -@@ arch/arm64/kvm/sys_regs.c: static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
    +@@ arch/arm64/kvm/sys_regs.c: static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
     	val &= ~ID_AA64PFR0_EL1_AMU_MASK;
    @@ arch/arm64/kvm/sys_regs.c: static u64 sanitise_id_aa64pfr0_el1(const struct kvm_
     }
    @@ arch/arm64/kvm/sys_regs.c: static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
    +	return set_id_reg(vcpu, rd, val);
     }
    - static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
    --		const struct sys_reg_desc *rd, u64 val)
    ++static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
    +			       const struct sys_reg_desc *rd, u64 user_val)
    - {
    --	return set_id_reg(vcpu, rd, val);
    ++{
    +	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
    +	u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
    +
    @@ arch/arm64/kvm/sys_regs.c: static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
    +	user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
    +
    +	return set_id_reg(vcpu, rd, user_val);
    - }
    -
    ++}
    ++
     /*
    + * cpufeature ID register user accessors
    + *
    @@ arch/arm64/kvm/sys_regs.c: static const struct sys_reg_desc sys_reg_descs[] = {
    -		   ID_AA64PFR0_EL1_RAS |
    -		   ID_AA64PFR0_EL1_AdvSIMD |
    -		   ID_AA64PFR0_EL1_FP)),
    +	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
    +	  .access = access_id_reg,
    +	  .get_user = get_id_reg,
    +-	  .set_user = set_id_reg,
    ++	  .set_user = set_id_aa64pfr0_el1,
    +	  .reset = read_sanitised_id_aa64pfr0_el1,
    +	  .val = ~(ID_AA64PFR0_EL1_AMU |
    +		   ID_AA64PFR0_EL1_MPAM |
    +@@ arch/arm64/kvm/sys_regs.c: static const struct sys_reg_desc sys_reg_descs[] = {
    +		   ID_AA64PFR0_EL1_RAS |
    +		   ID_AA64PFR0_EL1_AdvSIMD |
    +		   ID_AA64PFR0_EL1_FP), },
    -	ID_WRITABLE(ID_AA64PFR1_EL1, ~(ID_AA64PFR1_EL1_PFAR |
    -+	ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
    -+		    ~(ID_AA64PFR1_EL1_PFAR |
    ++	{ SYS_DESC(SYS_ID_AA64PFR1_EL1),
    ++	  .access = access_id_reg,
    ++	  .get_user = get_id_reg,
    ++	  .set_user = set_id_aa64pfr1_el1,
    ++	  .reset = kvm_read_sanitised_id_reg,
    ++	  .val = ~(ID_AA64PFR1_EL1_PFAR |
     			ID_AA64PFR1_EL1_DF2 |
     			ID_AA64PFR1_EL1_MTEX |
     			ID_AA64PFR1_EL1_THE |
    +@@ arch/arm64/kvm/sys_regs.c: static const struct sys_reg_desc sys_reg_descs[] = {
    +			ID_AA64PFR1_EL1_RES0 |
    +			ID_AA64PFR1_EL1_MPAM_frac |
    +			ID_AA64PFR1_EL1_RAS_frac |
    +-			ID_AA64PFR1_EL1_MTE)),
    ++			ID_AA64PFR1_EL1_MTE), },
    +	ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
    +	ID_UNALLOCATED(4,3),
    +	ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
---
Results of testing on various branches:
| Branch                    | Patch Apply | Build Test |
|---------------------------|-------------|------------|
| stable/linux-6.12.y       | Success     | Success    |
linux-stable-mirror@lists.linaro.org