This series contains a bit of a grab bag of improvements to the floating point tests, mainly fp-ptrace. Globally over all the tests we start using defines from the generated sysregs (following the example of the KVM selftests) for SVCR, stop being quite so wasteful with registers when calling into the assembler code, and then expand the coverage of both ZA writes and FPMR (which was missed because fp-ptrace and the 2023 dpISA extensions were on the list at the same time).
Signed-off-by: Mark Brown <broonie@kernel.org>
---
Changes in v2:
- Drop attempt to reference sysreg-defs.h due to build issues, just
  import the defines directly into fp-ptrace. We can revisit later.
- Link to v1: https://lore.kernel.org/r/20241107-arm64-fp-ptrace-fpmr-v1-0-3e5e0b6e3be9@ke...
---
Mark Brown (3):
      kselftest/arm64: Use flag bits for features in fp-ptrace assembler code
      kselftest/arm64: Expand the set of ZA writes fp-ptrace does
      kselftest/arm64: Add FPMR coverage to fp-ptrace
 tools/testing/selftests/arm64/fp/fp-ptrace-asm.S | 41 ++++--
 tools/testing/selftests/arm64/fp/fp-ptrace.c | 161 +++++++++++++++++++++--
 tools/testing/selftests/arm64/fp/fp-ptrace.h | 12 ++
 tools/testing/selftests/arm64/fp/sme-inst.h | 2 +
 4 files changed, 188 insertions(+), 28 deletions(-)
---
base-commit: 8e929cb546ee42c9a61d24fae60605e9e3192354
change-id: 20241105-arm64-fp-ptrace-fpmr-ff061facd3da
Best regards,
The assembler portions of fp-ptrace are passed feature flags by the C code indicating which architectural features are supported. Currently these use an entire register for each flag, which is wasteful and gets cumbersome as new flags are added. Switch to using flag bits in a single register to make things easier to maintain.
No functional change.
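For illustration, the shape of the new calling convention (a condensed sketch using the HAVE_* defines this patch adds; the full version is in the diff below):

        /* C caller: pack supported features into one flags word */
        int flags = 0;

        if (sve_supported())
                flags |= HAVE_SVE;
        if (sme_supported())
                flags |= HAVE_SME;

        load_and_save(flags);

On the assembler side each feature is then tested with tbz x0, #HAVE_*_SHIFT rather than consuming a separate argument register per feature.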
Signed-off-by: Mark Brown <broonie@kernel.org>
---
 tools/testing/selftests/arm64/fp/fp-ptrace-asm.S | 32 +++++++++++++-----------
 tools/testing/selftests/arm64/fp/fp-ptrace.c | 17 ++++++++++---
 tools/testing/selftests/arm64/fp/fp-ptrace.h | 10 ++++++++
 3 files changed, 41 insertions(+), 18 deletions(-)
diff --git a/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S b/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S
index 7ad59d92d02b28e4a6b328fde96039329ea8862a..5e7e9c878f2ce797e3ba5f4033a42526830393e6 100644
--- a/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S
@@ -15,10 +15,7 @@
 // Load and save register values with pauses for ptrace
 //
-// x0 - SVE in use
-// x1 - SME in use
-// x2 - SME2 in use
-// x3 - FA64 supported
+// x0 - HAVE_ flags indicating which features are in use
 .globl load_and_save
 load_and_save:
@@ -44,7 +41,7 @@ load_and_save:
         ldp q30, q31, [x7, #16 * 30]
         // SME?
-        cbz x1, check_sve_in
+        tbz x0, #HAVE_SME_SHIFT, check_sve_in
         adrp x7, svcr_in
         ldr x7, [x7, :lo12:svcr_in]
@@ -64,7 +61,7 @@ load_and_save:
         bne 1b
         // ZT?
-        cbz x2, check_sm_in
+        tbz x0, #HAVE_SME2_SHIFT, check_sm_in
         adrp x6, zt_in
         add x6, x6, :lo12:zt_in
         _ldr_zt 6
@@ -72,12 +69,16 @@ load_and_save:
         // In streaming mode?
 check_sm_in:
         tbz x7, #SVCR_SM_SHIFT, check_sve_in
-        mov x4, x3              // Load FFR if we have FA64
+
+        // Load FFR if we have FA64
+        mov x4, #0
+        tbz x0, #HAVE_FA64_SHIFT, load_sve
+        mov x4, #1
         b load_sve
         // SVE?
 check_sve_in:
-        cbz x0, wait_for_writes
+        tbz x0, #HAVE_SVE_SHIFT, wait_for_writes
         mov x4, #1
 load_sve:
@@ -165,8 +166,7 @@ wait_for_writes:
         stp q28, q29, [x7, #16 * 28]
         stp q30, q31, [x7, #16 * 30]
-        // SME?
-        cbz x1, check_sve_out
+        tbz x0, #HAVE_SME_SHIFT, check_sve_out
         rdsvl 11, 1
         adrp x6, sme_vl_out
@@ -187,7 +187,7 @@ wait_for_writes:
         bne 1b
         // ZT?
-        cbz x2, check_sm_out
+        tbz x0, #HAVE_SME2_SHIFT, check_sm_out
         adrp x6, zt_out
         add x6, x6, :lo12:zt_out
         _str_zt 6
@@ -195,12 +195,16 @@ wait_for_writes:
         // In streaming mode?
 check_sm_out:
         tbz x7, #SVCR_SM_SHIFT, check_sve_out
-        mov x4, x3              // FFR?
+
+        // Do we have FA64 and FFR?
+        mov x4, #0
+        tbz x0, #HAVE_FA64_SHIFT, read_sve
+        mov x4, #1
         b read_sve
         // SVE?
 check_sve_out:
-        cbz x0, wait_for_reads
+        tbz x0, #HAVE_SVE_SHIFT, wait_for_reads
         mov x4, #1
         rdvl x7, #1
@@ -271,7 +275,7 @@ wait_for_reads:
         brk #0
         // Ensure we don't leave ourselves in streaming mode
-        cbz x1, out
+        tbz x0, #HAVE_SME_SHIFT, out
         msr S3_3_C4_C2_2, xzr
 out:
diff --git a/tools/testing/selftests/arm64/fp/fp-ptrace.c b/tools/testing/selftests/arm64/fp/fp-ptrace.c
index c7ceafe5f4712b2c93823c1025f3a23ac0594325..d96af27487fa642e94ecc971f53cb78c233e7b44 100644
--- a/tools/testing/selftests/arm64/fp/fp-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace.c
@@ -82,7 +82,7 @@ uint64_t sve_vl_out;
 uint64_t sme_vl_out;
 uint64_t svcr_in, svcr_expected, svcr_out;
-void load_and_save(int sve, int sme, int sme2, int fa64);
+void load_and_save(int flags);
 static bool got_alarm;
@@ -198,7 +198,7 @@ static int vl_expected(struct test_config *config)
 static void run_child(struct test_config *config)
 {
-        int ret;
+        int ret, flags;
         /* Let the parent attach to us */
         ret = ptrace(PTRACE_TRACEME, 0, 0, 0);
@@ -224,8 +224,17 @@ static void run_child(struct test_config *config)
         }
         /* Load values and wait for the parent */
-        load_and_save(sve_supported(), sme_supported(),
-                      sme2_supported(), fa64_supported());
+        flags = 0;
+        if (sve_supported())
+                flags |= HAVE_SVE;
+        if (sme_supported())
+                flags |= HAVE_SME;
+        if (sme2_supported())
+                flags |= HAVE_SME2;
+        if (fa64_supported())
+                flags |= HAVE_FA64;
+
+        load_and_save(flags);
         exit(0);
 }
diff --git a/tools/testing/selftests/arm64/fp/fp-ptrace.h b/tools/testing/selftests/arm64/fp/fp-ptrace.h
index db4f2c4d750c5c04e3d257e37a1966296ca74956..36ca627e1980f6a384d9ed0f2e9d4bd32d90f893 100644
--- a/tools/testing/selftests/arm64/fp/fp-ptrace.h
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace.h
@@ -10,4 +10,14 @@
 #define SVCR_SM (1 << SVCR_SM_SHIFT)
 #define SVCR_ZA (1 << SVCR_ZA_SHIFT)
+#define HAVE_SVE_SHIFT 0
+#define HAVE_SME_SHIFT 1
+#define HAVE_SME2_SHIFT 2
+#define HAVE_FA64_SHIFT 3
+
+#define HAVE_SVE (1 << HAVE_SVE_SHIFT)
+#define HAVE_SME (1 << HAVE_SME_SHIFT)
+#define HAVE_SME2 (1 << HAVE_SME2_SHIFT)
+#define HAVE_FA64 (1 << HAVE_FA64_SHIFT)
+
 #endif
Currently our test for implementable ZA writes is written in a somewhat convoluted fashion which excludes all changes that clear SVCR.SM, even though we can actually support those since changing the vector length resets SVCR. Make the logic more direct, enabling us to actually run these cases.
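Expressed more directly, the new check boils down to the following (a condensed restatement of the logic in the diff below, not separate code):

        if (config->sme_vl_in != config->sme_vl_expected)
                /* The VL change itself exits streaming mode */
                return !(config->svcr_expected & SVCR_SM);

        /* With no VL change, streaming mode must stay the same */
        return (config->svcr_in & SVCR_SM) ==
               (config->svcr_expected & SVCR_SM);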
Signed-off-by: Mark Brown <broonie@kernel.org>
---
 tools/testing/selftests/arm64/fp/fp-ptrace.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/tools/testing/selftests/arm64/fp/fp-ptrace.c b/tools/testing/selftests/arm64/fp/fp-ptrace.c
index d96af27487fa642e94ecc971f53cb78c233e7b44..56cf6e02c535b5c1cf1134c5b1973605c96024ee 100644
--- a/tools/testing/selftests/arm64/fp/fp-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace.c
@@ -1078,21 +1078,19 @@ static void sve_write(pid_t child, struct test_config *config)
 static bool za_write_supported(struct test_config *config)
 {
-        if (config->svcr_expected & SVCR_SM) {
-                if (!(config->svcr_in & SVCR_SM))
+        if (config->sme_vl_in != config->sme_vl_expected) {
+                /* Changing the SME VL exits streaming mode. */
+                if (config->svcr_expected & SVCR_SM) {
                         return false;
-
-                /* Changing the SME VL exits streaming mode */
-                if (config->sme_vl_in != config->sme_vl_expected) {
+                }
+        } else {
+                /* Otherwise we can't change streaming mode */
+                if ((config->svcr_in & SVCR_SM) !=
+                    (config->svcr_expected & SVCR_SM)) {
                         return false;
                 }
         }
-        /* Can't disable SM outside a VL change */
-        if ((config->svcr_in & SVCR_SM) &&
-            !(config->svcr_expected & SVCR_SM))
-                return false;
-
         return true;
 }
Add coverage for FPMR to fp-ptrace. FPMR can be available independently of SVE and SME; if SME is supported then FPMR is cleared by entering and exiting streaming mode. As with other registers we generate random values to load into the register, restricting these to bitfields which are always defined. We also leave at zero those bitfields whose valid values depend on the set of supported FP8 formats, to reduce complexity; it is unlikely that specific bitfields will be affected by ptrace issues.
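Concretely, the random values are constrained to the always-defined scaling and overflow-mode fields (a condensed sketch of what the patch below does; FPMR_SAFE_BITS and sm_change are defined in the patch):

        /* Only LSCALE2/NSCALE/LSCALE/OSC/OSM, which are valid
         * regardless of the implemented FP8 formats */
        fill_random(&fpmr_in, sizeof(fpmr_in));
        fpmr_in &= FPMR_SAFE_BITS;

        /* Entering or exiting streaming mode clears FPMR */
        fpmr_expected = sm_change ? 0 : fpmr_in;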
Signed-off-by: Mark Brown <broonie@kernel.org>
---
 tools/testing/selftests/arm64/fp/fp-ptrace-asm.S | 23 +++--
 tools/testing/selftests/arm64/fp/fp-ptrace.c | 126 +++++++++++++++++++++++
 tools/testing/selftests/arm64/fp/fp-ptrace.h | 2 +
 tools/testing/selftests/arm64/fp/sme-inst.h | 2 +
 4 files changed, 146 insertions(+), 7 deletions(-)
diff --git a/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S b/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S
index 5e7e9c878f2ce797e3ba5f4033a42526830393e6..6195b9969d67e15d46ca71f1d273b2d43ef4ae7a 100644
--- a/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace-asm.S
@@ -71,14 +71,12 @@ check_sm_in:
         tbz x7, #SVCR_SM_SHIFT, check_sve_in
         // Load FFR if we have FA64
-        mov x4, #0
-        tbz x0, #HAVE_FA64_SHIFT, load_sve
-        mov x4, #1
+        ubfx x4, x0, #HAVE_FA64_SHIFT, #1
         b load_sve
         // SVE?
 check_sve_in:
-        tbz x0, #HAVE_SVE_SHIFT, wait_for_writes
+        tbz x0, #HAVE_SVE_SHIFT, check_fpmr_in
         mov x4, #1
 load_sve:
@@ -143,6 +141,13 @@ load_sve:
         ldr p14, [x7, #14, MUL VL]
         ldr p15, [x7, #15, MUL VL]
+        // This has to come after we set PSTATE.SM
+check_fpmr_in:
+        tbz x0, #HAVE_FPMR_SHIFT, wait_for_writes
+        adrp x7, fpmr_in
+        ldr x7, [x7, :lo12:fpmr_in]
+        msr FPMR, x7
+
 wait_for_writes:
         // Wait for the parent
         brk #0
@@ -166,6 +171,12 @@ wait_for_writes:
         stp q28, q29, [x7, #16 * 28]
         stp q30, q31, [x7, #16 * 30]
+        tbz x0, #HAVE_FPMR_SHIFT, check_sme_out
+        mrs x7, REG_FPMR
+        adrp x6, fpmr_out
+        str x7, [x6, :lo12:fpmr_out]
+
+check_sme_out:
         tbz x0, #HAVE_SME_SHIFT, check_sve_out
         rdsvl 11, 1
@@ -197,9 +208,7 @@ check_sm_out:
         tbz x7, #SVCR_SM_SHIFT, check_sve_out
         // Do we have FA64 and FFR?
-        mov x4, #0
-        tbz x0, #HAVE_FA64_SHIFT, read_sve
-        mov x4, #1
+        ubfx x4, x0, #HAVE_FA64_SHIFT, #1
         b read_sve
         // SVE?
diff --git a/tools/testing/selftests/arm64/fp/fp-ptrace.c b/tools/testing/selftests/arm64/fp/fp-ptrace.c
index 56cf6e02c535b5c1cf1134c5b1973605c96024ee..4930e03a7b9903eab85a1e004354939f6a9fe9d4 100644
--- a/tools/testing/selftests/arm64/fp/fp-ptrace.c
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace.c
@@ -31,6 +31,14 @@
 #include "fp-ptrace.h"
+#include <linux/bits.h>
+
+#define FPMR_LSCALE2_MASK GENMASK(37, 32)
+#define FPMR_NSCALE_MASK GENMASK(31, 24)
+#define FPMR_LSCALE_MASK GENMASK(22, 16)
+#define FPMR_OSC_MASK GENMASK(15, 15)
+#define FPMR_OSM_MASK GENMASK(14, 14)
+
 /* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */
 #ifndef NT_ARM_SVE
 #define NT_ARM_SVE 0x405
@@ -48,11 +56,22 @@
 #define NT_ARM_ZT 0x40d
 #endif
+#ifndef NT_ARM_FPMR
+#define NT_ARM_FPMR 0x40e
+#endif
+
 #define ARCH_VQ_MAX 256
 /* VL 128..2048 in powers of 2 */
 #define MAX_NUM_VLS 5
+/*
+ * FPMR bits we can set without doing feature checks to see if values
+ * are valid.
+ */
+#define FPMR_SAFE_BITS (FPMR_LSCALE2_MASK | FPMR_NSCALE_MASK | \
+                        FPMR_LSCALE_MASK | FPMR_OSC_MASK | FPMR_OSM_MASK)
+
 #define NUM_FPR 32
 __uint128_t v_in[NUM_FPR];
 __uint128_t v_expected[NUM_FPR];
@@ -78,6 +97,8 @@ char zt_in[ZT_SIG_REG_BYTES];
 char zt_expected[ZT_SIG_REG_BYTES];
 char zt_out[ZT_SIG_REG_BYTES];
+uint64_t fpmr_in, fpmr_expected, fpmr_out;
+
 uint64_t sve_vl_out;
 uint64_t sme_vl_out;
 uint64_t svcr_in, svcr_expected, svcr_out;
@@ -128,6 +149,11 @@ static bool fa64_supported(void)
         return getauxval(AT_HWCAP2) & HWCAP2_SME_FA64;
 }
+static bool fpmr_supported(void)
+{
+        return getauxval(AT_HWCAP2) & HWCAP2_FPMR;
+}
+
 static bool compare_buffer(const char *name, void *out, void *expected, size_t size)
 {
@@ -233,6 +259,8 @@ static void run_child(struct test_config *config)
                 flags |= HAVE_SME2;
         if (fa64_supported())
                 flags |= HAVE_FA64;
+        if (fpmr_supported())
+                flags |= HAVE_FPMR;
         load_and_save(flags);
@@ -321,6 +349,14 @@ static void read_child_regs(pid_t child)
                 iov_child.iov_len = sizeof(zt_out);
                 read_one_child_regs(child, "ZT", &iov_parent, &iov_child);
         }
+
+        if (fpmr_supported()) {
+                iov_parent.iov_base = &fpmr_out;
+                iov_parent.iov_len = sizeof(fpmr_out);
+                iov_child.iov_base = &fpmr_out;
+                iov_child.iov_len = sizeof(fpmr_out);
+                read_one_child_regs(child, "FPMR", &iov_parent, &iov_child);
+        }
 }
 static bool continue_breakpoint(pid_t child,
@@ -595,6 +631,26 @@ static bool check_ptrace_values_zt(pid_t child, struct test_config *config)
         return compare_buffer("initial ZT", buf, zt_in, ZT_SIG_REG_BYTES);
 }
+static bool check_ptrace_values_fpmr(pid_t child, struct test_config *config)
+{
+        uint64_t val;
+        struct iovec iov;
+        int ret;
+
+        if (!fpmr_supported())
+                return true;
+
+        iov.iov_base = &val;
+        iov.iov_len = sizeof(val);
+        ret = ptrace(PTRACE_GETREGSET, child, NT_ARM_FPMR, &iov);
+        if (ret != 0) {
+                ksft_print_msg("Failed to read initial FPMR: %s (%d)\n",
+                               strerror(errno), errno);
+                return false;
+        }
+
+        return compare_buffer("initial FPMR", &val, &fpmr_in, sizeof(val));
+}
 static bool check_ptrace_values(pid_t child, struct test_config *config)
 {
@@ -629,6 +685,9 @@ static bool check_ptrace_values(pid_t child, struct test_config *config)
         if (!check_ptrace_values_zt(child, config))
                 pass = false;
+        if (!check_ptrace_values_fpmr(child, config))
+                pass = false;
+
         return pass;
 }
@@ -832,11 +891,18 @@ static void set_initial_values(struct test_config *config)
 {
         int vq = __sve_vq_from_vl(vl_in(config));
         int sme_vq = __sve_vq_from_vl(config->sme_vl_in);
+        bool sm_change;
         svcr_in = config->svcr_in;
         svcr_expected = config->svcr_expected;
         svcr_out = 0;
+        if (sme_supported() &&
+            (svcr_in & SVCR_SM) != (svcr_expected & SVCR_SM))
+                sm_change = true;
+        else
+                sm_change = false;
+
         fill_random(&v_in, sizeof(v_in));
         memcpy(v_expected, v_in, sizeof(v_in));
         memset(v_out, 0, sizeof(v_out));
@@ -883,6 +949,21 @@ static void set_initial_values(struct test_config *config)
                 memset(zt_expected, 0, ZT_SIG_REG_BYTES);
                 memset(zt_out, 0, sizeof(zt_out));
         }
+
+        if (fpmr_supported()) {
+                fill_random(&fpmr_in, sizeof(fpmr_in));
+                fpmr_in &= FPMR_SAFE_BITS;
+
+                /* Entering or exiting streaming mode clears FPMR */
+                if (sm_change)
+                        fpmr_expected = 0;
+                else
+                        fpmr_expected = fpmr_in;
+        } else {
+                fpmr_in = 0;
+                fpmr_expected = 0;
+                fpmr_out = 0;
+        }
 }
 static bool check_memory_values(struct test_config *config)
@@ -933,6 +1014,12 @@ static bool check_memory_values(struct test_config *config)
         if (!compare_buffer("saved ZT", zt_out, zt_expected, ZT_SIG_REG_BYTES))
                 pass = false;
+        if (fpmr_out != fpmr_expected) {
+                ksft_print_msg("Mismatch in saved FPMR: %lx != %lx\n",
+                               fpmr_out, fpmr_expected);
+                pass = false;
+        }
+
         return pass;
 }
@@ -1010,6 +1097,36 @@ static void fpsimd_write(pid_t child, struct test_config *test_config)
                        strerror(errno), errno);
 }
+static bool fpmr_write_supported(struct test_config *config)
+{
+        if (!fpmr_supported())
+                return false;
+
+        if (!sve_sme_same(config))
+                return false;
+
+        return true;
+}
+
+static void fpmr_write_expected(struct test_config *config)
+{
+        fill_random(&fpmr_expected, sizeof(fpmr_expected));
+        fpmr_expected &= FPMR_SAFE_BITS;
+}
+
+static void fpmr_write(pid_t child, struct test_config *config)
+{
+        struct iovec iov;
+        int ret;
+
+        iov.iov_len = sizeof(fpmr_expected);
+        iov.iov_base = &fpmr_expected;
+        ret = ptrace(PTRACE_SETREGSET, child, NT_ARM_FPMR, &iov);
+        if (ret != 0)
+                ksft_print_msg("Failed to write FPMR: %s (%d)\n",
+                               strerror(errno), errno);
+}
+
 static void sve_write_expected(struct test_config *config)
 {
         int vl = vl_expected(config);
@@ -1266,6 +1383,12 @@ static struct test_definition base_test_defs[] = {
                 .set_expected_values = fpsimd_write_expected,
                 .modify_values = fpsimd_write,
         },
+        {
+                .name = "FPMR write",
+                .supported = fpmr_write_supported,
+                .set_expected_values = fpmr_write_expected,
+                .modify_values = fpmr_write,
+        },
 };
 static struct test_definition sve_test_defs[] = {
@@ -1475,6 +1598,9 @@ int main(void)
         if (fa64_supported())
                 ksft_print_msg("FA64 supported\n");
+        if (fpmr_supported())
+                ksft_print_msg("FPMR supported\n");
+
         ksft_set_plan(tests);
         /* Get signal handers ready before we start any children */
diff --git a/tools/testing/selftests/arm64/fp/fp-ptrace.h b/tools/testing/selftests/arm64/fp/fp-ptrace.h
index 36ca627e1980f6a384d9ed0f2e9d4bd32d90f893..c06919aaf1f70bee4b607f71e3213ef2ddf8b97d 100644
--- a/tools/testing/selftests/arm64/fp/fp-ptrace.h
+++ b/tools/testing/selftests/arm64/fp/fp-ptrace.h
@@ -14,10 +14,12 @@
 #define HAVE_SME_SHIFT 1
 #define HAVE_SME2_SHIFT 2
 #define HAVE_FA64_SHIFT 3
+#define HAVE_FPMR_SHIFT 4
 #define HAVE_SVE (1 << HAVE_SVE_SHIFT)
 #define HAVE_SME (1 << HAVE_SME_SHIFT)
 #define HAVE_SME2 (1 << HAVE_SME2_SHIFT)
 #define HAVE_FA64 (1 << HAVE_FA64_SHIFT)
+#define HAVE_FPMR (1 << HAVE_FPMR_SHIFT)
 #endif
diff --git a/tools/testing/selftests/arm64/fp/sme-inst.h b/tools/testing/selftests/arm64/fp/sme-inst.h
index 9292bba5400bb81b8e34769fa3eb70811746d8b8..85b9184e0835c59dbd5674b0210e6b9a43c1be4c 100644
--- a/tools/testing/selftests/arm64/fp/sme-inst.h
+++ b/tools/testing/selftests/arm64/fp/sme-inst.h
@@ -5,6 +5,8 @@
 #ifndef SME_INST_H
 #define SME_INST_H
+#define REG_FPMR S3_3_C4_C4_2
+
 /*
  * RDSVL X\nx, #\imm
  */
On Tue, Nov 12, 2024 at 01:08:16PM +0000, Mark Brown wrote:
+	// This has to come after we set PSTATE.SM
+check_fpmr_in:
+	tbz x0, #HAVE_FPMR_SHIFT, wait_for_writes
+	adrp x7, fpmr_in
+	ldr x7, [x7, :lo12:fpmr_in]
+	msr FPMR, x7
Did this build for you? I may not have a new enough assembler.
fp-ptrace-asm.S:149:6: error: expected writable system register or pstate
        msr FPMR, x7
I changed it to REG_FPMR locally.
On Tue, Nov 12, 2024 at 01:26:52PM +0000, Catalin Marinas wrote:
On Tue, Nov 12, 2024 at 01:08:16PM +0000, Mark Brown wrote:
+	// This has to come after we set PSTATE.SM
+check_fpmr_in:
+	tbz x0, #HAVE_FPMR_SHIFT, wait_for_writes
+	adrp x7, fpmr_in
+	ldr x7, [x7, :lo12:fpmr_in]
+	msr FPMR, x7
Did this build for you? I may not have a new enough assembler.
Yes, it even ran through tests. I suspect it's picking up some (possibly stale generated?) header file which has the FPMR define in it.
fp-ptrace-asm.S:149:6: error: expected writable system register or pstate
        msr FPMR, x7
I changed it to REG_FPMR locally.
Thanks.
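For anyone hitting the same build error: REG_FPMR is defined in sme-inst.h as the raw S3_3_C4_C4_2 encoding, which assemblers accept whether or not they know the FPMR name. A sketch of the generic-encoding form (illustrative only, not a quote of the applied commit):

	// sme-inst.h: FPMR expressed as op0/op1/CRn/CRm/op2
	#define REG_FPMR	S3_3_C4_C4_2

	// fp-ptrace-asm.S: write and read FPMR via the generic encoding
	msr	REG_FPMR, x7
	mrs	x7, REG_FPMR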
On Tue, 12 Nov 2024 13:08:13 +0000, Mark Brown wrote:
This series contains a bit of a grab bag of improvements to the floating point tests, mainly fp-ptrace. Globally over all the tests we start using defines from the generated sysregs (following the example of the KVM selftests) for SVCR, stop being quite so wasteful with registers when calling into the assembler code, and then expand the coverage of both ZA writes and FPMR (which was missed because fp-ptrace and the 2023 dpISA extensions were on the list at the same time).
[...]
Applied to arm64 (for-next/kselftest), thanks!
[1/3] kselftest/arm64: Use flag bits for features in fp-ptrace assembler code
      https://git.kernel.org/arm64/c/c0350076c13e
[2/3] kselftest/arm64: Expand the set of ZA writes fp-ptrace does
      https://git.kernel.org/arm64/c/7e9c5b00009a
[3/3] kselftest/arm64: Add FPMR coverage to fp-ptrace
      https://git.kernel.org/arm64/c/7dbd26d0b22d