The SBI Firmware Features (FWFT) extension allows S-mode to request specific features (either hardware or software) to be enabled. This series uses this extension to request misaligned access exception delegation to S-mode so that the kernel can handle such accesses. It also adds support for the FWFT SBI extension in KVM, based on the misaligned access handling infrastructure.
The FWFT SBI extension is part of the SBI V3.0 specification [1]. It can be tested using the QEMU provided at [2], which contains the series from [3]. Upstream kvm-unit-tests can be used inside KVM to test the correct delegation of misaligned exceptions. Upstream OpenSBI can be used.
Note: since SBI V3.0 is not yet ratified, the FWFT extension API is split between an interface-only part and the implementation, which allows picking up only the interface, which has no hard dependency on SBI V3.0.
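For reference, every FWFT access in this series boils down to a single SBI ecall per hart. A minimal sketch (using the existing sbi_ecall() helper and the constants added by patch 1; the wrapper name is made up here, the real helpers are added by patches 4 and 5) of what requesting misaligned delegation looks like:

/* Sketch only: ask the SBI to delegate misaligned exceptions to S-mode
 * on the calling hart, mirroring what sbi_fwft_set() does in patch 5. */
static int request_misaligned_deleg(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_SET,
			SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0, 0, 0, 0);

	return sbi_err_map_linux_errno(ret.error);
}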
The tests can be run using the kselftest from series [4].
$ qemu-system-riscv64 \
    -cpu rv64,trap-misaligned-access=true,v=true \
    -M virt \
    -m 1024M \
    -bios fw_dynamic.bin \
    -kernel Image ...
# ./misaligned
TAP version 13
1..23
# Starting 23 tests from 1 test cases.
# RUN global.gp_load_lh ...
# OK global.gp_load_lh
ok 1 global.gp_load_lh
# RUN global.gp_load_lhu ...
# OK global.gp_load_lhu
ok 2 global.gp_load_lhu
# RUN global.gp_load_lw ...
# OK global.gp_load_lw
ok 3 global.gp_load_lw
# RUN global.gp_load_lwu ...
# OK global.gp_load_lwu
ok 4 global.gp_load_lwu
# RUN global.gp_load_ld ...
# OK global.gp_load_ld
ok 5 global.gp_load_ld
# RUN global.gp_load_c_lw ...
# OK global.gp_load_c_lw
ok 6 global.gp_load_c_lw
# RUN global.gp_load_c_ld ...
# OK global.gp_load_c_ld
ok 7 global.gp_load_c_ld
# RUN global.gp_load_c_ldsp ...
# OK global.gp_load_c_ldsp
ok 8 global.gp_load_c_ldsp
# RUN global.gp_load_sh ...
# OK global.gp_load_sh
ok 9 global.gp_load_sh
# RUN global.gp_load_sw ...
# OK global.gp_load_sw
ok 10 global.gp_load_sw
# RUN global.gp_load_sd ...
# OK global.gp_load_sd
ok 11 global.gp_load_sd
# RUN global.gp_load_c_sw ...
# OK global.gp_load_c_sw
ok 12 global.gp_load_c_sw
# RUN global.gp_load_c_sd ...
# OK global.gp_load_c_sd
ok 13 global.gp_load_c_sd
# RUN global.gp_load_c_sdsp ...
# OK global.gp_load_c_sdsp
ok 14 global.gp_load_c_sdsp
# RUN global.fpu_load_flw ...
# OK global.fpu_load_flw
ok 15 global.fpu_load_flw
# RUN global.fpu_load_fld ...
# OK global.fpu_load_fld
ok 16 global.fpu_load_fld
# RUN global.fpu_load_c_fld ...
# OK global.fpu_load_c_fld
ok 17 global.fpu_load_c_fld
# RUN global.fpu_load_c_fldsp ...
# OK global.fpu_load_c_fldsp
ok 18 global.fpu_load_c_fldsp
# RUN global.fpu_store_fsw ...
# OK global.fpu_store_fsw
ok 19 global.fpu_store_fsw
# RUN global.fpu_store_fsd ...
# OK global.fpu_store_fsd
ok 20 global.fpu_store_fsd
# RUN global.fpu_store_c_fsd ...
# OK global.fpu_store_c_fsd
ok 21 global.fpu_store_c_fsd
# RUN global.fpu_store_c_fsdsp ...
# OK global.fpu_store_c_fsdsp
ok 22 global.fpu_store_c_fsdsp
# RUN global.gen_sigbus ...
[12797.988647] misaligned[618]: unhandled signal 7 code 0x1 at 0x0000000000014dc0 in misaligned[4dc0,10000+76000]
[12797.988990] CPU: 0 UID: 0 PID: 618 Comm: misaligned Not tainted 6.13.0-rc6-00008-g4ec4468967c9-dirty #51
[12797.989169] Hardware name: riscv-virtio,qemu (DT)
[12797.989264] epc : 0000000000014dc0 ra : 0000000000014d00 sp : 00007fffe165d100
[12797.989407] gp : 000000000008f6e8 tp : 0000000000095760 t0 : 0000000000000008
[12797.989544] t1 : 00000000000965d8 t2 : 000000000008e830 s0 : 00007fffe165d160
[12797.989692] s1 : 000000000000001a a0 : 0000000000000000 a1 : 0000000000000002
[12797.989831] a2 : 0000000000000000 a3 : 0000000000000000 a4 : ffffffffdeadbeef
[12797.989964] a5 : 000000000008ef61 a6 : 626769735f6e0000 a7 : fffffffffffff000
[12797.990094] s2 : 0000000000000001 s3 : 00007fffe165d838 s4 : 00007fffe165d848
[12797.990238] s5 : 000000000000001a s6 : 0000000000010442 s7 : 0000000000010200
[12797.990391] s8 : 000000000000003a s9 : 0000000000094508 s10: 0000000000000000
[12797.990526] s11: 0000555567460668 t3 : 00007fffe165d070 t4 : 00000000000965d0
[12797.990656] t5 : fefefefefefefeff t6 : 0000000000000073
[12797.990756] status: 0000000200004020 badaddr: 000000000008ef61 cause: 0000000000000006
[12797.990911] Code: 8793 8791 3423 fcf4 3783 fc84 c737 dead 0713 eef7 (c398) 0001
# OK global.gen_sigbus
ok 23 global.gen_sigbus
# PASSED: 23 / 23 tests passed.
# Totals: pass:23 fail:0 xfail:0 xpass:0 skip:0 error:0
With kvm-tools:
# lkvm run -k sbi.flat -m 128
  Info: # lkvm run -k sbi.flat -m 128 -c 1 --name guest-97
  Info: Removed ghost socket file "/root/.lkvm//guest-97.sock".
##########################################################################
# kvm-unit-tests
##########################################################################
... [test messages elided]
PASS: sbi: fwft: FWFT extension probing no error
PASS: sbi: fwft: get/set reserved feature 0x6 error == SBI_ERR_DENIED
PASS: sbi: fwft: get/set reserved feature 0x3fffffff error == SBI_ERR_DENIED
PASS: sbi: fwft: get/set reserved feature 0x80000000 error == SBI_ERR_DENIED
PASS: sbi: fwft: get/set reserved feature 0xbfffffff error == SBI_ERR_DENIED
PASS: sbi: fwft: misaligned_deleg: Get misaligned deleg feature no error
PASS: sbi: fwft: misaligned_deleg: Set misaligned deleg feature invalid value error
PASS: sbi: fwft: misaligned_deleg: Set misaligned deleg feature invalid value error
PASS: sbi: fwft: misaligned_deleg: Set misaligned deleg feature value no error
PASS: sbi: fwft: misaligned_deleg: Set misaligned deleg feature value 0
PASS: sbi: fwft: misaligned_deleg: Set misaligned deleg feature value no error
PASS: sbi: fwft: misaligned_deleg: Set misaligned deleg feature value 1
PASS: sbi: fwft: misaligned_deleg: Verify misaligned load exception trap in supervisor
SUMMARY: 50 tests, 2 unexpected failures, 12 skipped
This series is available at [5].
Link: https://github.com/riscv-non-isa/riscv-sbi-doc/releases/download/vv3.0-rc2/r... [1]
Link: https://github.com/rivosinc/qemu/tree/dev/cleger/misaligned [2]
Link: https://lore.kernel.org/all/20241211211933.198792-3-fkonrad@amd.com/T/ [3]
Link: https://lore.kernel.org/linux-riscv/20250414123543.1615478-1-cleger@rivosinc... [4]
Link: https://github.com/rivosinc/linux/tree/dev/cleger/fwft [5]
---
V6:
 - Rename FWFT interface to remove "_local"
 - Fix test for MEDELEG values in KVM FWFT support
 - Add __init for unaligned_access_init()
 - Rebased on master
V5:
 - Return ERANGE as mapping for SBI_ERR_BAD_RANGE
 - Removed unused sbi_fwft_get()
 - Fix kernel for sbi_fwft_local_set_cpumask()
 - Fix indentation for sbi_fwft_local_set()
 - Remove spurious space in kvm_sbi_fwft_ops
 - Rebased on origin/master
 - Remove fixes commits and send them as a separate series [4]
V4:
 - Check SBI version 3.0 instead of 2.0 for FWFT presence
 - Use long for kvm_sbi_fwft operation return value
 - Init KVM sbi extension even if default_disabled
 - Remove revert_on_fail parameter for sbi_fwft_feature_set()
 - Fix comments for sbi_fwft_set/get()
 - Only handle local features (there are no globals yet in the spec)
 - Add new SBI errors to sbi_err_map_linux_errno()
V3:
 - Added comment about kvm sbi fwft supported/set/get callback requirements
 - Move struct kvm_sbi_fwft_feature in kvm_sbi_fwft.c
 - Add a FWFT interface
V2:
 - Added Kselftest for misaligned testing
 - Added get_user() usage instead of __get_user()
 - Reenable interrupt when possible in misaligned access handling
 - Document that riscv supports unaligned-traps
 - Fix KVM extension state when an init function is present
 - Rework SBI misaligned accesses trap delegation code
 - Added support for CPU hotplugging
 - Added KVM SBI reset callback
 - Added reset for KVM SBI FWFT lock
 - Return SBI_ERR_DENIED_LOCKED when LOCK flag is set
Clément Léger (14):
  riscv: sbi: add Firmware Feature (FWFT) SBI extensions definitions
  riscv: sbi: remove useless parenthesis
  riscv: sbi: add new SBI error mappings
  riscv: sbi: add FWFT extension interface
  riscv: sbi: add SBI FWFT extension calls
  riscv: misaligned: request misaligned exception from SBI
  riscv: misaligned: use on_each_cpu() for scalar misaligned access probing
  riscv: misaligned: use correct CONFIG_ ifdef for misaligned_access_speed
  riscv: misaligned: move emulated access uniformity check in a function
  riscv: misaligned: add a function to check misalign trap delegability
  RISC-V: KVM: add SBI extension init()/deinit() functions
  RISC-V: KVM: add SBI extension reset callback
  RISC-V: KVM: add support for FWFT SBI extension
  RISC-V: KVM: add support for SBI_FWFT_MISALIGNED_DELEG
 arch/riscv/include/asm/cpufeature.h        |   8 +-
 arch/riscv/include/asm/kvm_host.h          |   5 +-
 arch/riscv/include/asm/kvm_vcpu_sbi.h      |  12 +
 arch/riscv/include/asm/kvm_vcpu_sbi_fwft.h |  29 +++
 arch/riscv/include/asm/sbi.h               |  60 +++++
 arch/riscv/include/uapi/asm/kvm.h          |   1 +
 arch/riscv/kernel/sbi.c                    |  81 ++++++-
 arch/riscv/kernel/traps_misaligned.c       | 110 ++++++++-
 arch/riscv/kernel/unaligned_access_speed.c |   8 +-
 arch/riscv/kvm/Makefile                    |   1 +
 arch/riscv/kvm/vcpu.c                      |   7 +-
 arch/riscv/kvm/vcpu_sbi.c                  |  54 +++++
 arch/riscv/kvm/vcpu_sbi_fwft.c             | 252 +++++++++++++++++++++
 arch/riscv/kvm/vcpu_sbi_sta.c              |   3 +-
 14 files changed, 613 insertions(+), 18 deletions(-)
 create mode 100644 arch/riscv/include/asm/kvm_vcpu_sbi_fwft.h
 create mode 100644 arch/riscv/kvm/vcpu_sbi_fwft.c
The Firmware Features extension (FWFT) was added as part of the SBI 3.0 specification. Add SBI definitions to use this extension.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Samuel Holland <samuel.holland@sifive.com>
Tested-by: Samuel Holland <samuel.holland@sifive.com>
Reviewed-by: Deepak Gupta <debug@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/include/asm/sbi.h | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 3d250824178b..bb077d0c912f 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -35,6 +35,7 @@ enum sbi_ext_id {
 	SBI_EXT_DBCN = 0x4442434E,
 	SBI_EXT_STA = 0x535441,
 	SBI_EXT_NACL = 0x4E41434C,
+	SBI_EXT_FWFT = 0x46574654,
 
 	/* Experimentals extensions must lie within this range */
 	SBI_EXT_EXPERIMENTAL_START = 0x08000000,
@@ -402,6 +403,33 @@ enum sbi_ext_nacl_feature {
 #define SBI_NACL_SHMEM_SRET_X(__i)	((__riscv_xlen / 8) * (__i))
 #define SBI_NACL_SHMEM_SRET_X_LAST	31
 
+/* SBI function IDs for FW feature extension */
+#define SBI_EXT_FWFT_SET	0x0
+#define SBI_EXT_FWFT_GET	0x1
+
+enum sbi_fwft_feature_t {
+	SBI_FWFT_MISALIGNED_EXC_DELEG = 0x0,
+	SBI_FWFT_LANDING_PAD = 0x1,
+	SBI_FWFT_SHADOW_STACK = 0x2,
+	SBI_FWFT_DOUBLE_TRAP = 0x3,
+	SBI_FWFT_PTE_AD_HW_UPDATING = 0x4,
+	SBI_FWFT_POINTER_MASKING_PMLEN = 0x5,
+	SBI_FWFT_LOCAL_RESERVED_START = 0x6,
+	SBI_FWFT_LOCAL_RESERVED_END = 0x3fffffff,
+	SBI_FWFT_LOCAL_PLATFORM_START = 0x40000000,
+	SBI_FWFT_LOCAL_PLATFORM_END = 0x7fffffff,
+
+	SBI_FWFT_GLOBAL_RESERVED_START = 0x80000000,
+	SBI_FWFT_GLOBAL_RESERVED_END = 0xbfffffff,
+	SBI_FWFT_GLOBAL_PLATFORM_START = 0xc0000000,
+	SBI_FWFT_GLOBAL_PLATFORM_END = 0xffffffff,
+};
+
+#define SBI_FWFT_PLATFORM_FEATURE_BIT	BIT(30)
+#define SBI_FWFT_GLOBAL_FEATURE_BIT	BIT(31)
+
+#define SBI_FWFT_SET_FLAG_LOCK		BIT(0)
+
 /* SBI spec version fields */
 #define SBI_SPEC_VERSION_DEFAULT	0x1
 #define SBI_SPEC_VERSION_MAJOR_SHIFT	24
@@ -419,6 +447,11 @@ enum sbi_ext_nacl_feature {
 #define SBI_ERR_ALREADY_STARTED	-7
 #define SBI_ERR_ALREADY_STOPPED	-8
 #define SBI_ERR_NO_SHMEM	-9
+#define SBI_ERR_INVALID_STATE	-10
+#define SBI_ERR_BAD_RANGE	-11
+#define SBI_ERR_TIMEOUT		-12
+#define SBI_ERR_IO		-13
+#define SBI_ERR_DENIED_LOCKED	-14
 
 extern unsigned long sbi_spec_version;
 struct sbiret {
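As an editorial aside, the reserved and platform ranges above follow directly from two bits of the feature ID, which patch 4 later relies on to reject global features. A small illustrative helper (not part of the patch, the function names are made up):

/* Illustration only: bit 31 selects global vs local features,
 * bit 30 selects platform-specific vs standard features. */
static bool fwft_feature_is_global(u32 feature)
{
	return feature & SBI_FWFT_GLOBAL_FEATURE_BIT;
}

static bool fwft_feature_is_platform(u32 feature)
{
	return feature & SBI_FWFT_PLATFORM_FEATURE_BIT;
}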
A few parentheses in the SBI version/extension checks were useless; remove them.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
---
 arch/riscv/kernel/sbi.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 1989b8cade1b..1d44c35305a9 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -609,7 +609,7 @@ void __init sbi_init(void)
 	} else {
 		__sbi_rfence = __sbi_rfence_v01;
 	}
-	if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
+	if (sbi_spec_version >= sbi_mk_version(0, 3) &&
 	    sbi_probe_extension(SBI_EXT_SRST)) {
 		pr_info("SBI SRST extension detected\n");
 		pm_power_off = sbi_srst_power_off;
@@ -617,8 +617,8 @@ void __init sbi_init(void)
 		sbi_srst_reboot_nb.priority = 192;
 		register_restart_handler(&sbi_srst_reboot_nb);
 	}
-	if ((sbi_spec_version >= sbi_mk_version(2, 0)) &&
-	    (sbi_probe_extension(SBI_EXT_DBCN) > 0)) {
+	if (sbi_spec_version >= sbi_mk_version(2, 0) &&
+	    sbi_probe_extension(SBI_EXT_DBCN) > 0) {
 		pr_info("SBI DBCN extension detected\n");
 		sbi_debug_console_available = true;
 	}
On Thu, Apr 24, 2025 at 07:31:49PM +0200, Clément Léger wrote:
> A few parentheses in the SBI version/extension checks were useless; remove
> them.
>
> Signed-off-by: Clément Léger <cleger@rivosinc.com>
> ---
>  arch/riscv/kernel/sbi.c | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
>
> diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
> index 1989b8cade1b..1d44c35305a9 100644
> --- a/arch/riscv/kernel/sbi.c
> +++ b/arch/riscv/kernel/sbi.c
> @@ -609,7 +609,7 @@ void __init sbi_init(void)
>  	} else {
>  		__sbi_rfence = __sbi_rfence_v01;
>  	}
> -	if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
> +	if (sbi_spec_version >= sbi_mk_version(0, 3) &&
>  	    sbi_probe_extension(SBI_EXT_SRST)) {
>  		pr_info("SBI SRST extension detected\n");
>  		pm_power_off = sbi_srst_power_off;
> @@ -617,8 +617,8 @@ void __init sbi_init(void)
>  		sbi_srst_reboot_nb.priority = 192;
>  		register_restart_handler(&sbi_srst_reboot_nb);
>  	}
> -	if ((sbi_spec_version >= sbi_mk_version(2, 0)) &&
> -	    (sbi_probe_extension(SBI_EXT_DBCN) > 0)) {
> +	if (sbi_spec_version >= sbi_mk_version(2, 0) &&
> +	    sbi_probe_extension(SBI_EXT_DBCN) > 0) {
>  		pr_info("SBI DBCN extension detected\n");
>  		sbi_debug_console_available = true;
>  	}
> --
> 2.49.0
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
A few new errors have been added with SBI V3.0; map them as closely as possible to errno values.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/include/asm/sbi.h | 10 ++++++++++
 1 file changed, 10 insertions(+)
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index bb077d0c912f..7ec249fea880 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -536,11 +536,21 @@ static inline int sbi_err_map_linux_errno(int err)
 	case SBI_SUCCESS:
 		return 0;
 	case SBI_ERR_DENIED:
+	case SBI_ERR_DENIED_LOCKED:
 		return -EPERM;
 	case SBI_ERR_INVALID_PARAM:
+	case SBI_ERR_INVALID_STATE:
 		return -EINVAL;
+	case SBI_ERR_BAD_RANGE:
+		return -ERANGE;
 	case SBI_ERR_INVALID_ADDRESS:
 		return -EFAULT;
+	case SBI_ERR_NO_SHMEM:
+		return -ENOMEM;
+	case SBI_ERR_TIMEOUT:
+		return -ETIME;
+	case SBI_ERR_IO:
+		return -EIO;
 	case SBI_ERR_NOT_SUPPORTED:
 	case SBI_ERR_FAILURE:
 	default:
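With these mappings, callers of the FWFT helpers added later in this series can discriminate the new error classes with plain errno checks. A hedged sketch (the helper call and feature choice are illustrative only; sbi_fwft_set() is introduced in the next patches):

static void fwft_error_mapping_example(void)
{
	int ret;

	/* hypothetical call, to illustrate the mappings above */
	ret = sbi_fwft_set(SBI_FWFT_PTE_AD_HW_UPDATING, 1, 0);
	if (ret == -EPERM)		/* SBI_ERR_DENIED or SBI_ERR_DENIED_LOCKED */
		pr_warn("feature denied or locked by firmware\n");
	else if (ret == -ERANGE)	/* SBI_ERR_BAD_RANGE */
		pr_warn("value out of range for this feature\n");
}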
This SBI extension enables supervisor mode to control features that are under M-mode control (for instance, the Svadu menvcfg ADUE bit, the Ssdbltrp DTE bit, etc.). Add an interface to set local features for a specific cpumask as well as for the online cpumask.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/include/asm/sbi.h | 17 +++++++++++
 arch/riscv/kernel/sbi.c      | 57 ++++++++++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+)
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 7ec249fea880..3bbef56bcefc 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -503,6 +503,23 @@ int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask, unsigned long asid); long sbi_probe_extension(int ext);
+int sbi_fwft_set(u32 feature, unsigned long value, unsigned long flags); +int sbi_fwft_set_cpumask(const cpumask_t *mask, u32 feature, + unsigned long value, unsigned long flags); +/** + * sbi_fwft_set_online_cpus() - Set a feature on all online cpus + * @feature: The feature to be set + * @value: The feature value to be set + * @flags: FWFT feature set flags + * + * Return: 0 on success, appropriate linux error code otherwise. + */ +static inline int sbi_fwft_set_online_cpus(u32 feature, unsigned long value, + unsigned long flags) +{ + return sbi_fwft_set_cpumask(cpu_online_mask, feature, value, flags); +} + /* Check if current SBI specification version is 0.1 or not */ static inline int sbi_spec_is_0_1(void) { diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index 1d44c35305a9..d57e4dae7dac 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -299,6 +299,63 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask, return 0; }
+/** + * sbi_fwft_set() - Set a feature on the local hart + * @feature: The feature ID to be set + * @value: The feature value to be set + * @flags: FWFT feature set flags + * + * Return: 0 on success, appropriate linux error code otherwise. + */ +int sbi_fwft_set(u32 feature, unsigned long value, unsigned long flags) +{ + return -EOPNOTSUPP; +} + +struct fwft_set_req { + u32 feature; + unsigned long value; + unsigned long flags; + atomic_t error; +}; + +static void cpu_sbi_fwft_set(void *arg) +{ + struct fwft_set_req *req = arg; + int ret; + + ret = sbi_fwft_set(req->feature, req->value, req->flags); + if (ret) + atomic_set(&req->error, ret); +} + +/** + * sbi_fwft_set_cpumask() - Set a feature for the specified cpumask + * @mask: CPU mask of cpus that need the feature to be set + * @feature: The feature ID to be set + * @value: The feature value to be set + * @flags: FWFT feature set flags + * + * Return: 0 on success, appropriate linux error code otherwise. + */ +int sbi_fwft_set_cpumask(const cpumask_t *mask, u32 feature, + unsigned long value, unsigned long flags) +{ + struct fwft_set_req req = { + .feature = feature, + .value = value, + .flags = flags, + .error = ATOMIC_INIT(0), + }; + + if (feature & SBI_FWFT_GLOBAL_FEATURE_BIT) + return -EINVAL; + + on_each_cpu_mask(mask, cpu_sbi_fwft_set, &req, 1); + + return atomic_read(&req.error); +} + /** * sbi_set_timer() - Program the timer for next timer event. * @stime_value: The value after which next timer event should fire.
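To make the intended usage of this interface concrete, here is how a caller would enable a local feature everywhere, mirroring how patch 6 later uses it for misaligned delegation (the wrapper function name is made up for illustration). The per-cpumask variant runs the SET call on each targeted hart via on_each_cpu_mask(), as shown in the diff above.

static int __init enable_misaligned_deleg_everywhere(void)
{
	int ret;

	/* ask the SBI to delegate misaligned exceptions on every online hart */
	ret = sbi_fwft_set_online_cpus(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0);
	if (ret)
		pr_warn("FWFT misaligned delegation failed: %d\n", ret);

	return ret;
}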
Add the FWFT extension calls. Since this extension will only be ratified with SBI V3.0, it is provided as a separate commit that can be left out if needed.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/kernel/sbi.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index d57e4dae7dac..070014ff35d4 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -299,6 +299,8 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask, return 0; }
+static bool sbi_fwft_supported; + /** * sbi_fwft_set() - Set a feature on the local hart * @feature: The feature ID to be set @@ -309,7 +311,15 @@ static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask, */ int sbi_fwft_set(u32 feature, unsigned long value, unsigned long flags) { - return -EOPNOTSUPP; + struct sbiret ret; + + if (!sbi_fwft_supported) + return -EOPNOTSUPP; + + ret = sbi_ecall(SBI_EXT_FWFT, SBI_EXT_FWFT_SET, + feature, value, flags, 0, 0, 0); + + return sbi_err_map_linux_errno(ret.error); }
struct fwft_set_req { @@ -348,6 +358,9 @@ int sbi_fwft_set_cpumask(const cpumask_t *mask, u32 feature, .error = ATOMIC_INIT(0), };
+ if (!sbi_fwft_supported) + return -EOPNOTSUPP; + if (feature & SBI_FWFT_GLOBAL_FEATURE_BIT) return -EINVAL;
@@ -679,6 +692,11 @@ void __init sbi_init(void) pr_info("SBI DBCN extension detected\n"); sbi_debug_console_available = true; } + if (sbi_spec_version >= sbi_mk_version(3, 0) && + sbi_probe_extension(SBI_EXT_FWFT)) { + pr_info("SBI FWFT extension detected\n"); + sbi_fwft_supported = true; + } } else { __sbi_set_timer = __sbi_set_timer_v01; __sbi_send_ipi = __sbi_send_ipi_v01;
Now that the kernel can handle misaligned accesses in S-mode, request misaligned access exception delegation from SBI. This uses the FWFT SBI extension defined in SBI version 3.0.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/include/asm/cpufeature.h        |  3 +-
 arch/riscv/kernel/traps_misaligned.c       | 71 +++++++++++++++++++++-
 arch/riscv/kernel/unaligned_access_speed.c |  8 ++-
 3 files changed, 77 insertions(+), 5 deletions(-)
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index f56b409361fb..dbe5970d4fe6 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -67,8 +67,9 @@ void __init riscv_user_isa_enable(void); _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
bool __init check_unaligned_access_emulated_all_cpus(void); +void unaligned_access_init(void); +int cpu_online_unaligned_access_init(unsigned int cpu); #if defined(CONFIG_RISCV_SCALAR_MISALIGNED) -void check_unaligned_access_emulated(struct work_struct *work __always_unused); void unaligned_emulation_finish(void); bool unaligned_ctl_available(void); DECLARE_PER_CPU(long, misaligned_access_speed); diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 97c674d7d34f..6bb734abf9a4 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -16,6 +16,7 @@ #include <asm/entry-common.h> #include <asm/hwprobe.h> #include <asm/cpufeature.h> +#include <asm/sbi.h> #include <asm/vector.h>
#define INSN_MATCH_LB 0x3 @@ -629,7 +630,7 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
static bool unaligned_ctl __read_mostly;
-void check_unaligned_access_emulated(struct work_struct *work __always_unused) +static void check_unaligned_access_emulated(struct work_struct *work __always_unused) { int cpu = smp_processor_id(); long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu); @@ -640,6 +641,13 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused) __asm__ __volatile__ ( " "REG_L" %[tmp], 1(%[ptr])\n" : [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory"); +} + +static int cpu_online_check_unaligned_access_emulated(unsigned int cpu) +{ + long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu); + + check_unaligned_access_emulated(NULL);
/* * If unaligned_ctl is already set, this means that we detected that all @@ -648,9 +656,10 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused) */ if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) { pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n"); - while (true) - cpu_relax(); + return -EINVAL; } + + return 0; }
bool __init check_unaligned_access_emulated_all_cpus(void) @@ -682,4 +691,60 @@ bool __init check_unaligned_access_emulated_all_cpus(void) { return false; } +static int cpu_online_check_unaligned_access_emulated(unsigned int cpu) +{ + return 0; +} +#endif + +#ifdef CONFIG_RISCV_SBI + +static bool misaligned_traps_delegated; + +static int cpu_online_sbi_unaligned_setup(unsigned int cpu) +{ + if (sbi_fwft_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0) && + misaligned_traps_delegated) { + pr_crit("Misaligned trap delegation non homogeneous (expected delegated)"); + return -EINVAL; + } + + return 0; +} + +void __init unaligned_access_init(void) +{ + int ret; + + ret = sbi_fwft_set_online_cpus(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0); + if (ret) + return; + + misaligned_traps_delegated = true; + pr_info("SBI misaligned access exception delegation ok\n"); + /* + * Note that we don't have to take any specific action here, if + * the delegation is successful, then + * check_unaligned_access_emulated() will verify that indeed the + * platform traps on misaligned accesses. + */ +} +#else +void __init unaligned_access_init(void) {} + +static int cpu_online_sbi_unaligned_setup(unsigned int cpu __always_unused) +{ + return 0; +} #endif + +int cpu_online_unaligned_access_init(unsigned int cpu) +{ + int ret; + + ret = cpu_online_sbi_unaligned_setup(cpu); + if (ret) + return ret; + + return cpu_online_check_unaligned_access_emulated(cpu); +} diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c index b8ba13819d05..ae2068425fbc 100644 --- a/arch/riscv/kernel/unaligned_access_speed.c +++ b/arch/riscv/kernel/unaligned_access_speed.c @@ -236,6 +236,11 @@ arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
static int riscv_online_cpu(unsigned int cpu) { + int ret = cpu_online_unaligned_access_init(cpu); + + if (ret) + return ret; + /* We are already set since the last check */ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) { goto exit; @@ -248,7 +253,6 @@ static int riscv_online_cpu(unsigned int cpu) { static struct page *buf;
- check_unaligned_access_emulated(NULL); buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER); if (!buf) { pr_warn("Allocation failure, not measuring misaligned performance\n"); @@ -439,6 +443,8 @@ static int __init check_unaligned_access_all_cpus(void) { int cpu;
+ unaligned_access_init(); + if (unaligned_scalar_speed_param != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) { pr_info("scalar unaligned access speed set to '%s' (%lu) by command line\n", speed_str[unaligned_scalar_speed_param], unaligned_scalar_speed_param);
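From userspace, the effect of this delegation is simply that misaligned accesses keep working (emulated by the kernel when the hardware traps), which is what the kselftest in [4] exercises. A small standalone sketch, not part of the series, that demonstrates the behavior:

/* Userspace illustration: a deliberately misaligned 64-bit load. Once
 * misaligned exceptions are delegated to S-mode, the kernel emulates the
 * access transparently (or SIGBUS is raised if PR_UNALIGN_SIGBUS was
 * requested via prctl()). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t buf[16] = { 0 };
	volatile uint64_t *p = (uint64_t *)(buf + 1);	/* misaligned pointer */
	uint64_t val;

	val = *p;	/* may trap, then be emulated by the kernel */
	printf("misaligned load returned %llu\n", (unsigned long long)val);
	return 0;
}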
schedule_on_each_cpu() was used without any good reason while being documented as very slow. This call was in the boot path, so better use on_each_cpu() for the scalar misaligned check. The vector misaligned check still needs to use schedule_on_each_cpu() since it requires irqs to be enabled, but that is less of a problem since this code runs in a kthread. Add a comment to make that explicit.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/kernel/traps_misaligned.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 6bb734abf9a4..e1fe39cc6709 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -610,6 +610,10 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
 {
 	int cpu;
 
+	/*
+	 * While being documented as very slow, schedule_on_each_cpu() is used since
+	 * kernel_vector_begin() expects irqs to be enabled or it will panic()
+	 */
 	schedule_on_each_cpu(check_vector_unaligned_access_emulated);
 
 	for_each_online_cpu(cpu)
@@ -630,7 +634,7 @@ bool __init check_vector_unaligned_access_emulated_all_cpus(void)
 
 static bool unaligned_ctl __read_mostly;
 
-static void check_unaligned_access_emulated(struct work_struct *work __always_unused)
+static void check_unaligned_access_emulated(void *arg __always_unused)
 {
 	int cpu = smp_processor_id();
 	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
@@ -671,7 +675,7 @@ bool __init check_unaligned_access_emulated_all_cpus(void)
 	 * accesses emulated since tasks requesting such control can run on any
 	 * CPU.
 	 */
-	schedule_on_each_cpu(check_unaligned_access_emulated);
+	on_each_cpu(check_unaligned_access_emulated, NULL, 1);
 
 	for_each_online_cpu(cpu)
 		if (per_cpu(misaligned_access_speed, cpu)
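For context, the two helpers run their callback in different execution contexts, which is why the scalar and vector probes end up using different ones. A sketch (the probe function names are made up) of the calling convention, assuming the usual kernel semantics: on_each_cpu() runs the callback with IRQs disabled (via IPI on remote CPUs) and waits for completion, which is cheap enough for the boot path, while schedule_on_each_cpu() queues and flushes a work item per CPU, so the callback runs in a kworker with IRQs enabled, as kernel_vector_begin() requires.

static void scalar_probe(void *unused)			/* IPI context, IRQs off */
{
}

static void vector_probe(struct work_struct *work)	/* kworker, IRQs on */
{
}

static void run_probes(void)
{
	on_each_cpu(scalar_probe, NULL, 1);	/* wait == 1: synchronous */
	schedule_on_each_cpu(vector_probe);	/* process context per CPU */
}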
misaligned_access_speed is defined under CONFIG_RISCV_SCALAR_MISALIGNED but was used under CONFIG_RISCV_PROBE_UNALIGNED_ACCESS. Fix that by using the correct config option.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/kernel/traps_misaligned.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index e1fe39cc6709..e02e9b4b0fc5 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -362,7 +362,7 @@ static int handle_scalar_misaligned_load(struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
 
-#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
+#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
 	*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;
 #endif
Split the code that checks for the uniformity of misaligned access performance on all CPUs out of check_unaligned_access_emulated_all_cpus() into its own function, which will be used for the delegation check. No functional changes intended.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/kernel/traps_misaligned.c | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index e02e9b4b0fc5..410b2e0e0765 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -666,10 +666,20 @@ static int cpu_online_check_unaligned_access_emulated(unsigned int cpu) return 0; }
-bool __init check_unaligned_access_emulated_all_cpus(void) +static bool all_cpus_unaligned_scalar_access_emulated(void) { int cpu;
+ for_each_online_cpu(cpu) + if (per_cpu(misaligned_access_speed, cpu) != + RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED) + return false; + + return true; +} + +bool __init check_unaligned_access_emulated_all_cpus(void) +{ /* * We can only support PR_UNALIGN controls if all CPUs have misaligned * accesses emulated since tasks requesting such control can run on any @@ -677,10 +687,8 @@ bool __init check_unaligned_access_emulated_all_cpus(void) */ on_each_cpu(check_unaligned_access_emulated, NULL, 1);
- for_each_online_cpu(cpu) - if (per_cpu(misaligned_access_speed, cpu) - != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED) - return false; + if (!all_cpus_unaligned_scalar_access_emulated()) + return false;
unaligned_ctl = true; return true;
Checking whether the misaligned access trap can be delegated is needed for the KVM FWFT extension implementation. Add a function that reports the delegability of the misaligned trap exception.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/include/asm/cpufeature.h  |  5 +++++
 arch/riscv/kernel/traps_misaligned.c | 17 +++++++++++++++--
 2 files changed, 20 insertions(+), 2 deletions(-)
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index dbe5970d4fe6..3a87f612035c 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -72,12 +72,17 @@ int cpu_online_unaligned_access_init(unsigned int cpu); #if defined(CONFIG_RISCV_SCALAR_MISALIGNED) void unaligned_emulation_finish(void); bool unaligned_ctl_available(void); +bool misaligned_traps_can_delegate(void); DECLARE_PER_CPU(long, misaligned_access_speed); #else static inline bool unaligned_ctl_available(void) { return false; } +static inline bool misaligned_traps_can_delegate(void) +{ + return false; +} #endif
bool __init check_vector_unaligned_access_emulated_all_cpus(void); diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 410b2e0e0765..1d3be999220f 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -709,10 +709,10 @@ static int cpu_online_check_unaligned_access_emulated(unsigned int cpu) } #endif
-#ifdef CONFIG_RISCV_SBI - static bool misaligned_traps_delegated;
+#ifdef CONFIG_RISCV_SBI + static int cpu_online_sbi_unaligned_setup(unsigned int cpu) { if (sbi_fwft_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0) && @@ -748,6 +748,7 @@ static int cpu_online_sbi_unaligned_setup(unsigned int cpu __always_unused) { return 0; } + #endif
int cpu_online_unaligned_access_init(unsigned int cpu) @@ -760,3 +761,15 @@ int cpu_online_unaligned_access_init(unsigned int cpu)
return cpu_online_check_unaligned_access_emulated(cpu); } + +bool misaligned_traps_can_delegate(void) +{ + /* + * Either we successfully requested misaligned traps delegation for all + * CPUS or the SBI does not implemented FWFT extension but delegated the + * exception by default. + */ + return misaligned_traps_delegated || + all_cpus_unaligned_scalar_access_emulated(); +} +EXPORT_SYMBOL_GPL(misaligned_traps_can_delegate);
On 24-04-2025 23:01, Clément Léger wrote:
> +	/*
> +	 * Either we successfully requested misaligned traps delegation for all
> +	 * CPUS or the SBI does not implemented FWFT extension but delegated the

"CPUS" should be "CPUs" (case and plural form), and "does not implemented" should
be "does not implement", i.e. "CPUs, or the SBI does not implement the FWFT
extension".

> +	 * exception by default.
> +	 */
> +	return misaligned_traps_delegated ||
> +	       all_cpus_unaligned_scalar_access_emulated();
> +}

Thanks,
Alok
The FWFT SBI extension will need to dynamically allocate memory and perform some init-time-only setup. Add init()/deinit() callbacks that allow extensions to do so.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/include/asm/kvm_vcpu_sbi.h |  9 +++++++++
 arch/riscv/kvm/vcpu.c                 |  2 ++
 arch/riscv/kvm/vcpu_sbi.c             | 26 ++++++++++++++++++++++++++
 3 files changed, 37 insertions(+)
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h index 4ed6203cdd30..bcb90757b149 100644 --- a/arch/riscv/include/asm/kvm_vcpu_sbi.h +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h @@ -49,6 +49,14 @@ struct kvm_vcpu_sbi_extension {
/* Extension specific probe function */ unsigned long (*probe)(struct kvm_vcpu *vcpu); + + /* + * Init/deinit function called once during VCPU init/destroy. These + * might be use if the SBI extensions need to allocate or do specific + * init time only configuration. + */ + int (*init)(struct kvm_vcpu *vcpu); + void (*deinit)(struct kvm_vcpu *vcpu); };
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run); @@ -69,6 +77,7 @@ const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext( bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx); int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run); void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num, unsigned long *reg_val); diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 60d684c76c58..877bcc85c067 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -185,6 +185,8 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { + kvm_riscv_vcpu_sbi_deinit(vcpu); + /* Cleanup VCPU AIA context */ kvm_riscv_vcpu_aia_deinit(vcpu);
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index d1c83a77735e..3139f171c20f 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -508,5 +508,31 @@ void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu) scontext->ext_status[idx] = ext->default_disabled ? KVM_RISCV_SBI_EXT_STATUS_DISABLED : KVM_RISCV_SBI_EXT_STATUS_ENABLED; + + if (ext->init && ext->init(vcpu) != 0) + scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE; + } +} + +void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; + const struct kvm_riscv_sbi_extension_entry *entry; + const struct kvm_vcpu_sbi_extension *ext; + int idx, i; + + for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) { + entry = &sbi_ext[i]; + ext = entry->ext_ptr; + idx = entry->ext_idx; + + if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status)) + continue; + + if (scontext->ext_status[idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE || + !ext->deinit) + continue; + + ext->deinit(vcpu); } }
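To illustrate how an SBI extension would plug into these new callbacks, here is a hypothetical extension that allocates per-VCPU state in init() and frees it in deinit(); the "foo" names, the foo_state field, and the extension ID are invented for this sketch, while the callback signatures follow the struct kvm_vcpu_sbi_extension definitions above. The FWFT extension added later in this series does essentially this for its per-VCPU config array.

/* hypothetical per-VCPU state; foo_state is NOT a real kvm_vcpu_arch field */
static int kvm_sbi_ext_foo_init(struct kvm_vcpu *vcpu)
{
	struct foo_state *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;	/* extension marked UNAVAILABLE by the caller */

	vcpu->arch.foo_state = s;
	return 0;
}

static void kvm_sbi_ext_foo_deinit(struct kvm_vcpu *vcpu)
{
	kfree(vcpu->arch.foo_state);
}

static int kvm_sbi_ext_foo_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   struct kvm_vcpu_sbi_return *retdata)
{
	retdata->err_val = SBI_ERR_NOT_SUPPORTED;	/* stub */
	return 0;
}

static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_foo = {
	.extid_start = 0x12345678,	/* made-up extension ID */
	.extid_end   = 0x12345678,
	.handler     = kvm_sbi_ext_foo_handler,
	.init        = kvm_sbi_ext_foo_init,
	.deinit      = kvm_sbi_ext_foo_deinit,
};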
Currently, only the STA extension needs a reset function, but that is going to be the case for FWFT as well. Add a reset callback that SBI extensions can implement.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/include/asm/kvm_host.h     |  1 -
 arch/riscv/include/asm/kvm_vcpu_sbi.h |  2 ++
 arch/riscv/kvm/vcpu.c                 |  2 +-
 arch/riscv/kvm/vcpu_sbi.c             | 24 ++++++++++++++++++++++++
 arch/riscv/kvm/vcpu_sbi_sta.c         |  3 ++-
 5 files changed, 29 insertions(+), 3 deletions(-)
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 0e9c2fab6378..4fa02e082142 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -407,7 +407,6 @@ void __kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); bool kvm_riscv_vcpu_stopped(struct kvm_vcpu *vcpu);
-void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);
#endif /* __RISCV_KVM_HOST_H__ */ diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h index bcb90757b149..cb68b3a57c8f 100644 --- a/arch/riscv/include/asm/kvm_vcpu_sbi.h +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h @@ -57,6 +57,7 @@ struct kvm_vcpu_sbi_extension { */ int (*init)(struct kvm_vcpu *vcpu); void (*deinit)(struct kvm_vcpu *vcpu); + void (*reset)(struct kvm_vcpu *vcpu); };
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run); @@ -78,6 +79,7 @@ bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx); int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run); void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_sbi_reset(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num, unsigned long *reg_val); diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 877bcc85c067..542747e2c7f5 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -94,7 +94,7 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) vcpu->arch.hfence_tail = 0; memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));
- kvm_riscv_vcpu_sbi_sta_reset(vcpu); + kvm_riscv_vcpu_sbi_reset(vcpu);
/* Reset the guest CSRs for hotplug usecase */ if (loaded) diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index 3139f171c20f..50be079b5528 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -536,3 +536,27 @@ void kvm_riscv_vcpu_sbi_deinit(struct kvm_vcpu *vcpu) ext->deinit(vcpu); } } + +void kvm_riscv_vcpu_sbi_reset(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; + const struct kvm_riscv_sbi_extension_entry *entry; + const struct kvm_vcpu_sbi_extension *ext; + int idx, i; + + for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) { + entry = &sbi_ext[i]; + ext = entry->ext_ptr; + idx = entry->ext_idx; + + if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status)) + continue; + + if (scontext->ext_status[idx] != KVM_RISCV_SBI_EXT_STATUS_ENABLED || + !ext->reset) + continue; + + ext->reset(vcpu); + } +} + diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c index 5f35427114c1..cc6cb7c8f0e4 100644 --- a/arch/riscv/kvm/vcpu_sbi_sta.c +++ b/arch/riscv/kvm/vcpu_sbi_sta.c @@ -16,7 +16,7 @@ #include <asm/sbi.h> #include <asm/uaccess.h>
-void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu) +static void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu) { vcpu->arch.sta.shmem = INVALID_GPA; vcpu->arch.sta.last_steal = 0; @@ -156,6 +156,7 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = { .extid_end = SBI_EXT_STA, .handler = kvm_sbi_ext_sta_handler, .probe = kvm_sbi_ext_sta_probe, + .reset = kvm_riscv_vcpu_sbi_sta_reset, };
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
Add basic infrastructure to support the FWFT extension in KVM.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/include/asm/kvm_host.h          |   4 +
 arch/riscv/include/asm/kvm_vcpu_sbi.h      |   1 +
 arch/riscv/include/asm/kvm_vcpu_sbi_fwft.h |  29 +++
 arch/riscv/include/uapi/asm/kvm.h          |   1 +
 arch/riscv/kvm/Makefile                    |   1 +
 arch/riscv/kvm/vcpu_sbi.c                  |   4 +
 arch/riscv/kvm/vcpu_sbi_fwft.c             | 216 +++++++++++++++++++++
 7 files changed, 256 insertions(+)
 create mode 100644 arch/riscv/include/asm/kvm_vcpu_sbi_fwft.h
 create mode 100644 arch/riscv/kvm/vcpu_sbi_fwft.c
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 4fa02e082142..c3f880763b9a 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -19,6 +19,7 @@ #include <asm/kvm_vcpu_fp.h> #include <asm/kvm_vcpu_insn.h> #include <asm/kvm_vcpu_sbi.h> +#include <asm/kvm_vcpu_sbi_fwft.h> #include <asm/kvm_vcpu_timer.h> #include <asm/kvm_vcpu_pmu.h>
@@ -281,6 +282,9 @@ struct kvm_vcpu_arch { /* Performance monitoring context */ struct kvm_pmu pmu_context;
+ /* Firmware feature SBI extension context */ + struct kvm_sbi_fwft fwft_context; + /* 'static' configurations which are set only once */ struct kvm_vcpu_config cfg;
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h index cb68b3a57c8f..ffd03fed0c06 100644 --- a/arch/riscv/include/asm/kvm_vcpu_sbi.h +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h @@ -98,6 +98,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_susp; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_fwft; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi_fwft.h b/arch/riscv/include/asm/kvm_vcpu_sbi_fwft.h new file mode 100644 index 000000000000..9ba841355758 --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_sbi_fwft.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2025 Rivos Inc. + * + * Authors: + * Clément Léger cleger@rivosinc.com + */ + +#ifndef __KVM_VCPU_RISCV_FWFT_H +#define __KVM_VCPU_RISCV_FWFT_H + +#include <asm/sbi.h> + +struct kvm_sbi_fwft_feature; + +struct kvm_sbi_fwft_config { + const struct kvm_sbi_fwft_feature *feature; + bool supported; + unsigned long flags; +}; + +/* FWFT data structure per vcpu */ +struct kvm_sbi_fwft { + struct kvm_sbi_fwft_config *configs; +}; + +#define vcpu_to_fwft(vcpu) (&(vcpu)->arch.fwft_context) + +#endif /* !__KVM_VCPU_RISCV_FWFT_H */ diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 5f59fd226cc5..5ba77a3d9f6e 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -204,6 +204,7 @@ enum KVM_RISCV_SBI_EXT_ID { KVM_RISCV_SBI_EXT_DBCN, KVM_RISCV_SBI_EXT_STA, KVM_RISCV_SBI_EXT_SUSP, + KVM_RISCV_SBI_EXT_FWFT, KVM_RISCV_SBI_EXT_MAX, };
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 4e0bba91d284..06e2d52a9b88 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -26,6 +26,7 @@ kvm-y += vcpu_onereg.o kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o kvm-y += vcpu_sbi.o kvm-y += vcpu_sbi_base.o +kvm-y += vcpu_sbi_fwft.o kvm-y += vcpu_sbi_hsm.o kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_sbi_pmu.o kvm-y += vcpu_sbi_replace.o diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index 50be079b5528..0748810c0252 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -78,6 +78,10 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = { .ext_idx = KVM_RISCV_SBI_EXT_STA, .ext_ptr = &vcpu_sbi_ext_sta, }, + { + .ext_idx = KVM_RISCV_SBI_EXT_FWFT, + .ext_ptr = &vcpu_sbi_ext_fwft, + }, { .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL, .ext_ptr = &vcpu_sbi_ext_experimental, diff --git a/arch/riscv/kvm/vcpu_sbi_fwft.c b/arch/riscv/kvm/vcpu_sbi_fwft.c new file mode 100644 index 000000000000..b0f66c7bf010 --- /dev/null +++ b/arch/riscv/kvm/vcpu_sbi_fwft.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2025 Rivos Inc. + * + * Authors: + * Clément Léger cleger@rivosinc.com + */ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/kvm_host.h> +#include <asm/cpufeature.h> +#include <asm/sbi.h> +#include <asm/kvm_vcpu_sbi.h> +#include <asm/kvm_vcpu_sbi_fwft.h> + +struct kvm_sbi_fwft_feature { + /** + * @id: Feature ID + */ + enum sbi_fwft_feature_t id; + + /** + * @supported: Check if the feature is supported on the vcpu + * + * This callback is optional, if not provided the feature is assumed to + * be supported + */ + bool (*supported)(struct kvm_vcpu *vcpu); + + /** + * @set: Set the feature value + * + * Return SBI_SUCCESS on success or an SBI error (SBI_ERR_*) + * + * This callback is mandatory + */ + long (*set)(struct kvm_vcpu *vcpu, struct kvm_sbi_fwft_config *conf, unsigned long value); + + /** + * @get: Get the feature current value + * + * Return SBI_SUCCESS on success or an SBI error (SBI_ERR_*) + * + * This callback is mandatory + */ + long (*get)(struct kvm_vcpu *vcpu, struct kvm_sbi_fwft_config *conf, unsigned long *value); +}; + +static const enum sbi_fwft_feature_t kvm_fwft_defined_features[] = { + SBI_FWFT_MISALIGNED_EXC_DELEG, + SBI_FWFT_LANDING_PAD, + SBI_FWFT_SHADOW_STACK, + SBI_FWFT_DOUBLE_TRAP, + SBI_FWFT_PTE_AD_HW_UPDATING, + SBI_FWFT_POINTER_MASKING_PMLEN, +}; + +static bool kvm_fwft_is_defined_feature(enum sbi_fwft_feature_t feature) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(kvm_fwft_defined_features); i++) { + if (kvm_fwft_defined_features[i] == feature) + return true; + } + + return false; +} + +static const struct kvm_sbi_fwft_feature features[] = { +}; + +static struct kvm_sbi_fwft_config * +kvm_sbi_fwft_get_config(struct kvm_vcpu *vcpu, enum sbi_fwft_feature_t feature) +{ + int i; + struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu); + + for (i = 0; i < ARRAY_SIZE(features); i++) { + if (fwft->configs[i].feature->id == feature) + return &fwft->configs[i]; + } + + return NULL; +} + +static int kvm_fwft_get_feature(struct kvm_vcpu *vcpu, u32 feature, + struct kvm_sbi_fwft_config **conf) +{ + struct kvm_sbi_fwft_config *tconf; + + tconf = kvm_sbi_fwft_get_config(vcpu, feature); + if (!tconf) { + if (kvm_fwft_is_defined_feature(feature)) + return SBI_ERR_NOT_SUPPORTED; + + return SBI_ERR_DENIED; + } + + if (!tconf->supported) + return SBI_ERR_NOT_SUPPORTED; + + *conf = tconf; + + return SBI_SUCCESS; +} + +static int 
kvm_sbi_fwft_set(struct kvm_vcpu *vcpu, u32 feature, + unsigned long value, unsigned long flags) +{ + int ret; + struct kvm_sbi_fwft_config *conf; + + ret = kvm_fwft_get_feature(vcpu, feature, &conf); + if (ret) + return ret; + + if ((flags & ~SBI_FWFT_SET_FLAG_LOCK) != 0) + return SBI_ERR_INVALID_PARAM; + + if (conf->flags & SBI_FWFT_SET_FLAG_LOCK) + return SBI_ERR_DENIED_LOCKED; + + conf->flags = flags; + + return conf->feature->set(vcpu, conf, value); +} + +static int kvm_sbi_fwft_get(struct kvm_vcpu *vcpu, unsigned long feature, + unsigned long *value) +{ + int ret; + struct kvm_sbi_fwft_config *conf; + + ret = kvm_fwft_get_feature(vcpu, feature, &conf); + if (ret) + return ret; + + return conf->feature->get(vcpu, conf, value); +} + +static int kvm_sbi_ext_fwft_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct kvm_vcpu_sbi_return *retdata) +{ + int ret; + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + unsigned long funcid = cp->a6; + + switch (funcid) { + case SBI_EXT_FWFT_SET: + ret = kvm_sbi_fwft_set(vcpu, cp->a0, cp->a1, cp->a2); + break; + case SBI_EXT_FWFT_GET: + ret = kvm_sbi_fwft_get(vcpu, cp->a0, &retdata->out_val); + break; + default: + ret = SBI_ERR_NOT_SUPPORTED; + break; + } + + retdata->err_val = ret; + + return 0; +} + +static int kvm_sbi_ext_fwft_init(struct kvm_vcpu *vcpu) +{ + struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu); + const struct kvm_sbi_fwft_feature *feature; + struct kvm_sbi_fwft_config *conf; + int i; + + fwft->configs = kcalloc(ARRAY_SIZE(features), sizeof(struct kvm_sbi_fwft_config), + GFP_KERNEL); + if (!fwft->configs) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(features); i++) { + feature = &features[i]; + conf = &fwft->configs[i]; + if (feature->supported) + conf->supported = feature->supported(vcpu); + else + conf->supported = true; + + conf->feature = feature; + } + + return 0; +} + +static void kvm_sbi_ext_fwft_deinit(struct kvm_vcpu *vcpu) +{ + struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu); + + kfree(fwft->configs); +} + +static void kvm_sbi_ext_fwft_reset(struct kvm_vcpu *vcpu) +{ + int i; + struct kvm_sbi_fwft *fwft = vcpu_to_fwft(vcpu); + + for (i = 0; i < ARRAY_SIZE(features); i++) + fwft->configs[i].flags = 0; +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_fwft = { + .extid_start = SBI_EXT_FWFT, + .extid_end = SBI_EXT_FWFT, + .handler = kvm_sbi_ext_fwft_handler, + .init = kvm_sbi_ext_fwft_init, + .deinit = kvm_sbi_ext_fwft_deinit, + .reset = kvm_sbi_ext_fwft_reset, +};
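For clarity, here is the guest-side view of what the handler above services. A guest kernel running on this KVM issues the same FWFT calls as on bare SBI: a7 carries the extension ID, a6 the function ID, a0-a2 the feature/value/flags, and the result comes back in a0 (error) and a1 (value). The raw-ecall wrapper below is only an illustration of the trap path; a Linux guest would simply use sbi_ecall() or the sbi_fwft_set() helper from the earlier patches.

/* Guest-side sketch: raw FWFT SET ecall, per the SBI calling convention. */
static long guest_fwft_set(unsigned long feature, unsigned long value,
			   unsigned long flags)
{
	register unsigned long a0 asm("a0") = feature;
	register unsigned long a1 asm("a1") = value;
	register unsigned long a2 asm("a2") = flags;
	register unsigned long a6 asm("a6") = SBI_EXT_FWFT_SET;
	register unsigned long a7 asm("a7") = SBI_EXT_FWFT;

	asm volatile("ecall"
		     : "+r"(a0), "+r"(a1)
		     : "r"(a2), "r"(a6), "r"(a7)
		     : "memory");

	return a0;	/* SBI error code; returned value (if any) is in a1 */
}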
SBI_FWFT_MISALIGNED_DELEG needs the hedeleg CSR to be modified in order to delegate misaligned load/store exceptions. Save and restore it during CPU load/put.
Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Deepak Gupta <debug@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
---
 arch/riscv/kvm/vcpu.c          |  3 +++
 arch/riscv/kvm/vcpu_sbi_fwft.c | 36 ++++++++++++++++++++++++++++++++++
 2 files changed, 39 insertions(+)
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 542747e2c7f5..d98e379945c3 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -646,6 +646,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { void *nsh; struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; + struct kvm_vcpu_config *cfg = &vcpu->arch.cfg;
vcpu->cpu = -1;
@@ -671,6 +672,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) csr->vstval = nacl_csr_read(nsh, CSR_VSTVAL); csr->hvip = nacl_csr_read(nsh, CSR_HVIP); csr->vsatp = nacl_csr_read(nsh, CSR_VSATP); + cfg->hedeleg = nacl_csr_read(nsh, CSR_HEDELEG); } else { csr->vsstatus = csr_read(CSR_VSSTATUS); csr->vsie = csr_read(CSR_VSIE); @@ -681,6 +683,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) csr->vstval = csr_read(CSR_VSTVAL); csr->hvip = csr_read(CSR_HVIP); csr->vsatp = csr_read(CSR_VSATP); + cfg->hedeleg = csr_read(CSR_HEDELEG); } }
diff --git a/arch/riscv/kvm/vcpu_sbi_fwft.c b/arch/riscv/kvm/vcpu_sbi_fwft.c index b0f66c7bf010..d16ee477042f 100644 --- a/arch/riscv/kvm/vcpu_sbi_fwft.c +++ b/arch/riscv/kvm/vcpu_sbi_fwft.c @@ -14,6 +14,8 @@ #include <asm/kvm_vcpu_sbi.h> #include <asm/kvm_vcpu_sbi_fwft.h>
+#define MIS_DELEG (BIT_ULL(EXC_LOAD_MISALIGNED) | BIT_ULL(EXC_STORE_MISALIGNED)) + struct kvm_sbi_fwft_feature { /** * @id: Feature ID @@ -68,7 +70,41 @@ static bool kvm_fwft_is_defined_feature(enum sbi_fwft_feature_t feature) return false; }
+static bool kvm_sbi_fwft_misaligned_delegation_supported(struct kvm_vcpu *vcpu) +{ + return misaligned_traps_can_delegate(); +} + +static long kvm_sbi_fwft_set_misaligned_delegation(struct kvm_vcpu *vcpu, + struct kvm_sbi_fwft_config *conf, + unsigned long value) +{ + if (value == 1) + csr_set(CSR_HEDELEG, MIS_DELEG); + else if (value == 0) + csr_clear(CSR_HEDELEG, MIS_DELEG); + else + return SBI_ERR_INVALID_PARAM; + + return SBI_SUCCESS; +} + +static long kvm_sbi_fwft_get_misaligned_delegation(struct kvm_vcpu *vcpu, + struct kvm_sbi_fwft_config *conf, + unsigned long *value) +{ + *value = (csr_read(CSR_HEDELEG) & MIS_DELEG) == MIS_DELEG; + + return SBI_SUCCESS; +} + static const struct kvm_sbi_fwft_feature features[] = { + { + .id = SBI_FWFT_MISALIGNED_EXC_DELEG, + .supported = kvm_sbi_fwft_misaligned_delegation_supported, + .set = kvm_sbi_fwft_set_misaligned_delegation, + .get = kvm_sbi_fwft_get_misaligned_delegation, + }, };
static struct kvm_sbi_fwft_config *