This is a backport of the remaining patches from the below series: https://lore.kernel.org/all/cover.1633464148.git.naveen.n.rao@linux.vnet.ibm...
Kindly apply to the longterm tree for v5.10
Thanks,
Naveen
Naveen N. Rao (4):
  powerpc/lib: Add helper to check if offset is within conditional branch range
  powerpc/bpf: Validate branch ranges
  powerpc/security: Add a helper to query stf_barrier type
  powerpc/bpf: Emit stf barrier instruction sequences for BPF_NOSPEC
 arch/powerpc/include/asm/code-patching.h     |  1 +
 arch/powerpc/include/asm/security_features.h |  5 ++
 arch/powerpc/kernel/security.c               |  5 ++
 arch/powerpc/lib/code-patching.c             |  7 ++-
 arch/powerpc/net/bpf_jit.h                   | 33 ++++++----
 arch/powerpc/net/bpf_jit64.h                 |  8 +--
 arch/powerpc/net/bpf_jit_comp64.c            | 64 ++++++++++++++++++--
 7 files changed, 100 insertions(+), 23 deletions(-)
base-commit: 5040520482a594e92d4f69141229a6dd26173511
powerpc/lib: Add helper to check if offset is within conditional branch range

upstream commit 4549c3ea3160fa8b3f37dfe2f957657bb265eda9
Add a helper to check if a given offset is within the branch range for a powerpc conditional branch instruction, and update some sites to use the new helper.
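For reference, the powerpc conditional branch (bc) instruction encodes its target as a 16-bit, word-aligned displacement, so the reachable offsets are -0x8000 through 0x7ffc in steps of 4. A standalone userspace sketch of the check (the helper body mirrors the one added below; the printf harness is illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Same body as the kernel helper added by this patch */
static bool is_offset_in_cond_branch_range(long offset)
{
        return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
}

int main(void)
{
        printf("%d\n", is_offset_in_cond_branch_range(0x7ffc));  /* 1: max forward reach */
        printf("%d\n", is_offset_in_cond_branch_range(0x8000));  /* 0: one step too far */
        printf("%d\n", is_offset_in_cond_branch_range(-0x8000)); /* 1: max backward reach */
        printf("%d\n", is_offset_in_cond_branch_range(0x1002));  /* 0: not word-aligned */
        return 0;
}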
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/442b69a34ced32ca346a0d9a855f3f6cfdbbbd41.163346414...
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/code-patching.h | 1 +
 arch/powerpc/lib/code-patching.c         | 7 ++++++-
 arch/powerpc/net/bpf_jit.h               | 7 +------
 3 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index d5b3c3bb95b400..fa8746c320067a 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -23,6 +23,7 @@
 #define BRANCH_ABSOLUTE	0x2
 
 bool is_offset_in_branch_range(long offset);
+bool is_offset_in_cond_branch_range(long offset);
 int create_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
 		  unsigned long target, int flags);
 int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 2333625b5e3150..a2e4f864b63d2e 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -230,6 +230,11 @@ bool is_offset_in_branch_range(long offset)
 	return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
 }
 
+bool is_offset_in_cond_branch_range(long offset)
+{
+	return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
+}
+
 /*
  * Helper to check if a given instruction is a conditional branch
  * Derived from the conditional checks in analyse_instr()
@@ -283,7 +288,7 @@ int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
 		offset = offset - (unsigned long)addr;
 
 	/* Check we can represent the target in the instruction format */
-	if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
+	if (!is_offset_in_cond_branch_range(offset))
 		return 1;
 
 	/* Mask out the flags and target, so they don't step on each other. */
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index d0a67a1bbaf188..8f88b3d43f0a59 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -71,11 +71,6 @@
 #define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
 #endif
 
-static inline bool is_nearbranch(int offset)
-{
-	return (offset < 32768) && (offset >= -32768);
-}
-
 /*
  * The fly in the ointment of code size changing from pass to pass is
  * avoided by padding the short branch case with a NOP. If code size differs
@@ -84,7 +79,7 @@ static inline bool is_nearbranch(int offset)
  * state.
  */
 #define PPC_BCC(cond, dest)	do {					\
-		if (is_nearbranch((dest) - (ctx->idx * 4))) {		\
+		if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) {	\
 			PPC_BCC_SHORT(cond, dest);			\
 			EMIT(PPC_RAW_NOP());				\
 		} else {						\
powerpc/bpf: Validate branch ranges

upstream commit 3832ba4e283d7052b783dab8311df7e3590fed93
Add checks to ensure that we never emit branch instructions with truncated branch offsets.
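For background: the JIT macros previously masked the computed offset straight into the instruction's displacement field, so an out-of-range offset was silently truncated into a valid-looking branch to the wrong target. A userspace sketch of that failure mode (PPC_INST_BRANCH is the real 'b' primary opcode; the offset value is illustrative):

#include <stdio.h>

#define PPC_INST_BRANCH 0x48000000u     /* powerpc 'b' primary opcode */

int main(void)
{
        /* Out of range: 'b' only reaches -0x2000000 to 0x1fffffc */
        long offset = 0x4000000;
        unsigned int instr = PPC_INST_BRANCH | (offset & 0x03fffffc);

        /* Prints 0x48000000: a branch-to-self, not a branch to +0x4000000 */
        printf("0x%08x\n", instr);
        return 0;
}

With this patch, such an offset is instead caught by is_offset_in_branch_range() and the JIT bails out with -ERANGE.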
Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Tested-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Acked-by: Song Liu <songliubraving@fb.com>
Acked-by: Johan Almbladh <johan.almbladh@anyfinetworks.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/71d33a6b7603ec1013c9734dd8bdd4ff5e929142.163346414...
[drop ppc32 changes]
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/net/bpf_jit.h        | 26 ++++++++++++++++++++------
 arch/powerpc/net/bpf_jit_comp64.c |  8 ++++++--
 2 files changed, 26 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 8f88b3d43f0a59..1a5b4da8a23555 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -12,6 +12,7 @@
 
 #include <asm/types.h>
 #include <asm/ppc-opcode.h>
+#include <asm/code-patching.h>
 
 #ifdef PPC64_ELF_ABI_v1
 #define FUNCTION_DESCR_SIZE	24
@@ -24,13 +25,26 @@
 #define EMIT(instr)		PLANT_INSTR(image, ctx->idx, instr)
 
 /* Long jump; (unconditional 'branch') */
-#define PPC_JMP(dest)		EMIT(PPC_INST_BRANCH |			\
-				     (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+#define PPC_JMP(dest)							\
+	do {								\
+		long offset = (long)(dest) - (ctx->idx * 4);		\
+		if (!is_offset_in_branch_range(offset)) {		\
+			pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);	\
+			return -ERANGE;					\
+		}							\
+		EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc));		\
+	} while (0)
 /* "cond" here covers BO:BI fields. */
-#define PPC_BCC_SHORT(cond, dest)	EMIT(PPC_INST_BRANCH_COND |	\
-					     (((cond) & 0x3ff) << 16) |	\
-					     (((dest) - (ctx->idx * 4)) & \
-					      0xfffc))
+#define PPC_BCC_SHORT(cond, dest)					\
+	do {								\
+		long offset = (long)(dest) - (ctx->idx * 4);		\
+		if (!is_offset_in_cond_branch_range(offset)) {		\
+			pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx);	\
+			return -ERANGE;					\
+		}							\
+		EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc));	\
+	} while (0)
+
 /* Sign-extended 32-bit immediate load */
 #define PPC_LI32(d, i)		do {					\
 		if ((int)(uintptr_t)(i) >= -32768 &&			\
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index a2750d6ffd0f54..803d95af275534 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -224,7 +224,7 @@ static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
 	EMIT(PPC_RAW_BLRL());
 }
 
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
 {
 	/*
 	 * By now, the eBPF program has already setup parameters in r3, r4 and r5
@@ -285,7 +285,9 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
 	bpf_jit_emit_common_epilogue(image, ctx);
 
 	EMIT(PPC_RAW_BCTR());
+
 	/* out: */
+	return 0;
 }
 
 /* Assemble the body code between the prologue & epilogue */
@@ -1010,7 +1012,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		 */
 		case BPF_JMP | BPF_TAIL_CALL:
 			ctx->seen |= SEEN_TAILCALL;
-			bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+			if (ret < 0)
+				return ret;
 			break;
 
 		default:
powerpc/security: Add a helper to query stf_barrier type

upstream commit 030905920f32e91a52794937f67434ac0b3ea41a
Add a helper to return the stf_barrier type for the current processor.
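The return value is the existing enum stf_barrier_type (STF_BARRIER_NONE, STF_BARRIER_FALLBACK, STF_BARRIER_EIEIO, STF_BARRIER_SYNC_ORI). As a sketch of the intended usage -- the real consumer is the BPF JIT patch later in this series -- callers just switch on the reported type; stf_barrier_name() below is a hypothetical helper for illustration, assuming the enum from asm/setup.h:

/* Illustrative only: map the reported barrier type to a name */
static const char *stf_barrier_name(enum stf_barrier_type type)
{
        switch (type) {
        case STF_BARRIER_NONE:          return "none";
        case STF_BARRIER_FALLBACK:      return "fallback";
        case STF_BARRIER_EIEIO:         return "eieio";
        case STF_BARRIER_SYNC_ORI:      return "hwsync";
        }
        return "unknown";
}

e.g. pr_info("stf barrier: %s\n", stf_barrier_name(stf_barrier_type_get()));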
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/3bd5d7f96ea1547991ac2ce3137dc2b220bae285.163346414...
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/security_features.h | 5 +++++
 arch/powerpc/kernel/security.c               | 5 +++++
 2 files changed, 10 insertions(+)
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index b774a4477d5f1a..e380acc6e41327 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
 	return !!(powerpc_security_features & feature);
 }
 
+#ifdef CONFIG_PPC_BOOK3S_64
+enum stf_barrier_type stf_barrier_type_get(void);
+#else
+static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
+#endif
 
 // Features indicating support for Spectre/Meltdown mitigations
 
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index e4e1a94ccf6a6f..3f510c911b1076 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -261,6 +261,11 @@ static int __init handle_no_stf_barrier(char *p)
 
 early_param("no_stf_barrier", handle_no_stf_barrier);
 
+enum stf_barrier_type stf_barrier_type_get(void)
+{
+	return stf_enabled_flush_types;
+}
+
 /* This is the generic flag used by other architectures */
 static int __init handle_ssbd(char *p)
 {
powerpc/bpf: Emit stf barrier instruction sequences for BPF_NOSPEC

upstream commit b7540d62509453263604a155bf2d5f0ed450cba2
Emit similar instruction sequences to commit a048a07d7f4535 ("powerpc/64s: Add support for a store forwarding barrier at kernel entry/exit") when encountering BPF_NOSPEC.
Mitigations are enabled depending on what the firmware advertises. In particular, we do not gate these mitigations based on current settings, just like in x86. Due to this, we don't need to take any action if mitigations are enabled or disabled at runtime.
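Restated as a positive predicate, the gating check in the hunk below reads: emit the barrier only if firmware asked us to favour security, and an L1D flush is requested for the privilege level we run at. A sketch (stf_barrier_required() is a hypothetical name -- the patch open-codes the negated form; the feature/CPU checks come from asm/security_features.h and asm/cputable.h):

/* Illustrative restatement of the open-coded check below */
static bool stf_barrier_required(void)
{
        if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY))
                return false;

        return security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
               (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
                cpu_has_feature(CPU_FTR_HVMODE));
}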
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/956570cbc191cd41f8274bed48ee757a86dac62a.163346414...
[adjust macros to account for commits 1c9debbc2eb539 and ef909ba954145e.
adjust security feature checks to account for commit 84ed26fd00c514]
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 arch/powerpc/net/bpf_jit64.h      |  8 ++---
 arch/powerpc/net/bpf_jit_comp64.c | 56 ++++++++++++++++++++++++++++---
 2 files changed, 56 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
index 2e33c6673ff957..4d164e865b392e 100644
--- a/arch/powerpc/net/bpf_jit64.h
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -16,18 +16,18 @@
  * with our redzone usage.
  *
  *		[	prev sp		] <-------------
- *		[   nv gpr save area	] 6*8		|
+ *		[   nv gpr save area	] 5*8		|
  *		[    tail_call_cnt	] 8		|
- *		[    local_tmp_var	] 8		|
+ *		[    local_tmp_var	] 16		|
  * fp (r31) -->	[   ebpf stack space	] upto 512	|
  *		[     frame header	] 32/112	|
  * sp (r1) --->	[	stack pointer	] --------------
  */
 
 /* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE	(6*8)
+#define BPF_PPC_STACK_SAVE	(5*8)
 /* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS	16
+#define BPF_PPC_STACK_LOCALS	24
 /* stack frame excluding BPF stack, ensure this is quadword aligned */
 #define BPF_PPC_STACKFRAME	(STACK_FRAME_MIN_SIZE + \
 				 BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 803d95af275534..8936090acb5796 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -15,6 +15,7 @@
 #include <linux/if_vlan.h>
 #include <asm/kprobes.h>
 #include <linux/bpf.h>
+#include <asm/security_features.h>
 
 #include "bpf_jit64.h"
 
@@ -56,9 +57,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
  *		[	prev sp		] <-------------
  *		[	  ...		]		|
  * sp (r1) --->	[    stack pointer	] --------------
- *		[   nv gpr save area	] 6*8
+ *		[   nv gpr save area	] 5*8
  *		[    tail_call_cnt	] 8
- *		[    local_tmp_var	] 8
+ *		[    local_tmp_var	] 16
  *		[   unused red zone	] 208 bytes protected
  */
 static int bpf_jit_stack_local(struct codegen_context *ctx)
@@ -66,12 +67,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
 	if (bpf_has_stack_frame(ctx))
 		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
 	else
-		return -(BPF_PPC_STACK_SAVE + 16);
+		return -(BPF_PPC_STACK_SAVE + 24);
 }
 
 static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
 {
-	return bpf_jit_stack_local(ctx) + 8;
+	return bpf_jit_stack_local(ctx) + 16;
 }
 
 static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
@@ -290,11 +291,34 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
 	return 0;
 }
 
+/*
+ * We spill into the redzone always, even if the bpf program has its own stackframe.
+ * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
+ */
+void bpf_stf_barrier(void);
+
+asm (
+"		.global bpf_stf_barrier		;"
+"	bpf_stf_barrier:			;"
+"		std	21,-64(1)		;"
+"		std	22,-56(1)		;"
+"		sync				;"
+"		ld	21,-64(1)		;"
+"		ld	22,-56(1)		;"
+"		ori	31,31,0			;"
+"		.rept 14			;"
+"		b	1f			;"
+"	1:					;"
+"		.endr				;"
+"		blr				;"
+);
+
 /* Assemble the body code between the prologue & epilogue */
 static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			      struct codegen_context *ctx,
 			      u32 *addrs, bool extra_pass)
 {
+	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
 	const struct bpf_insn *insn = fp->insnsi;
 	int flen = fp->len;
 	int i, ret;
@@ -665,6 +689,30 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 		 * BPF_ST NOSPEC (speculation barrier)
 		 */
 		case BPF_ST | BPF_NOSPEC:
+			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
+			    (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) &&
+			     (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) || !cpu_has_feature(CPU_FTR_HVMODE))))
+				break;
+
+			switch (stf_barrier) {
+			case STF_BARRIER_EIEIO:
+				EMIT(0x7c0006ac | 0x02000000);
+				break;
+			case STF_BARRIER_SYNC_ORI:
+				EMIT(PPC_INST_SYNC);
+				EMIT(PPC_RAW_LD(b2p[TMP_REG_1], 13, 0));
+				EMIT(PPC_RAW_ORI(31, 31, 0));
+				break;
+			case STF_BARRIER_FALLBACK:
+				EMIT(PPC_INST_MFLR | ___PPC_RT(b2p[TMP_REG_1]));
+				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+				EMIT(PPC_RAW_MTCTR(12));
+				EMIT(PPC_INST_BCTR | 0x1);
+				EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
+				break;
+			case STF_BARRIER_NONE:
+				break;
+			}
+			break;
 
 		/*
On Mon, Nov 15, 2021 at 04:36:26PM +0530, Naveen N. Rao wrote:
> This is a backport of the remaining patches from the below series:
> https://lore.kernel.org/all/cover.1633464148.git.naveen.n.rao@linux.vnet.ibm...
>
> Kindly apply to the longterm tree for v5.10
All patches now queued up, thanks.
greg k-h