From: "Borislav Petkov (AMD)" bp@alien8.de
commit d2408e043e7296017420aa5929b3bba4d5e61013 upstream.
Instead of decoding each instruction in the return sites range only to realize that the return site is a jump to the default return thunk, which is the one needed when X86_FEATURE_RETHUNK is enabled, lift that check before the loop and get rid of that loop overhead.
Add comments about what gets patched, while at it.
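[ Editorial note, not part of the commit message: when X86_FEATURE_RETHUNK is set and the active thunk is still the compiler-generated default __x86_return_thunk, every return site already jumps where it should, so apply_returns() can bail out before decoding a single instruction. A simplified sketch of the lifted check, using the names from the diff below:

	/* Simplified sketch; the real apply_returns() continues with the
	 * per-site decode-and-patch loop shown in the diff below. */
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK) &&
	    (x86_return_thunk == __x86_return_thunk))
		return;	/* compiler-generated thunk jumps already correct */
]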
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230512120952.7924-1-bp@alien8.de
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
---
 arch/x86/kernel/alternative.c | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 44bff34f7d10cb6868ad079ca1cb87e458d3f91b..dfd5490df0af9e55d1a6ab185ad7cb03bdda4a91 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -769,13 +769,12 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
 {
 	int i = 0;
 
+	/* Patch the custom return thunks... */
 	if (cpu_wants_rethunk_at(addr)) {
-		if (x86_return_thunk == __x86_return_thunk)
-			return -1;
-
 		i = JMP32_INSN_SIZE;
 		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
 	} else {
+		/* ... or patch them out if not needed. */
 		bytes[i++] = RET_INSN_OPCODE;
 	}
@@ -788,6 +787,14 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 {
 	s32 *s;
 
+	/*
+	 * Do not patch out the default return thunks if those needed are the
+	 * ones generated by the compiler.
+	 */
+	if (cpu_feature_enabled(X86_FEATURE_RETHUNK) &&
+	    (x86_return_thunk == __x86_return_thunk))
+		return;
+
 	for (s = start; s < end; s++) {
 		void *dest = NULL, *addr = (void *)s + *s;
 		struct insn insn;
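
[ Editorial note: for readers unfamiliar with __text_gen_insn(), the JMP32 it emits here is the 0xe9 opcode followed by a little-endian 32-bit displacement measured from the end of the 5-byte instruction; the else branch of patch_return() instead writes a single RET_INSN_OPCODE (0xc3) byte. The self-contained user-space sketch below illustrates that encoding; gen_jmp32() is a hypothetical stand-in for the kernel helper, not kernel code, and assumes a little-endian host as on x86:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define JMP32_INSN_OPCODE	0xe9
#define JMP32_INSN_SIZE		5

/*
 * Hypothetical stand-in for what __text_gen_insn() does for JMP32:
 * opcode byte plus a rel32 displacement computed from the end of the
 * 5-byte instruction (little-endian host assumed).
 */
static void gen_jmp32(uint8_t *bytes, uintptr_t addr, uintptr_t dest)
{
	int32_t rel = (int32_t)(dest - (addr + JMP32_INSN_SIZE));

	bytes[0] = JMP32_INSN_OPCODE;
	memcpy(&bytes[1], &rel, sizeof(rel));
}

int main(void)
{
	uint8_t bytes[JMP32_INSN_SIZE];
	int i;

	/* Example: return site at 0x1000 retargeted to a thunk at 0x2000. */
	gen_jmp32(bytes, 0x1000, 0x2000);

	for (i = 0; i < JMP32_INSN_SIZE; i++)
		printf("%02x ", bytes[i]);
	printf("\n");	/* prints: e9 fb 0f 00 00 */

	return 0;
}
]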