Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/nospec-branch.h |  5
-rw-r--r--  arch/x86/kernel/cpu/bugs.c           | 17
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S        |  4
-rw-r--r--  arch/x86/lib/retpoline.S             | 58
4 files changed, 63 insertions(+), 21 deletions(-)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 9a4ffd321d29..71899b8a86e7 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -216,9 +216,14 @@ extern void __x86_return_thunk(void);
 static inline void __x86_return_thunk(void) {}
 #endif
 
+extern void zen_return_thunk(void);
+extern void srso_return_thunk(void);
+extern void srso_alias_return_thunk(void);
+
 extern void zen_untrain_ret(void);
 extern void srso_untrain_ret(void);
 extern void srso_untrain_ret_alias(void);
+
 extern void entry_ibpb(void);
 
 #ifdef CONFIG_RETPOLINE
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 666f5bed2d97..757c973531c5 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -166,8 +166,13 @@ void __init cpu_select_mitigations(void)
 	md_clear_select_mitigation();
 	srbds_select_mitigation();
 	l1d_flush_select_mitigation();
-	gds_select_mitigation();
+
+	/*
+	 * srso_select_mitigation() depends and must run after
+	 * retbleed_select_mitigation().
+	 */
 	srso_select_mitigation();
+	gds_select_mitigation();
 }
 
 /*
@@ -1015,6 +1020,9 @@ do_cmd_auto:
 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 		setup_force_cpu_cap(X86_FEATURE_UNRET);
 
+		if (IS_ENABLED(CONFIG_RETHUNK))
+			x86_return_thunk = zen_return_thunk;
+
 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
 			pr_err(RETBLEED_UNTRAIN_MSG);
@@ -2422,10 +2430,13 @@ static void __init srso_select_mitigation(void)
 		 */
 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 
-		if (boot_cpu_data.x86 == 0x19)
+		if (boot_cpu_data.x86 == 0x19) {
 			setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
-		else
+			x86_return_thunk = srso_alias_return_thunk;
+		} else {
 			setup_force_cpu_cap(X86_FEATURE_SRSO);
+			x86_return_thunk = srso_return_thunk;
+		}
 		srso_mitigation = SRSO_MITIGATION_SAFE_RET;
 	} else {
 		pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index e6939ebb606a..8af3ac61ee5f 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -507,8 +507,8 @@ INIT_PER_CPU(irq_stack_backing_store);
 	   "fixed_percpu_data is not at start of per-cpu area");
 #endif
 
- #ifdef CONFIG_RETHUNK
-. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
+#ifdef CONFIG_RETHUNK
+. = ASSERT((zen_return_thunk & 0x3f) == 0, "zen_return_thunk not cacheline-aligned");
 . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
 #endif
 
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 271f6560448d..a08a53b31447 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -94,22 +94,27 @@ SYM_CODE_END(__x86_indirect_thunk_array)
 	.section .text.__x86.rethunk_untrain
 
 SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
 	ASM_NOP2
 	lfence
-	jmp __x86_return_thunk
+	jmp srso_alias_return_thunk
 SYM_FUNC_END(srso_untrain_ret_alias)
 __EXPORT_THUNK(srso_untrain_ret_alias)
 
 	.section .text.__x86.rethunk_safe
+#else
+/* dummy definition for alternatives */
+SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
+SYM_FUNC_END(srso_untrain_ret_alias)
 #endif
 
-/* Needs a definition for the __x86_return_thunk alternative below. */
 SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
-#ifdef CONFIG_CPU_SRSO
 	add $8, %_ASM_SP
 	UNWIND_HINT_FUNC
-#endif
 	ANNOTATE_UNRET_SAFE
 	ret
 	int3
@@ -117,9 +122,16 @@ SYM_FUNC_END(srso_safe_ret_alias)
 
 	.section .text.__x86.return_thunk
 
+SYM_CODE_START(srso_alias_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	call srso_safe_ret_alias
+	ud2
+SYM_CODE_END(srso_alias_return_thunk)
+
 /*
  * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
- * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for
+ * 1) The RET at zen_return_thunk must be on a 64 byte boundary, for
  *    alignment within the BTB.
  * 2) The instruction at zen_untrain_ret must contain, and not
  *    end with, the 0xc3 byte of the RET.
@@ -127,7 +139,7 @@ SYM_FUNC_END(srso_safe_ret_alias)
  *    from re-poisioning the BTB prediction.
  */
 	.align 64
-	.skip 64 - (__ret - zen_untrain_ret), 0xcc
+	.skip 64 - (zen_return_thunk - zen_untrain_ret), 0xcc
 SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_NOENDBR
 	/*
@@ -135,16 +147,16 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	 *
 	 *   TEST $0xcc, %bl
 	 *   LFENCE
-	 *   JMP __x86_return_thunk
+	 *   JMP zen_return_thunk
 	 *
 	 * Executing the TEST instruction has a side effect of evicting any BTB
 	 * prediction (potentially attacker controlled) attached to the RET, as
-	 * __x86_return_thunk + 1 isn't an instruction boundary at the moment.
+	 * zen_return_thunk + 1 isn't an instruction boundary at the moment.
 	 */
 	.byte	0xf6
 
 	/*
-	 * As executed from __x86_return_thunk, this is a plain RET.
+	 * As executed from zen_return_thunk, this is a plain RET.
 	 *
 	 * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
 	 *
@@ -156,13 +168,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	 * With SMT enabled and STIBP active, a sibling thread cannot poison
 	 * RET's prediction to a type of its choice, but can evict the
 	 * prediction due to competitive sharing. If the prediction is
-	 * evicted, __x86_return_thunk will suffer Straight Line Speculation
+	 * evicted, zen_return_thunk will suffer Straight Line Speculation
 	 * which will be contained safely by the INT3.
	 */
-SYM_INNER_LABEL(__ret, SYM_L_GLOBAL)
+SYM_INNER_LABEL(zen_return_thunk, SYM_L_GLOBAL)
 	ret
 	int3
-SYM_CODE_END(__ret)
+SYM_CODE_END(zen_return_thunk)
 
 /*
  * Ensure the TEST decoding / BTB invalidation is complete.
@@ -173,7 +185,7 @@ SYM_CODE_END(__ret)
  * Jump back and execute the RET in the middle of the TEST instruction.
  * INT3 is for SLS protection.
  */
-	jmp __ret
+	jmp zen_return_thunk
 	int3
 SYM_FUNC_END(zen_untrain_ret)
 __EXPORT_THUNK(zen_untrain_ret)
@@ -194,12 +206,19 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
 	ANNOTATE_NOENDBR
 	.byte 0x48, 0xb8
 
+/*
+ * This forces the function return instruction to speculate into a trap
+ * (UD2 in srso_return_thunk() below). This RET will then mispredict
+ * and execution will continue at the return site read from the top of
+ * the stack.
+ */
 SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
 	add $8, %_ASM_SP
 	ret
 	int3
 	int3
 	int3
+	/* end of movabs */
 	lfence
 	call srso_safe_ret
 	ud2
@@ -207,12 +226,19 @@ SYM_CODE_END(srso_safe_ret)
 SYM_FUNC_END(srso_untrain_ret)
 __EXPORT_THUNK(srso_untrain_ret)
 
-SYM_CODE_START(__x86_return_thunk)
+SYM_CODE_START(srso_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
-	ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
-		      "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
+	call srso_safe_ret
 	ud2
+SYM_CODE_END(srso_return_thunk)
+
+SYM_CODE_START(__x86_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
 SYM_CODE_END(__x86_return_thunk)
 
 EXPORT_SYMBOL(__x86_return_thunk)
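The bugs.c hunks above replace the old ALTERNATIVE_2 dispatch inside __x86_return_thunk with a per-mitigation function pointer: retbleed selects zen_return_thunk, while SRSO selects srso_return_thunk or srso_alias_return_thunk by CPU family. A minimal user-space C sketch of that selection logic follows; pick_return_thunk(), its arguments, and the stub thunk bodies are invented for illustration and are not kernel code.

#include <stdio.h>

/*
 * Not kernel code: a user-space model of how bugs.c now picks a return
 * thunk. In the kernel, the alternatives machinery rewrites every
 * compiler-emitted RET to jump through whatever x86_return_thunk
 * designates; this sketch only models the selection.
 */
static void zen_return_thunk(void)        { puts("zen_return_thunk"); }
static void srso_return_thunk(void)       { puts("srso_return_thunk"); }
static void srso_alias_return_thunk(void) { puts("srso_alias_return_thunk"); }

static void (*x86_return_thunk)(void) = zen_return_thunk;

static void pick_return_thunk(unsigned int family, int srso_safe_ret_mode)
{
	if (!srso_safe_ret_mode) {
		/* retbleed "unret" path: Zen1/2 BTB untraining only */
		x86_return_thunk = zen_return_thunk;
	} else if (family == 0x19) {
		/* Zen3/4: address-aliasing SRSO variant */
		x86_return_thunk = srso_alias_return_thunk;
	} else {
		/* Zen1/2: SRSO safe-ret variant */
		x86_return_thunk = srso_return_thunk;
	}
}

int main(void)
{
	pick_return_thunk(0x19, 1);
	x86_return_thunk();	/* prints "srso_alias_return_thunk" */
	return 0;
}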
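The zen_untrain_ret comments in the patch describe a deliberate instruction overlap: the same bytes decode as TEST $0xcc, %bl when entered at zen_untrain_ret, and as RET followed by INT3 when entered one byte later at zen_return_thunk. A small self-contained C illustration of that byte layout, using only the encodings quoted in the patch:

#include <stdio.h>

int main(void)
{
	/*
	 * The three bytes around zen_return_thunk decode two ways:
	 *
	 *   entered at zen_untrain_ret  (+0):  f6 c3 cc = test $0xcc, %bl
	 *   entered at zen_return_thunk (+1):  c3 = ret, cc = int3
	 *
	 * Executing the TEST evicts any BTB prediction attached to the
	 * RET, because +1 is not an instruction boundary on that pass.
	 */
	const unsigned char bytes[] = { 0xf6, 0xc3, 0xcc };

	printf("+0: test $0x%02x, %%bl\n", bytes[2]);
	printf("+1: ret (0x%02x); int3 (0x%02x)\n", bytes[1], bytes[2]);
	return 0;
}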
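srso_untrain_ret plays the same game with a MOVABS: the .byte 0x48, 0xb8 prefix starts "movabs $imm64, %rax", and the 8 immediate bytes are exactly the body of srso_safe_ret, which the added "end of movabs" comment marks. When srso_return_thunk executes "call srso_safe_ret", the ADD $8 discards the just-pushed return address, so the RET consumes the original return site while the return-stack predictor still points at the UD2, steering any misprediction into the trap, as the patch's new comment explains. A C sketch of the size arithmetic; the instruction encodings are standard x86-64 and are an assumption of this sketch, not quoted from the patch:

#include <stdio.h>

int main(void)
{
	/*
	 * srso_untrain_ret emits 48 b8, the first two bytes of
	 * "movabs $imm64, %rax". The 8 immediate bytes are the body of
	 * srso_safe_ret:
	 *
	 *   48 83 c4 08   add $8, %rsp    <- srso_safe_ret entry point
	 *   c3            ret
	 *   cc cc cc      int3; int3; int3
	 *
	 * 4 + 1 + 3 = 8 bytes, which is why the patch places the
	 * "end of movabs" marker on the instruction that follows.
	 */
	const unsigned char movabs_imm[] = {
		0x48, 0x83, 0xc4, 0x08,	/* add $8, %rsp */
		0xc3,			/* ret */
		0xcc, 0xcc, 0xcc	/* int3 x 3 */
	};

	printf("immediate covers %zu bytes (must be 8)\n",
	       sizeof(movabs_imm));
	return 0;
}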