kernel/x86_64: Always lfence after swapgs.

This works around "yet another" Intel speculative-execution bug (the
SWAPGS issue, CVE-2019-1125). AMD is unaffected. Linux chose to patch
the fence in at runtime on Intel CPUs only; FreeBSD ran benchmarks and
determined that whatever effect the fence has (if any) is so far out in
the decimal places that just adding it unconditionally was the simpler
solution, so we do the same here.

(FreeBSD commit: https://github.com/freebsd/freebsd/commit/f4038696064b86260)

Fixes #15236.
Augustin Cavalier 2019-08-14 17:54:40 -04:00
parent 8c6b1519a2
commit 84f6e2d39f
2 changed files with 10 additions and 0 deletions
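For illustration only (not part of this commit): the pattern the diff
repeats by hand after every swapgs could equally be expressed as a small
GAS macro. The SWAPGS_LFENCE name and the usage shown are hypothetical.

// Swap to the other GS base, then serialize with lfence so that no later
// %gs-relative access can be executed speculatively with the previous
// (possibly user-controlled) GS base still loaded.
.macro SWAPGS_LFENCE
	swapgs
	lfence
.endm

// Hypothetical usage at a kernel entry point:
//	SWAPGS_LFENCE
//	movq	%rsp, %gs:ARCH_THREAD_user_rsp

Doing this unconditionally, rather than behind a CPU-feature check as on
Linux, trades an unneeded fence on AMD for simpler entry code, which is
the trade-off described in the message above.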


@@ -152,6 +152,7 @@ FUNCTION_END(x86_64_syscall32_entry)
// ecx - user esp
FUNCTION(x86_64_sysenter32_entry):
swapgs
lfence
// Set up an iframe on the stack (ECX = saved ESP).
push $USER_DATA_SELECTOR // ss
@@ -314,6 +315,7 @@ FUNCTION(x86_64_sysenter32_entry):
// Restore previous GS base and return.
swapgs
lfence
sti
sysexit


@@ -244,6 +244,7 @@ FUNCTION_END(int_bottom)
STATIC_FUNCTION(int_bottom_user):
// Load the kernel GS segment base.
swapgs
lfence
// Push the rest of the interrupt frame to the stack.
PUSH_IFRAME_BOTTOM(IFRAME_TYPE_OTHER)
@@ -293,6 +294,7 @@ STATIC_FUNCTION(int_bottom_user):
// Restore the previous GS base and return.
swapgs
lfence
iretq
.Lkernel_exit_work:
@@ -321,6 +323,7 @@ STATIC_FUNCTION(int_bottom_user):
// Restore the previous GS base and return.
swapgs
lfence
iretq
.Lkernel_exit_handle_signals:
@@ -340,6 +343,7 @@ FUNCTION(x86_64_syscall_entry):
// scratch space to store the user stack pointer in before we can push it
// to the stack.
swapgs
lfence
movq %rsp, %gs:ARCH_THREAD_user_rsp
movq %gs:ARCH_THREAD_syscall_rsp, %rsp
@@ -452,6 +456,7 @@ FUNCTION(x86_64_syscall_entry):
// Restore previous GS base and return.
swapgs
lfence
sysretq
.Lpre_syscall_debug:
@@ -525,6 +530,7 @@ FUNCTION(x86_64_syscall_entry):
// Restore the previous GS base and return.
swapgs
lfence
iretq
.Lpost_syscall_handle_signals:
@@ -601,6 +607,7 @@ FUNCTION(x86_return_to_userland):
// Restore the frame and return.
RESTORE_IFRAME()
swapgs
lfence
iretq
.Luserland_return_work:
// Slow path for return to userland.
@@ -625,6 +632,7 @@ FUNCTION(x86_return_to_userland):
// Restore the previous GS base and return.
swapgs
lfence
iretq
.Luserland_return_handle_signals:
// thread_at_kernel_exit requires interrupts to be enabled, it will disable