Commit 975e4433 authored by Ben Hutchings

[x86] Add support for disabling Speculative Store Bypass (CVE-2018-3639)

Apply all the SSB-related patches pending for 4.16-stable.
parent baa5254a
@@ -375,6 +375,58 @@ linux (4.16.10-1) UNRELEASED; urgency=medium
* kbuild: use -fmacro-prefix-map to make __FILE__ a relative path
* Bump ABI to 2
* [rt] Update to 4.16.8-rt3
* [x86] Add support for disabling Speculative Store Bypass (CVE-2018-3639):
- x86/nospec: Simplify alternative_msr_write()
- x86/bugs: Concentrate bug detection into a separate function
- x86/bugs: Concentrate bug reporting into a separate function
- x86/bugs: Read SPEC_CTRL MSR during boot and re-use reserved bits
- x86/bugs, KVM: Support the combination of guest and host IBRS
- x86/bugs: Expose /sys/../spec_store_bypass
- x86/cpufeatures: Add X86_FEATURE_RDS
- x86/bugs: Provide boot parameters for the spec_store_bypass_disable
mitigation
- x86/bugs/intel: Set proper CPU features and setup RDS
- x86/bugs: Whitelist allowed SPEC_CTRL MSR values
- x86/bugs/AMD: Add support to disable RDS on Fam[15,16,17]h if requested
- x86/KVM/VMX: Expose SPEC_CTRL Bit(2) to the guest
- x86/speculation: Create spec-ctrl.h to avoid include hell
- prctl: Add speculation control prctls
- x86/process: Allow runtime control of Speculative Store Bypass
- x86/speculation: Add prctl for Speculative Store Bypass mitigation
- nospec: Allow getting/setting on non-current task
- proc: Provide details on speculation flaw mitigations
- seccomp: Enable speculation flaw mitigations
- x86/bugs: Make boot modes __ro_after_init
- prctl: Add force disable speculation
- seccomp: Use PR_SPEC_FORCE_DISABLE
- seccomp: Add filter flag to opt-out of SSB mitigation
- seccomp: Move speculation migitation control to arch code
- x86/speculation: Make "seccomp" the default mode for Speculative Store
Bypass
- x86/bugs: Rename _RDS to _SSBD
- proc: Use underscores for SSBD in 'status'
- Documentation/spec_ctrl: Do some minor cleanups
- x86/bugs: Fix __ssb_select_mitigation() return type
- x86/bugs: Make cpu_show_common() static
- x86/bugs: Fix the parameters alignment and missing void
- x86/cpu: Make alternative_msr_write work for 32-bit code
- KVM: SVM: Move spec control call after restore of GS
- x86/speculation: Use synthetic bits for IBRS/IBPB/STIBP
- x86/cpufeatures: Disentangle MSR_SPEC_CTRL enumeration from IBRS
- x86/cpufeatures: Disentangle SSBD enumeration
- x86/cpufeatures: Add FEATURE_ZEN
- x86/speculation: Handle HT correctly on AMD
- x86/bugs, KVM: Extend speculation control for VIRT_SPEC_CTRL
- x86/speculation: Add virtualized speculative store bypass disable support
- x86/speculation: Rework speculative_store_bypass_update()
- x86/bugs: Unify x86_spec_ctrl_{set_guest,restore_host}
- x86/bugs: Expose x86_spec_ctrl_base directly
- x86/bugs: Remove x86_spec_ctrl_set()
- x86/bugs: Rework spec_ctrl base and mask logic
- x86/speculation, KVM: Implement support for VIRT_SPEC_CTRL/LS_CFG
- KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD
- x86/bugs: Rename SSBD_NO to SSB_NO
- bpf: Prevent memory disambiguation attack
[ Salvatore Bonaccorso ]
* [rt] Update to 4.16.7-rt1 and reenable
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Alexei Starovoitov <ast@kernel.org>
Date: Tue, 15 May 2018 09:27:05 -0700
Subject: bpf: Prevent memory disambiguation attack
From: Alexei Starovoitov <ast@kernel.org>
commit af86ca4e3088fe5eacf2f7e58c01fa68ca067672 upstream
Detect code patterns where malicious 'speculative store bypass' can be used
and sanitize such patterns.
39: (bf) r3 = r10
40: (07) r3 += -216
41: (79) r8 = *(u64 *)(r7 +0) // slow read
42: (7a) *(u64 *)(r10 -72) = 0 // verifier inserts this instruction
43: (7b) *(u64 *)(r8 +0) = r3 // this store becomes slow due to r8
44: (79) r1 = *(u64 *)(r6 +0) // cpu speculatively executes this load
45: (71) r2 = *(u8 *)(r1 +0) // speculatively arbitrary 'load byte'
// is now sanitized
Above code after x86 JIT becomes:
e5: mov %rbp,%rdx
e8: add $0xffffffffffffff28,%rdx
ef: mov 0x0(%r13),%r14
f3: movq $0x0,-0x48(%rbp)
fb: mov %rdx,0x0(%r14)
ff: mov 0x0(%rbx),%rdi
103: movzbq 0x0(%rdi),%rsi
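For illustration only (a hypothetical sketch, not part of this patch): the
reuse pattern the verifier targets is a single stack slot that first holds a
scalar (STACK_MISC) and later receives a pointer spill. Built with the
kernel-internal macros from include/linux/filter.h, a minimal sequence with
that shape could be:
/* Sketch: fp-8 first stores a scalar, then spills the ctx pointer (r1)
 * into the same slot. The check_stack_write() hunk below detects this
 * reuse and schedules a zeroing store for the slot, exactly like the
 * inserted instruction 42 above.
 */
struct bpf_insn slot_reuse[] = {
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 42),		/* *(u64 *)(fp - 8) = 42 */
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),	/* *(u64 *)(fp - 8) = r1 */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
};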
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
include/linux/bpf_verifier.h | 1 +
kernel/bpf/verifier.c | 59 ++++++++++++++++++++++++++++++++++++++++---
2 files changed, 57 insertions(+), 3 deletions(-)
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -146,6 +146,7 @@ struct bpf_insn_aux_data {
s32 call_imm; /* saved imm field of call insn */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
+ int sanitize_stack_off; /* stack slot to be cleared */
bool seen; /* this insn was processed by the verifier */
};
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -970,7 +970,7 @@ static bool register_is_null(struct bpf_
*/
static int check_stack_write(struct bpf_verifier_env *env,
struct bpf_func_state *state, /* func where register points to */
- int off, int size, int value_regno)
+ int off, int size, int value_regno, int insn_idx)
{
struct bpf_func_state *cur; /* state of the current function */
int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
@@ -1009,8 +1009,33 @@ static int check_stack_write(struct bpf_
state->stack[spi].spilled_ptr = cur->regs[value_regno];
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
- for (i = 0; i < BPF_REG_SIZE; i++)
+ for (i = 0; i < BPF_REG_SIZE; i++) {
+ if (state->stack[spi].slot_type[i] == STACK_MISC &&
+ !env->allow_ptr_leaks) {
+ int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
+ int soff = (-spi - 1) * BPF_REG_SIZE;
+
+ /* detected reuse of integer stack slot with a pointer
+ * which means either llvm is reusing stack slot or
+ * an attacker is trying to exploit CVE-2018-3639
+ * (speculative store bypass)
+ * Have to sanitize that slot with preemptive
+ * store of zero.
+ */
+ if (*poff && *poff != soff) {
+ /* disallow programs where single insn stores
+ * into two different stack slots, since verifier
+ * cannot sanitize them
+ */
+ verbose(env,
+ "insn %d cannot access two stack slots fp%d and fp%d",
+ insn_idx, *poff, soff);
+ return -EINVAL;
+ }
+ *poff = soff;
+ }
state->stack[spi].slot_type[i] = STACK_SPILL;
+ }
} else {
u8 type = STACK_MISC;
@@ -1685,7 +1710,7 @@ static int check_mem_access(struct bpf_v
if (t == BPF_WRITE)
err = check_stack_write(env, state, off, size,
- value_regno);
+ value_regno, insn_idx);
else
err = check_stack_read(env, state, off, size,
value_regno);
@@ -5156,6 +5181,34 @@ static int convert_ctx_accesses(struct b
else
continue;
+ if (type == BPF_WRITE &&
+ env->insn_aux_data[i + delta].sanitize_stack_off) {
+ struct bpf_insn patch[] = {
+ /* Sanitize suspicious stack slot with zero.
+ * There are no memory dependencies for this store,
+ * since it's only using frame pointer and immediate
+ * constant of zero
+ */
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP,
+ env->insn_aux_data[i + delta].sanitize_stack_off,
+ 0),
+ /* the original STX instruction will immediately
+ * overwrite the same stack slot with appropriate value
+ */
+ *insn,
+ };
+
+ cnt = ARRAY_SIZE(patch);
+ new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ continue;
+ }
+
if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
continue;
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Borislav Petkov <bp@suse.de>
Date: Tue, 8 May 2018 15:43:45 +0200
Subject: Documentation/spec_ctrl: Do some minor cleanups
From: Borislav Petkov <bp@suse.de>
commit dd0792699c4058e63c0715d9a7c2d40226fcdddc upstream
Fix some typos, improve formulations, end sentences with a fullstop.
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
Documentation/userspace-api/spec_ctrl.rst | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
--- a/Documentation/userspace-api/spec_ctrl.rst
+++ b/Documentation/userspace-api/spec_ctrl.rst
@@ -2,13 +2,13 @@
Speculation Control
===================
-Quite some CPUs have speculation related misfeatures which are in fact
-vulnerabilites causing data leaks in various forms even accross privilege
-domains.
+Quite some CPUs have speculation-related misfeatures which are in
+fact vulnerabilities causing data leaks in various forms even across
+privilege domains.
The kernel provides mitigation for such vulnerabilities in various
-forms. Some of these mitigations are compile time configurable and some on
-the kernel command line.
+forms. Some of these mitigations are compile-time configurable and some
+can be supplied on the kernel command line.
There is also a class of mitigations which are very expensive, but they can
be restricted to a certain set of processes or tasks in controlled
@@ -32,18 +32,18 @@ the following meaning:
Bit Define Description
==== ===================== ===================================================
0 PR_SPEC_PRCTL Mitigation can be controlled per task by
- PR_SET_SPECULATION_CTRL
+ PR_SET_SPECULATION_CTRL.
1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
- disabled
+ disabled.
2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
- enabled
+ enabled.
3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
subsequent prctl(..., PR_SPEC_ENABLE) will fail.
==== ===================== ===================================================
If all bits are 0 the CPU is not affected by the speculation misfeature.
-If PR_SPEC_PRCTL is set, then the per task control of the mitigation is
+If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
misfeature will fail.
@@ -61,9 +61,9 @@ Common error codes
Value Meaning
======= =================================================================
EINVAL The prctl is not implemented by the architecture or unused
- prctl(2) arguments are not 0
+ prctl(2) arguments are not 0.
-ENODEV arg2 is selecting a not supported speculation misfeature
+ENODEV arg2 is selecting a not supported speculation misfeature.
======= =================================================================
PR_SET_SPECULATION_CTRL error codes
@@ -74,7 +74,7 @@ Value Meaning
0 Success
ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
- PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE
+ PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
ENXIO Control of the selected speculation misfeature is not possible.
See PR_GET_SPECULATION_CTRL.
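As a userspace usage sketch of the interface documented above (prctl
constants as in include/uapi/linux/prctl.h; error handling kept minimal):
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
# define PR_GET_SPECULATION_CTRL	52
# define PR_SET_SPECULATION_CTRL	53
# define PR_SPEC_STORE_BYPASS		0
# define PR_SPEC_PRCTL			(1UL << 0)
# define PR_SPEC_DISABLE		(1UL << 2)
#endif

int main(void)
{
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0)
		return 1;		/* EINVAL or ENODEV, see the tables above */

	if (state & PR_SPEC_PRCTL)	/* per-task control is available */
		prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		      PR_SPEC_DISABLE, 0, 0);	/* disable speculation = enable mitigation */
	return 0;
}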
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Tom Lendacky <thomas.lendacky@amd.com>
Date: Thu, 10 May 2018 22:06:39 +0200
Subject: KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD
From: Tom Lendacky <thomas.lendacky@amd.com>
commit bc226f07dcd3c9ef0b7f6236fe356ea4a9cb4769 upstream
Expose the new virtualized architectural mechanism, VIRT_SSBD, for using
speculative store bypass disable (SSBD) under SVM. This will allow guests
to use SSBD on hardware that uses non-architectural mechanisms for enabling
SSBD.
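From the guest's point of view (a sketch, not taken from this patch): once
VIRT_SSBD is enumerated in CPUID 0x80000008 EBX, the guest kernel toggles
SSBD through the virtual MSR, and svm_set_msr() below accepts nothing but
the SSBD bit:
/* Guest-side sketch; MSR and bit definitions as in msr-index.h.
 * The "data & ~SPEC_CTRL_SSBD" check below rejects any other bit.
 */
wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);	/* mitigation on  */
wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, 0);			/* mitigation off */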
[ tglx: Folded the migration fixup from Paolo Bonzini ]
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/include/asm/kvm_host.h | 2 +-
arch/x86/kernel/cpu/common.c | 3 ++-
arch/x86/kvm/cpuid.c | 11 +++++++++--
arch/x86/kvm/svm.c | 21 +++++++++++++++++++--
arch/x86/kvm/vmx.c | 18 +++++++++++++++---
arch/x86/kvm/x86.c | 13 ++++---------
6 files changed, 50 insertions(+), 18 deletions(-)
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -933,7 +933,7 @@ struct kvm_x86_ops {
int (*hardware_setup)(void); /* __init */
void (*hardware_unsetup)(void); /* __exit */
bool (*cpu_has_accelerated_tpr)(void);
- bool (*cpu_has_high_real_mode_segbase)(void);
+ bool (*has_emulated_msr)(int index);
void (*cpuid_update)(struct kvm_vcpu *vcpu);
int (*vm_init)(struct kvm *kvm);
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -767,7 +767,8 @@ static void init_speculation_control(str
if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
set_cpu_cap(c, X86_FEATURE_STIBP);
- if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+ cpu_has(c, X86_FEATURE_VIRT_SSBD))
set_cpu_cap(c, X86_FEATURE_SSBD);
if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -374,7 +374,7 @@ static inline int __do_cpuid_ent(struct
/* cpuid 0x80000008.ebx */
const u32 kvm_cpuid_8000_0008_ebx_x86_features =
- F(AMD_IBPB) | F(AMD_IBRS);
+ F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
/* cpuid 0xC0000001.edx */
const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -642,13 +642,20 @@ static inline int __do_cpuid_ent(struct
g_phys_as = phys_as;
entry->eax = g_phys_as | (virt_as << 8);
entry->edx = 0;
- /* IBRS and IBPB aren't necessarily present in hardware cpuid */
+ /*
+ * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
+ * hardware cpuid
+ */
if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
entry->ebx |= F(AMD_IBPB);
if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
entry->ebx |= F(AMD_IBRS);
+ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+ entry->ebx |= F(VIRT_SSBD);
entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ entry->ebx |= F(VIRT_SSBD);
break;
}
case 0x80000019:
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3971,6 +3971,13 @@ static int svm_get_msr(struct kvm_vcpu *
msr_info->data = svm->spec_ctrl;
break;
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
+ return 1;
+
+ msr_info->data = svm->virt_spec_ctrl;
+ break;
case MSR_F15H_IC_CFG: {
int family, model;
@@ -4105,6 +4112,16 @@ static int svm_set_msr(struct kvm_vcpu *
break;
set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
break;
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ if (!msr->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
+ return 1;
+
+ if (data & ~SPEC_CTRL_SSBD)
+ return 1;
+
+ svm->virt_spec_ctrl = data;
+ break;
case MSR_STAR:
svm->vmcb->save.star = data;
break;
@@ -5635,7 +5652,7 @@ static bool svm_cpu_has_accelerated_tpr(
return false;
}
-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
{
return true;
}
@@ -6859,7 +6876,7 @@ static struct kvm_x86_ops svm_x86_ops __
.hardware_enable = svm_hardware_enable,
.hardware_disable = svm_hardware_disable,
.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
- .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+ .has_emulated_msr = svm_has_emulated_msr,
.vcpu_create = svm_create_vcpu,
.vcpu_free = svm_free_vcpu,
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9223,9 +9223,21 @@ static void vmx_handle_external_intr(str
}
STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
-static bool vmx_has_high_real_mode_segbase(void)
+static bool vmx_has_emulated_msr(int index)
{
- return enable_unrestricted_guest || emulate_invalid_guest_state;
+ switch (index) {
+ case MSR_IA32_SMBASE:
+ /*
+ * We cannot do SMM unless we can run the guest in big
+ * real mode.
+ */
+ return enable_unrestricted_guest || emulate_invalid_guest_state;
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ /* This is AMD only. */
+ return false;
+ default:
+ return true;
+ }
}
static bool vmx_mpx_supported(void)
@@ -12295,7 +12307,7 @@ static struct kvm_x86_ops vmx_x86_ops __
.hardware_enable = hardware_enable,
.hardware_disable = hardware_disable,
.cpu_has_accelerated_tpr = report_flexpriority,
- .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+ .has_emulated_msr = vmx_has_emulated_msr,
.vcpu_create = vmx_create_vcpu,
.vcpu_free = vmx_free_vcpu,
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1045,6 +1045,7 @@ static u32 emulated_msrs[] = {
MSR_SMI_COUNT,
MSR_PLATFORM_INFO,
MSR_MISC_FEATURES_ENABLES,
+ MSR_AMD64_VIRT_SPEC_CTRL,
};
static unsigned num_emulated_msrs;
@@ -2843,7 +2844,7 @@ int kvm_vm_ioctl_check_extension(struct
* fringe case that is not enabled except via specific settings
* of the module parameters.
*/
- r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+ r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
break;
case KVM_CAP_VAPIC:
r = !kvm_x86_ops->cpu_has_accelerated_tpr();
@@ -4522,14 +4523,8 @@ static void kvm_init_msr_list(void)
num_msrs_to_save = j;
for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
- switch (emulated_msrs[i]) {
- case MSR_IA32_SMBASE:
- if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
- continue;
- break;
- default:
- break;
- }
+ if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+ continue;
if (j < i)
emulated_msrs[j] = emulated_msrs[i];
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 11 May 2018 15:21:01 +0200
Subject: KVM: SVM: Move spec control call after restore of GS
From: Thomas Gleixner <tglx@linutronix.de>
commit 15e6c22fd8e5a42c5ed6d487b7c9fe44c2517765 upstream
svm_vcpu_run() invokes x86_spec_ctrl_restore_host() after VMEXIT, but
before the host GS is restored. x86_spec_ctrl_restore_host() uses 'current'
to determine the host SSBD state of the thread. 'current' is GS based, but
host GS is not yet restored and the access causes a triple fault.
Move the call after the host GS restore.
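The GS dependency is easy to see from how 'current' is implemented on
x86-64 (a sketch of arch/x86/include/asm/current.h):
DECLARE_PER_CPU(struct task_struct *, current_task);

static __always_inline struct task_struct *get_current(void)
{
	/* %gs-relative per-CPU load: bogus between VMEXIT and the
	 * host MSR_GS_BASE restore, hence the triple fault.
	 */
	return this_cpu_read_stable(current_task);
}

#define current get_current()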
Fixes: 885f82bfbc6f ("x86/process: Allow runtime control of Speculative Store Bypass")
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/kvm/svm.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5495,6 +5495,18 @@ static void svm_vcpu_run(struct kvm_vcpu
#endif
);
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
+
+#ifdef CONFIG_X86_64
+ wrmsrl(MSR_GS_BASE, svm->host.gs_base);
+#else
+ loadsegment(fs, svm->host.fs);
+#ifndef CONFIG_X86_32_LAZY_GS
+ loadsegment(gs, svm->host.gs);
+#endif
+#endif
+
/*
* We do not use IBRS in the kernel. If this vCPU has used the
* SPEC_CTRL MSR it may have left it on; save the value and
@@ -5515,18 +5527,6 @@ static void svm_vcpu_run(struct kvm_vcpu
x86_spec_ctrl_restore_host(svm->spec_ctrl);
- /* Eliminate branch target predictions from guest mode */
- vmexit_fill_RSB();
-
-#ifdef CONFIG_X86_64
- wrmsrl(MSR_GS_BASE, svm->host.gs_base);
-#else
- loadsegment(fs, svm->host.fs);
-#ifndef CONFIG_X86_32_LAZY_GS
- loadsegment(gs, svm->host.gs);
-#endif
-#endif
-
reload_tss(vcpu);
local_irq_disable();
From foo@baz Mon May 21 21:56:07 CEST 2018
From: Kees Cook <keescook@chromium.org>
Date: Tue, 1 May 2018 15:19:04 -0700
Subject: nospec: Allow getting/setting on non-current task
From: Kees Cook <keescook@chromium.org>
commit 7bbf1373e228840bb0295a2ca26d548ef37f448e upstream
Adjust arch_prctl_get/set_spec_ctrl() to operate on tasks other than
current.
This is needed both for /proc/$pid/status queries and for seccomp (since
thread-syncing can trigger seccomp in non-current threads).
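A userspace sketch of the /proc side this enables (the field name comes
from the "proc: Use underscores for SSBD in 'status'" patch in this series;
the helper below is hypothetical, error handling minimal):
#include <stdio.h>
#include <string.h>

/* Print a task's SSB line from /proc/<pid>/status,
 * e.g. "Speculation_Store_Bypass:	thread vulnerable"
 */
static void show_ssb(int pid)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/status", pid);
	f = fopen(path, "r");
	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Speculation_Store_Bypass:", 25))
			fputs(line, stdout);
	fclose(f);
}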
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/x86/kernel/cpu/bugs.c | 27 ++++++++++++++++-----------
include/linux/nospec.h | 7 +++++--
kernel/sys.c | 9 +++++----
3 files changed, 26 insertions(+), 17 deletions(-)
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -530,31 +530,35 @@ static void ssb_select_mitigation()
#undef pr_fmt
-static int ssb_prctl_set(unsigned long ctrl)
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
- bool rds = !!test_tsk_thread_flag(current, TIF_RDS);
+ bool rds = !!test_tsk_thread_flag(task, TIF_RDS);
if (ssb_mode != SPEC_STORE_BYPASS_PRCTL)
return -ENXIO;
if (ctrl == PR_SPEC_ENABLE)
- clear_tsk_thread_flag(current, TIF_RDS);
+ clear_tsk_thread_flag(task, TIF_RDS);
else
- set_tsk_thread_flag(current, TIF_RDS);
+ set_tsk_thread_flag(task, TIF_RDS);
- if (rds != !!test_tsk_thread_flag(current, TIF_RDS))
+ /*
+ * If being set on non-current task, delay setting the CPU
+ * mitigation until it is next scheduled.
+ */
+ if (task == current && rds != !!test_tsk_thread_flag(task, TIF_RDS))
speculative_store_bypass_update();
return 0;
}
-static int ssb_prctl_get(void)
+static int ssb_prctl_get(struct task_struct *task)
{
switch (ssb_mode) {
case SPEC_STORE_BYPASS_DISABLE:
return PR_SPEC_DISABLE;
case SPEC_STORE_BYPASS_PRCTL:
- if (test_tsk_thread_flag(current, TIF_RDS))
+ if (test_tsk_thread_flag(task, TIF_RDS))
return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
default:
@@ -564,24 +568,25 @@ static int ssb_prctl_get(void)
}
}
-int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl)
{
if (ctrl != PR_SPEC_ENABLE && ctrl != PR_SPEC_DISABLE)
return -ERANGE;
switch (which) {
case PR_SPEC_STORE_BYPASS:
- return ssb_prctl_set(ctrl);
+ return ssb_prctl_set(task, ctrl);
default:
return -ENODEV;
}
}
-int arch_prctl_spec_ctrl_get(unsigned long which)
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
switch (which) {
case PR_SPEC_STORE_BYPASS:
- return ssb_prctl_get();
+ return ssb_prctl_get(task);
default:
return -ENODEV;
}
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -7,6 +7,8 @@
#define _LINUX_NOSPEC_H
#include <asm/barrier.h>
+struct task_struct;
+
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
* @index: array element index
@@ -57,7 +59,8 @@ static inline unsigned long array_index_
})
/* Speculation control prctl */
-int arch_prctl_spec_ctrl_get(unsigned long which);
-int arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl);
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl);
#endif /* _LINUX_NOSPEC_H */
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2192,12 +2192,13 @@ static int propagate_has_child_subreaper
return 1;
}
-int __weak arch_prctl_spec_ctrl_get(unsigned long which)
+int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
{
return -EINVAL;
}
-int __weak arch_prctl_spec_ctrl_set(unsigned long which, unsigned long ctrl)
+int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
+ unsigned long ctrl)
{
return -EINVAL;
}
@@ -2413,12 +2414,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
case PR_GET_SPECULATION_CTRL:
if (arg3 || arg4 || arg5)