Commit 8bf37d8c authored by Thomas Gleixner

seccomp: Move speculation mitigation control to arch code

The mitigation control is simpler to implement in architecture code as it
avoids the extra function call to check the mode. Aside from that, having an
explicit seccomp-enabled mode in the architecture mitigations would require
even more workarounds.

Move it into architecture code and provide a weak function in the seccomp
code. Remove the 'which' argument as this allows the architecture to decide
which mitigations are relevant for seccomp.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 00a02d0c
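
For illustration only, a minimal userspace sketch (not part of this commit) of the weak/strong symbol override the patch relies on: generic code ships an empty weak default, and an architecture that implements the mitigation provides a normal (strong) definition, which the linker prefers. The names mirror the kernel ones, but the program itself is hypothetical.

/*
 * Illustrative sketch only, not kernel code. The "generic" side defines an
 * empty weak default; a non-weak definition elsewhere in the program would
 * replace it at link time, exactly like an architecture overriding the
 * __weak stub in the seccomp code. Build with: gcc -o weak_demo weak_demo.c
 */
#include <stdio.h>

struct task_struct;	/* opaque placeholder standing in for the kernel type */

/* Generic side: empty weak default (cf. the __weak stub added by this patch). */
__attribute__((weak)) void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	(void)task;
	puts("weak default: architecture provides no seccomp mitigation");
}

/*
 * An architecture would supply a regular, non-weak definition of
 * arch_seccomp_spec_mitigate(); the linker then silently picks that one
 * instead of the weak stub above.
 */

int main(void)
{
	arch_seccomp_spec_mitigate(NULL);
	return 0;
}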
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -569,6 +569,24 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
 	return 0;
 }
 
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+			     unsigned long ctrl)
+{
+	switch (which) {
+	case PR_SPEC_STORE_BYPASS:
+		return ssb_prctl_set(task, ctrl);
+	default:
+		return -ENODEV;
+	}
+}
+
+#ifdef CONFIG_SECCOMP
+void arch_seccomp_spec_mitigate(struct task_struct *task)
+{
+	ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+}
+#endif
+
 static int ssb_prctl_get(struct task_struct *task)
 {
 	switch (ssb_mode) {
@@ -587,17 +605,6 @@ static int ssb_prctl_get(struct task_struct *task)
 	}
 }
 
-int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
-			     unsigned long ctrl)
-{
-	switch (which) {
-	case PR_SPEC_STORE_BYPASS:
-		return ssb_prctl_set(task, ctrl);
-	default:
-		return -ENODEV;
-	}
-}
-
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 {
 	switch (which) {
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -62,5 +62,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
 			     unsigned long ctrl);
+/* Speculation control for seccomp enforced mitigation */
+void arch_seccomp_spec_mitigate(struct task_struct *task);
 
 #endif /* _LINUX_NOSPEC_H */
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -229,18 +229,7 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
 	return true;
 }
 
-/*
- * If a given speculation mitigation is opt-in (prctl()-controlled),
- * select it, by disabling speculation (enabling mitigation).
- */
-static inline void spec_mitigate(struct task_struct *task,
-				 unsigned long which)
-{
-	int state = arch_prctl_spec_ctrl_get(task, which);
-
-	if (state > 0 && (state & PR_SPEC_PRCTL))
-		arch_prctl_spec_ctrl_set(task, which, PR_SPEC_FORCE_DISABLE);
-}
+void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
 
 static inline void seccomp_assign_mode(struct task_struct *task,
 				       unsigned long seccomp_mode,
@@ -256,7 +245,7 @@ static inline void seccomp_assign_mode(struct task_struct *task,
 	smp_mb__before_atomic();
 
 	/* Assume default seccomp processes want spec flaw mitigation. */
 	if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
-		spec_mitigate(task, PR_SPEC_STORE_BYPASS);
+		arch_seccomp_spec_mitigate(task);
 	set_tsk_thread_flag(task, TIF_SECCOMP);
 }
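
For context, a hedged userspace check (not part of this commit, and assuming uapi headers new enough to provide PR_GET_SPECULATION_CTRL, i.e. Linux 4.17 or later): it queries the calling task's speculative store bypass state. On a kernel running in the seccomp SSB mode, one would expect the force-disable bit to be reported once a seccomp filter has been installed without SECCOMP_FILTER_FLAG_SPEC_ALLOW.

/*
 * Illustrative only: read the task's PR_SPEC_STORE_BYPASS state via prctl()
 * and decode the flag bits. Build with: gcc -o ssb_state ssb_state.c
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>	/* PR_GET_SPECULATION_CTRL, PR_SPEC_* (>= 4.17 headers) */

int main(void)
{
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0) {
		perror("prctl(PR_GET_SPECULATION_CTRL)");
		return 1;
	}
	printf("ssb state 0x%x:%s%s%s\n", (unsigned int)state,
	       (state & PR_SPEC_PRCTL) ? " prctl-controlled" : "",
	       (state & PR_SPEC_DISABLE) ? " disabled" : "",
	       (state & PR_SPEC_FORCE_DISABLE) ? " force-disabled" : "");
	return 0;
}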