Commit 7185a969 authored by Jiri Kosina's avatar Jiri Kosina

Merge branch 'for-5.1/fake-signal' into for-linus

Ability to send a fake signal to blocking tasks automatically, instead of
requiring manual intervention, from Miroslav Benes
parents 67bae14a 0b3d5279
...@@ -33,18 +33,6 @@ Description: ...@@ -33,18 +33,6 @@ Description:
An attribute which indicates whether the patch is currently in An attribute which indicates whether the patch is currently in
transition. transition.
What: /sys/kernel/livepatch/<patch>/signal
Date: Nov 2017
KernelVersion: 4.15.0
Contact: live-patching@vger.kernel.org
Description:
A writable attribute that allows administrator to affect the
course of an existing transition. Writing 1 sends a fake
signal to all remaining blocking tasks. The fake signal
means that no proper signal is delivered (there is no data in
signal pending structures). Tasks are interrupted or woken up,
and forced to change their patched state.
What: /sys/kernel/livepatch/<patch>/force What: /sys/kernel/livepatch/<patch>/force
Date: Nov 2017 Date: Nov 2017
KernelVersion: 4.15.0 KernelVersion: 4.15.0
......
...@@ -158,12 +158,11 @@ If a patch is in transition, this file shows 0 to indicate the task is ...@@ -158,12 +158,11 @@ If a patch is in transition, this file shows 0 to indicate the task is
unpatched and 1 to indicate it's patched. Otherwise, if no patch is in unpatched and 1 to indicate it's patched. Otherwise, if no patch is in
transition, it shows -1. Any tasks which are blocking the transition transition, it shows -1. Any tasks which are blocking the transition
can be signaled with SIGSTOP and SIGCONT to force them to change their can be signaled with SIGSTOP and SIGCONT to force them to change their
patched state. This may be harmful to the system though. patched state. This may be harmful to the system though. Sending a fake signal
/sys/kernel/livepatch/<patch>/signal attribute provides a better alternative. to all remaining blocking tasks is a better alternative. No proper signal is
Writing 1 to the attribute sends a fake signal to all remaining blocking actually delivered (there is no data in signal pending structures). Tasks are
tasks. No proper signal is actually delivered (there is no data in signal interrupted or woken up, and forced to change their patched state. The fake
pending structures). Tasks are interrupted or woken up, and forced to change signal is automatically sent every 15 seconds.
their patched state.
Administrator can also affect a transition through Administrator can also affect a transition through
/sys/kernel/livepatch/<patch>/force attribute. Writing 1 there clears /sys/kernel/livepatch/<patch>/force attribute. Writing 1 there clears
...@@ -411,8 +410,8 @@ Information about the registered patches can be found under ...@@ -411,8 +410,8 @@ Information about the registered patches can be found under
/sys/kernel/livepatch. The patches could be enabled and disabled /sys/kernel/livepatch. The patches could be enabled and disabled
by writing there. by writing there.
/sys/kernel/livepatch/<patch>/signal and /sys/kernel/livepatch/<patch>/force /sys/kernel/livepatch/<patch>/force attributes allow administrator to affect a
attributes allow administrator to affect a patching operation. patching operation.
See Documentation/ABI/testing/sysfs-kernel-livepatch for more details. See Documentation/ABI/testing/sysfs-kernel-livepatch for more details.
......
...@@ -313,7 +313,6 @@ static int klp_write_object_relocations(struct module *pmod, ...@@ -313,7 +313,6 @@ static int klp_write_object_relocations(struct module *pmod,
* /sys/kernel/livepatch/<patch> * /sys/kernel/livepatch/<patch>
* /sys/kernel/livepatch/<patch>/enabled * /sys/kernel/livepatch/<patch>/enabled
* /sys/kernel/livepatch/<patch>/transition * /sys/kernel/livepatch/<patch>/transition
* /sys/kernel/livepatch/<patch>/signal
* /sys/kernel/livepatch/<patch>/force * /sys/kernel/livepatch/<patch>/force
* /sys/kernel/livepatch/<patch>/<object> * /sys/kernel/livepatch/<patch>/<object>
* /sys/kernel/livepatch/<patch>/<object>/<function,sympos> * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
...@@ -382,35 +381,6 @@ static ssize_t transition_show(struct kobject *kobj, ...@@ -382,35 +381,6 @@ static ssize_t transition_show(struct kobject *kobj,
patch == klp_transition_patch); patch == klp_transition_patch);
} }
/*
 * Sysfs store handler for /sys/kernel/livepatch/<patch>/signal.
 *
 * Writing a true value while this patch is mid-transition sends the fake
 * signal to all remaining blocking tasks via klp_send_signals(). A false
 * value is accepted as a no-op; writing while no transition is active for
 * this patch fails with -EINVAL.
 *
 * Returns @count on success, or a negative errno on parse failure or
 * when the patch is not in transition.
 */
static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	struct klp_patch *patch;
	ssize_t ret = count;
	bool enable;
	int err;

	err = kstrtobool(buf, &enable);
	if (err)
		return err;

	/* "0" is accepted but does nothing — still a successful write. */
	if (!enable)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch == klp_transition_patch)
		klp_send_signals();
	else
		ret = -EINVAL;	/* only meaningful during a transition */

	mutex_unlock(&klp_mutex);

	return ret;
}
static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr, static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count) const char *buf, size_t count)
{ {
...@@ -442,12 +412,10 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr, ...@@ -442,12 +412,10 @@ static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled); static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition); static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force); static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct attribute *klp_patch_attrs[] = { static struct attribute *klp_patch_attrs[] = {
&enabled_kobj_attr.attr, &enabled_kobj_attr.attr,
&transition_kobj_attr.attr, &transition_kobj_attr.attr,
&signal_kobj_attr.attr,
&force_kobj_attr.attr, &force_kobj_attr.attr,
NULL NULL
}; };
......
...@@ -29,10 +29,14 @@ ...@@ -29,10 +29,14 @@
#define MAX_STACK_ENTRIES 100 #define MAX_STACK_ENTRIES 100
#define STACK_ERR_BUF_SIZE 128 #define STACK_ERR_BUF_SIZE 128
#define SIGNALS_TIMEOUT 15
struct klp_patch *klp_transition_patch; struct klp_patch *klp_transition_patch;
static int klp_target_state = KLP_UNDEFINED; static int klp_target_state = KLP_UNDEFINED;
static unsigned int klp_signals_cnt;
/* /*
* This work can be performed periodically to finish patching or unpatching any * This work can be performed periodically to finish patching or unpatching any
* "straggler" tasks which failed to transition in the first attempt. * "straggler" tasks which failed to transition in the first attempt.
...@@ -343,6 +347,47 @@ static bool klp_try_switch_task(struct task_struct *task) ...@@ -343,6 +347,47 @@ static bool klp_try_switch_task(struct task_struct *task)
} }
/*
 * Nudge every task that still carries TIF_PATCH_PENDING: kthreads sleeping
 * interruptibly are simply woken, while all other tasks receive a fake
 * signal — nothing is queued in the signal pending structures, the task is
 * merely interrupted/woken so it can switch its patched state.
 */
static void klp_send_signals(void)
{
	struct task_struct *proc, *t;

	/* Print the notice only on the first periodic round of signaling. */
	if (klp_signals_cnt == SIGNALS_TIMEOUT)
		pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(proc, t) {
		if (!klp_patch_pending(t))
			continue;

		/*
		 * Racy by design: TIF_PATCH_PENDING may be cleared between
		 * the check above and the action below (the task migrates
		 * itself). That only turns the wakeup/signal into a
		 * harmless no-op.
		 */
		if (!(t->flags & PF_KTHREAD)) {
			/* Fake signal: wake the task without queueing data. */
			spin_lock_irq(&t->sighand->siglock);
			signal_wake_up(t, 0);
			spin_unlock_irq(&t->sighand->siglock);
		} else {
			/* Kthreads cannot take signals; wake them instead. */
			wake_up_state(t, TASK_INTERRUPTIBLE);
		}
	}
	read_unlock(&tasklist_lock);
}
/* /*
* Try to switch all remaining tasks to the target patch state by walking the * Try to switch all remaining tasks to the target patch state by walking the
* stacks of sleeping tasks and looking for any to-be-patched or * stacks of sleeping tasks and looking for any to-be-patched or
...@@ -393,6 +438,10 @@ void klp_try_complete_transition(void) ...@@ -393,6 +438,10 @@ void klp_try_complete_transition(void)
put_online_cpus(); put_online_cpus();
if (!complete) { if (!complete) {
if (klp_signals_cnt && !(klp_signals_cnt % SIGNALS_TIMEOUT))
klp_send_signals();
klp_signals_cnt++;
/* /*
* Some tasks weren't able to be switched over. Try again * Some tasks weren't able to be switched over. Try again
* later and/or wait for other methods like kernel exit * later and/or wait for other methods like kernel exit
...@@ -454,6 +503,8 @@ void klp_start_transition(void) ...@@ -454,6 +503,8 @@ void klp_start_transition(void)
if (task->patch_state != klp_target_state) if (task->patch_state != klp_target_state)
set_tsk_thread_flag(task, TIF_PATCH_PENDING); set_tsk_thread_flag(task, TIF_PATCH_PENDING);
} }
klp_signals_cnt = 0;
} }
/* /*
...@@ -576,47 +627,6 @@ void klp_copy_process(struct task_struct *child) ...@@ -576,47 +627,6 @@ void klp_copy_process(struct task_struct *child)
/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */ /* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
} }
/*
 * Deliver a fake signal to every non-kthread task that still has
 * TIF_PATCH_PENDING set, and wake up any such kthreads. No real signal is
 * queued (signal pending structures stay empty); the tasks are only
 * interrupted or woken so they can migrate to the target patch state.
 * Only admin can request this action currently.
 */
void klp_send_signals(void)
{
	struct task_struct *leader, *tsk;

	pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(leader, tsk) {
		if (!klp_patch_pending(tsk))
			continue;

		/*
		 * Small benign race: the task may migrate itself right
		 * after TIF_PATCH_PENDING was observed, which makes the
		 * action below a meaningless but harmless no-op.
		 */
		if (tsk->flags & PF_KTHREAD) {
			/* Kthreads never see signals; a plain wakeup must do. */
			wake_up_state(tsk, TASK_INTERRUPTIBLE);
		} else {
			/* Fake signal: nothing queued, the task just wakes. */
			spin_lock_irq(&tsk->sighand->siglock);
			signal_wake_up(tsk, 0);
			spin_unlock_irq(&tsk->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
}
/* /*
* Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an * Drop TIF_PATCH_PENDING of all tasks on admin's request. This forces an
* existing transition to finish. * existing transition to finish.
......
...@@ -11,7 +11,6 @@ void klp_cancel_transition(void); ...@@ -11,7 +11,6 @@ void klp_cancel_transition(void);
void klp_start_transition(void); void klp_start_transition(void);
void klp_try_complete_transition(void); void klp_try_complete_transition(void);
void klp_reverse_transition(void); void klp_reverse_transition(void);
void klp_send_signals(void);
void klp_force_transition(void); void klp_force_transition(void);
#endif /* _LIVEPATCH_TRANSITION_H */ #endif /* _LIVEPATCH_TRANSITION_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment