Commit 73459e2a authored by Paolo Bonzini

x86: pvclock: Really remove the sched notifier for cross-cpu migrations

This reverts commits 0a4e6be9
and 80f7fdb1.

The task migration notifier was originally introduced to support the
pvclock vsyscall with a non-synchronized TSC, but KVM only supports the
vsyscall with a synchronized TSC.  Hence, on KVM the race condition can
only arise from a bad implementation on the host side, and even then it
is so rare as to be mostly theoretical.

As far as KVM is concerned, it is possible to fix the host instead,
avoiding both the additional complexity in the vDSO and the
(re)introduction of the task migration notifier.

Xen, on the other hand, hasn't yet implemented vsyscall support at
all, so we do not care about its plans for non-synchronized TSC.
Reported-by: Peter Zijlstra <peterz@infradead.org>
Suggested-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 5dca0d91
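
For context on why the notifier can go away: the pvclock read side already
relies on a version protocol, in which the hypervisor makes a vCPU's version
field odd while updating its time info and even again when done. A reader
that observes the same even version before and after its reads therefore has
a consistent snapshot. Below is a minimal userspace model of that protocol
(an illustrative sketch only: demo_pvti, writer_update and reader_read are
made-up names, and the real code uses explicit memory barriers rather than
C11 sequentially-consistent atomics).

#include <stdatomic.h>
#include <stdio.h>

/* Toy stand-in for struct pvclock_vcpu_time_info. */
struct demo_pvti {
	atomic_uint version;		/* odd while an update is in flight */
	unsigned long long system_time;	/* payload a reader must see whole */
	unsigned long long tsc_to_ns_mul;
};

static struct demo_pvti pvti;

/* Writer (the "hypervisor"): bracket the update with version bumps. */
static void writer_update(unsigned long long now)
{
	atomic_fetch_add(&pvti.version, 1);	/* version becomes odd */
	pvti.system_time = now;
	pvti.tsc_to_ns_mul = now / 1000 + 1;
	atomic_fetch_add(&pvti.version, 1);	/* version becomes even */
}

/* Reader (the "vDSO"): retry until a stable, even version is seen. */
static unsigned long long reader_read(void)
{
	unsigned int v;
	unsigned long long t;

	do {
		v = atomic_load(&pvti.version);
		t = pvti.system_time;
	} while ((v & 1) || v != atomic_load(&pvti.version));

	return t;
}

int main(void)
{
	writer_update(123456789ULL);
	printf("consistent system_time: %llu\n", reader_read());
	return 0;
}

The hole the reverted patches tried to close is that a reader can be migrated
between picking a CPU and reading that CPU's structure; the restored vDSO code
below closes it by rechecking the CPU number instead of a migrate_count.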
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -95,7 +95,6 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
 
 struct pvclock_vsyscall_time_info {
 	struct pvclock_vcpu_time_info pvti;
-	u32 migrate_count;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -141,46 +141,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
 	set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
 }
 
-static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
-
-static struct pvclock_vsyscall_time_info *
-pvclock_get_vsyscall_user_time_info(int cpu)
-{
-	if (!pvclock_vdso_info) {
-		BUG();
-		return NULL;
-	}
-
-	return &pvclock_vdso_info[cpu];
-}
-
-struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
-{
-	return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
-}
-
 #ifdef CONFIG_X86_64
-static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
-			void *v)
-{
-	struct task_migration_notifier *mn = v;
-	struct pvclock_vsyscall_time_info *pvti;
-
-	pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
-
-	/* this is NULL when pvclock vsyscall is not initialized */
-	if (unlikely(pvti == NULL))
-		return NOTIFY_DONE;
-
-	pvti->migrate_count++;
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block pvclock_migrate = {
-	.notifier_call = pvclock_task_migrate,
-};
-
 /*
  * Initialize the generic pvclock vsyscall state.  This will allocate
  * a/some page(s) for the per-vcpu pvclock information, set up a
@@ -194,17 +155,12 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
 
 	WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
 
-	pvclock_vdso_info = i;
-
 	for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
 		__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
 			     __pa(i) + (idx*PAGE_SIZE),
 			     PAGE_KERNEL_VVAR);
 	}
 
-	register_task_migration_notifier(&pvclock_migrate);
-
 	return 0;
 }
 #endif
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -82,15 +82,18 @@ static notrace cycle_t vread_pvclock(int *mode)
 	cycle_t ret;
 	u64 last;
 	u32 version;
-	u32 migrate_count;
 	u8 flags;
 	unsigned cpu, cpu1;
 
 	/*
-	 * When looping to get a consistent (time-info, tsc) pair, we
-	 * also need to deal with the possibility we can switch vcpus,
-	 * so make sure we always re-fetch time-info for the current vcpu.
+	 * Note: hypervisor must guarantee that:
+	 * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
+	 * 2. that per-CPU pvclock time info is updated if the
+	 *    underlying CPU changes.
+	 * 3. that version is increased whenever underlying CPU
+	 *    changes.
+	 *
 	 */
 	do {
 		cpu = __getcpu() & VGETCPU_CPU_MASK;
@@ -99,27 +102,20 @@ static notrace cycle_t vread_pvclock(int *mode)
 		 * __getcpu() calls (Gleb).
 		 */
 
-		/* Make sure migrate_count will change if we leave the VCPU. */
-		do {
-			pvti = get_pvti(cpu);
-			migrate_count = pvti->migrate_count;
-
-			cpu1 = cpu;
-			cpu = __getcpu() & VGETCPU_CPU_MASK;
-		} while (unlikely(cpu != cpu1));
+		pvti = get_pvti(cpu);
 
 		version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
 
 		/*
 		 * Test we're still on the cpu as well as the version.
-		 * - We must read TSC of pvti's VCPU.
-		 * - KVM doesn't follow the versioning protocol, so data could
-		 *   change before version if we left the VCPU.
+		 * We could have been migrated just after the first
+		 * vgetcpu but before fetching the version, so we
+		 * wouldn't notice a version change.
 		 */
-		smp_rmb();
-	} while (unlikely((pvti->pvti.version & 1) ||
-			  pvti->pvti.version != version ||
-			  pvti->migrate_count != migrate_count));
+		cpu1 = __getcpu() & VGETCPU_CPU_MASK;
+	} while (unlikely(cpu != cpu1 ||
+			  (pvti->pvti.version & 1) ||
+			  pvti->pvti.version != version));
 
 	if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
 		*mode = VCLOCK_NONE;
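
To make the restored read loop easier to follow: instead of an inner loop
that captures migrate_count, the code now fetches the time info once and
re-reads the CPU number after the read, relying on the hypervisor guarantees
listed in the comment above. Schematically (a sketch, not the vDSO source;
getcpu(), per_cpu_pvti and read_time_info() are hypothetical stand-ins for
__getcpu(), get_pvti() and __pvclock_read_cycles()):

do {
	cpu = getcpu();				/* pick this vCPU's time info */
	pvti = &per_cpu_pvti[cpu];
	version = read_time_info(pvti, &ret);	/* snapshot under version */
	cpu1 = getcpu();			/* were we migrated meanwhile? */
} while (cpu != cpu1 ||			/* read the wrong pvti: retry */
	 (version & 1) ||		/* update was in flight: retry */
	 pvti->version != version);	/* version moved under us: retry */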
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -175,14 +175,6 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 extern void calc_global_load(unsigned long ticks);
 extern void update_cpu_load_nohz(void);
 
-/* Notifier for when a task gets migrated to a new CPU */
-struct task_migration_notifier {
-	struct task_struct *task;
-	int from_cpu;
-	int to_cpu;
-};
-extern void register_task_migration_notifier(struct notifier_block *n);
-
 extern unsigned long get_parent_ip(unsigned long addr);
 
 extern void dump_cpu_task(int cpu);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1016,13 +1016,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 		rq_clock_skip_update(rq, true);
 }
 
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -1053,18 +1046,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	trace_sched_migrate_task(p, new_cpu);
 
 	if (task_cpu(p) != new_cpu) {
-		struct task_migration_notifier tmn;
-
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
 		perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
-
-		tmn.task = p;
-		tmn.from_cpu = task_cpu(p);
-		tmn.to_cpu = new_cpu;
-
-		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
 	}
 
 	__set_task_cpu(p, new_cpu);
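
For reference, the deleted scheduler plumbing was an instance of the kernel's
generic atomic notifier pattern: a chain head, registration, and a call that
invokes every registered callback. The sketch below shows that pattern as a
self-contained module (an illustration with made-up demo_* names, not the
removed code):

#include <linux/module.h>
#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(demo_chain);

/* Callback invoked for every event posted to the chain. */
static int demo_cb(struct notifier_block *nb, unsigned long action, void *data)
{
	pr_info("demo notifier: action=%lu\n", action);
	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_cb,
};

static int __init demo_init(void)
{
	atomic_notifier_chain_register(&demo_chain, &demo_nb);
	/* Post one event; every registered callback runs atomically. */
	atomic_notifier_call_chain(&demo_chain, 0, NULL);
	return 0;
}

static void __exit demo_exit(void)
{
	atomic_notifier_chain_unregister(&demo_chain, &demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because atomic notifier callbacks run with preemption disabled on hot paths
such as set_task_cpu(), removing the migration hook also removes overhead
from every task migration, which is part of the motivation for this revert.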