Commit 5f7f7740 authored by Juergen Gross

xen/events: block rogue events for some time

In order to avoid high dom0 load due to rogue guests sending events at
high frequency, block those events in case there was no action needed
in dom0 to handle the events.

This is done by adding a per-event counter, which is set to zero in
case an EOI without the XEN_EOI_FLAG_SPURIOUS flag is received from a
backend driver, and incremented when this flag has been set. In case
the counter is 2 or higher, delay the EOI by 1 << (cnt - 2) jiffies,
but not more than 1 second.
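
For illustration only, here is a minimal userspace sketch of the
backoff described above. It is not the kernel code itself: HZ is set
to an example tick rate of 250, and the helper name spurious_delay()
is invented for this sketch.

/* Sketch only: mirrors the delay calculation described above. */
#include <stdio.h>

#define HZ 250				/* example tick rate */

static unsigned int spurious_delay(short *spurious_cnt, int spurious)
{
	unsigned int delay = 0;

	if (!spurious) {
		*spurious_cnt = 0;	/* useful event: reset the counter */
		return 0;
	}

	/* Stop counting once the shift would clearly exceed the cap. */
	if ((1 << *spurious_cnt) < (HZ << 2))
		(*spurious_cnt)++;

	if (*spurious_cnt > 1) {
		delay = 1 << (*spurious_cnt - 2);	/* 1, 2, 4, ... jiffies */
		if (delay > HZ)
			delay = HZ;			/* cap at one second */
	}

	return delay;
}

int main(void)
{
	short cnt = 0;
	int i;

	for (i = 0; i < 12; i++)
		printf("spurious event %2d -> delay %u jiffies\n",
		       i + 1, spurious_delay(&cnt, 1));

	return 0;
}

With these example values the delay doubles from 1 jiffy per spurious
event until it reaches the HZ cap, i.e. at most one second.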

In order not to waste memory, shorten the per-event refcnt to two
bytes (it should normally never exceed a value of 2). Add an overflow
check to evtchn_get() to make sure the 2 bytes really won't overflow.
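
The overflow guard in the evtchn_get() hunk below can be read in
isolation roughly as follows (a sketch; the refcnt_get() helper name
is invented here and does not exist in the patch):

/* With refcnt now a 16-bit short, a get must be refused once the
 * counter reaches SHRT_MAX, since the next increment would otherwise
 * wrap to a negative value. */
#include <limits.h>
#include <stdbool.h>

static bool refcnt_get(short *refcnt)
{
	if (*refcnt <= 0 || *refcnt == SHRT_MAX)
		return false;	/* unused/invalid channel or saturated counter */
	(*refcnt)++;
	return true;
}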

This is part of XSA-332.

Cc: stable@vger.kernel.org
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Reviewed-by: Wei Liu <wl@xen.org>
parent e99502f7
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -461,17 +461,34 @@ static void lateeoi_list_add(struct irq_info *info)
 	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
 }
 
-static void xen_irq_lateeoi_locked(struct irq_info *info)
+static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
 {
 	evtchn_port_t evtchn;
 	unsigned int cpu;
+	unsigned int delay = 0;
 
 	evtchn = info->evtchn;
 	if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
 		return;
 
+	if (spurious) {
+		if ((1 << info->spurious_cnt) < (HZ << 2))
+			info->spurious_cnt++;
+		if (info->spurious_cnt > 1) {
+			delay = 1 << (info->spurious_cnt - 2);
+			if (delay > HZ)
+				delay = HZ;
+			if (!info->eoi_time)
+				info->eoi_cpu = smp_processor_id();
+			info->eoi_time = get_jiffies_64() + delay;
+		}
+	} else {
+		info->spurious_cnt = 0;
+	}
+
 	cpu = info->eoi_cpu;
-	if (info->eoi_time && info->irq_epoch == per_cpu(irq_epoch, cpu)) {
+	if (info->eoi_time &&
+	    (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
 		lateeoi_list_add(info);
 		return;
 	}
@@ -508,7 +525,7 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
 
 		info->eoi_time = 0;
 
-		xen_irq_lateeoi_locked(info);
+		xen_irq_lateeoi_locked(info, false);
 	}
 
 	if (info)
@@ -537,7 +554,7 @@ void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
 	info = info_for_irq(irq);
 
 	if (info)
-		xen_irq_lateeoi_locked(info);
+		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
 
 	read_unlock_irqrestore(&evtchn_rwlock, flags);
 }
@@ -1441,7 +1458,7 @@ int evtchn_get(evtchn_port_t evtchn)
 		goto done;
 
 	err = -EINVAL;
-	if (info->refcnt <= 0)
+	if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
 		goto done;
 
 	info->refcnt++;
...
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -31,7 +31,8 @@ enum xen_irq_type {
 struct irq_info {
 	struct list_head list;
 	struct list_head eoi_list;
-	int refcnt;
+	short refcnt;
+	short spurious_cnt;
 	enum xen_irq_type type;	/* type */
 	unsigned irq;
 	evtchn_port_t evtchn;	/* event channel */
...