Commit f8449a82 authored by David S. Miller's avatar David S. Miller Committed by David S. Miller

[NET]: Kill SMP_TIMER_* users.

Before timers were run per-cpu as they are now, one used
to use the SMP_TIMER_* macros to move globally synchronized
timer work into tasklets.

But with per-cpu timers this is useless work and in fact makes
things run more slowly.
parent 8c212931
......@@ -165,7 +165,6 @@ struct neigh_table
unsigned long last_rand;
struct neigh_parms *parms_list;
kmem_cache_t *kmem_cachep;
struct tasklet_struct gc_task;
struct neigh_statistics stats;
struct neighbour *hash_buckets[NEIGH_HASHMASK+1];
struct pneigh_entry *phash_buckets[PNEIGH_HASHMASK+1];
......
......@@ -540,7 +540,7 @@ static void neigh_sync(struct neighbour *n)
}
}
static void SMP_TIMER_NAME(neigh_periodic_timer)(unsigned long arg)
static void neigh_periodic_timer(unsigned long arg)
{
struct neigh_table *tbl = (struct neigh_table *)arg;
unsigned long now = jiffies;
......@@ -605,15 +605,6 @@ static void SMP_TIMER_NAME(neigh_periodic_timer)(unsigned long arg)
write_unlock(&tbl->lock);
}
#ifdef CONFIG_SMP
static void neigh_periodic_timer(unsigned long arg)
{
struct neigh_table *tbl = (struct neigh_table *)arg;
tasklet_schedule(&tbl->gc_task);
}
#endif
static __inline__ int neigh_max_probes(struct neighbour *n)
{
struct neigh_parms *p = n->parms;
......@@ -1147,10 +1138,6 @@ void neigh_table_init(struct neigh_table *tbl)
15) & ~15,
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
#ifdef CONFIG_SMP
tasklet_init(&tbl->gc_task, SMP_TIMER_NAME(neigh_periodic_timer),
(unsigned long)tbl);
#endif
tbl->lock = RW_LOCK_UNLOCKED;
init_timer(&tbl->gc_timer);
tbl->gc_timer.data = (unsigned long)tbl;
......@@ -1178,7 +1165,6 @@ int neigh_table_clear(struct neigh_table *tbl)
/* It is not clean... Fix it to unload IPv6 module safely */
del_timer_sync(&tbl->gc_timer);
tasklet_kill(&tbl->gc_task);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
neigh_ifdown(tbl, NULL);
......
......@@ -155,7 +155,7 @@ static inline void dnrt_drop(struct dn_route *rt)
call_rcu(&rt->u.dst.rcu_head, (void (*)(void *))dst_free, &rt->u.dst);
}
static void SMP_TIMER_NAME(dn_dst_check_expire)(unsigned long dummy)
static void dn_dst_check_expire(unsigned long dummy)
{
int i;
struct dn_route *rt, **rtp;
......@@ -185,8 +185,6 @@ static void SMP_TIMER_NAME(dn_dst_check_expire)(unsigned long dummy)
mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
}
SMP_TIMER_DEFINE(dn_dst_check_expire, dn_dst_task);
static int dn_dst_gc(void)
{
struct dn_route *rt, **rtp;
......@@ -319,7 +317,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
return 0;
}
void SMP_TIMER_NAME(dn_run_flush)(unsigned long dummy)
void dn_run_flush(unsigned long dummy)
{
int i;
struct dn_route *rt, *next;
......@@ -341,8 +339,6 @@ void SMP_TIMER_NAME(dn_run_flush)(unsigned long dummy)
}
}
SMP_TIMER_DEFINE(dn_run_flush, dn_flush_task);
static spinlock_t dn_rt_flush_lock = SPIN_LOCK_UNLOCKED;
void dn_rt_cache_flush(int delay)
......
......@@ -455,7 +455,7 @@ out: return ret;
}
/* This runs via a timer and thus is always in BH context. */
static void SMP_TIMER_NAME(rt_check_expire)(unsigned long dummy)
static void rt_check_expire(unsigned long dummy)
{
static int rover;
int i = rover, t;
......@@ -498,12 +498,10 @@ static void SMP_TIMER_NAME(rt_check_expire)(unsigned long dummy)
mod_timer(&rt_periodic_timer, now + ip_rt_gc_interval);
}
SMP_TIMER_DEFINE(rt_check_expire, rt_gc_task);
/* This can run from both BH and non-BH contexts, the latter
* in the case of a forced flush event.
*/
static void SMP_TIMER_NAME(rt_run_flush)(unsigned long dummy)
static void rt_run_flush(unsigned long dummy)
{
int i;
struct rtable *rth, *next;
......@@ -526,8 +524,6 @@ static void SMP_TIMER_NAME(rt_run_flush)(unsigned long dummy)
}
}
SMP_TIMER_DEFINE(rt_run_flush, rt_cache_flush_task);
static spinlock_t rt_flush_lock = SPIN_LOCK_UNLOCKED;
void rt_cache_flush(int delay)
......@@ -559,7 +555,7 @@ void rt_cache_flush(int delay)
if (delay <= 0) {
spin_unlock_bh(&rt_flush_lock);
SMP_TIMER_NAME(rt_run_flush)(0);
rt_run_flush(0);
return;
}
......
......@@ -426,7 +426,7 @@ static struct tcp_tw_bucket *tcp_tw_death_row[TCP_TWKILL_SLOTS];
static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
static struct timer_list tcp_tw_timer = TIMER_INITIALIZER(tcp_twkill, 0, 0);
static void SMP_TIMER_NAME(tcp_twkill)(unsigned long dummy)
static void tcp_twkill(unsigned long dummy)
{
struct tcp_tw_bucket *tw;
int killed = 0;
......@@ -466,8 +466,6 @@ static void SMP_TIMER_NAME(tcp_twkill)(unsigned long dummy)
spin_unlock(&tw_death_lock);
}
SMP_TIMER_DEFINE(tcp_twkill, tcp_twkill_task);
/* These are always called from BH context. See callers in
* tcp_input.c to verify this.
*/
......@@ -579,7 +577,7 @@ void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
spin_unlock(&tw_death_lock);
}
void SMP_TIMER_NAME(tcp_twcal_tick)(unsigned long dummy)
void tcp_twcal_tick(unsigned long dummy)
{
int n, slot;
unsigned long j;
......@@ -630,9 +628,6 @@ void SMP_TIMER_NAME(tcp_twcal_tick)(unsigned long dummy)
spin_unlock(&tw_death_lock);
}
SMP_TIMER_DEFINE(tcp_twcal_tick, tcp_twcal_tasklet);
/* This is not only more efficient than what we used to do, it eliminates
* a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment