Commit ba3a3ce1 authored by Julian Anastasov, committed by Pablo Neira Ayuso

ipvs: convert sched_lock to spin lock

As all read_locks are gone, a spin lock is preferred.
Signed-off-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>
parent ed3ffc4e
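
For orientation, here is a minimal sketch (hypothetical code, not part of this patch) of the conversion pattern applied throughout the diff below: since no read_lock users of sched_lock remain, each rwlock_init/write_lock(_bh)/write_unlock(_bh) call is replaced by the corresponding spin_lock_init/spin_lock(_bh)/spin_unlock(_bh) call, and the critical sections themselves stay unchanged.

	#include <linux/spinlock.h>

	/* Hypothetical illustration of the rwlock -> spinlock conversion.
	 * The struct and function names are made up for this sketch only.
	 */
	struct demo_sched_state {
		spinlock_t	lock;		/* was: rwlock_t lock; */
		void		*sched_data;
	};

	static void demo_init(struct demo_sched_state *s)
	{
		spin_lock_init(&s->lock);	/* was: rwlock_init(&s->lock); */
	}

	static void demo_update(struct demo_sched_state *s, void *data)
	{
		/* _bh variants are kept where the lock is also taken
		 * from softirq context, exactly as in the patch.
		 */
		spin_lock_bh(&s->lock);		/* was: write_lock_bh(&s->lock); */
		s->sched_data = data;
		spin_unlock_bh(&s->lock);	/* was: write_unlock_bh(&s->lock); */
	}
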
@@ -734,7 +734,7 @@ struct ip_vs_service {
 	/* for scheduling */
 	struct ip_vs_scheduler	*scheduler;	/* bound scheduler object */
-	rwlock_t		sched_lock;	/* lock sched_data */
+	spinlock_t		sched_lock;	/* lock sched_data */
 	void			*sched_data;	/* scheduler application data */
 	/* alternate persistence engine */
...
@@ -1219,7 +1219,7 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
 	svc->net = net;
 	INIT_LIST_HEAD(&svc->destinations);
-	rwlock_init(&svc->sched_lock);
+	spin_lock_init(&svc->sched_lock);
 	spin_lock_init(&svc->stats.lock);
 	/* Bind the scheduler */
...
@@ -194,7 +194,7 @@ ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
 /*
  * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
- * address to a server. Called under write lock.
+ * address to a server. Called under spin lock.
  */
 static inline struct ip_vs_lblc_entry *
 ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
@@ -242,7 +242,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
 	struct hlist_node *next;
 	int i;
-	write_lock_bh(&svc->sched_lock);
+	spin_lock_bh(&svc->sched_lock);
 	tbl->dead = 1;
 	for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
 		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
@@ -250,7 +250,7 @@ static void ip_vs_lblc_flush(struct ip_vs_service *svc)
 			atomic_dec(&tbl->entries);
 		}
 	}
-	write_unlock_bh(&svc->sched_lock);
+	spin_unlock_bh(&svc->sched_lock);
 }
 static int sysctl_lblc_expiration(struct ip_vs_service *svc)
@@ -274,7 +274,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
 	for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLC_TAB_MASK;
-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
 		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
 			if (time_before(now,
 					en->lastuse +
@@ -284,7 +284,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
 			ip_vs_lblc_free(en);
 			atomic_dec(&tbl->entries);
 		}
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
 	}
 	tbl->rover = j;
 }
@@ -330,7 +330,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
 	for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLC_TAB_MASK;
-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
 		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
 			if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
 				continue;
@@ -339,7 +339,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
 			atomic_dec(&tbl->entries);
 			goal--;
 		}
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
 		if (goal <= 0)
 			break;
 	}
@@ -527,10 +527,10 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	}
 	/* If we fail to create a cache entry, we'll just use the valid dest */
-	write_lock(&svc->sched_lock);
+	spin_lock(&svc->sched_lock);
 	if (!tbl->dead)
 		ip_vs_lblc_new(tbl, &iph.daddr, dest);
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);
 out:
 	IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
...
@@ -368,7 +368,7 @@ ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
 /*
  * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
- * IP address to a server. Called under write lock.
+ * IP address to a server. Called under spin lock.
  */
 static inline struct ip_vs_lblcr_entry *
 ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
@@ -412,14 +412,14 @@ static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
 	struct ip_vs_lblcr_entry *en;
 	struct hlist_node *next;
-	write_lock_bh(&svc->sched_lock);
+	spin_lock_bh(&svc->sched_lock);
 	tbl->dead = 1;
 	for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
 			ip_vs_lblcr_free(en);
 		}
 	}
-	write_unlock_bh(&svc->sched_lock);
+	spin_unlock_bh(&svc->sched_lock);
 }
 static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
@@ -443,7 +443,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
 	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
 		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
 			if (time_after(en->lastuse +
 				       sysctl_lblcr_expiration(svc), now))
@@ -452,7 +452,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
 			ip_vs_lblcr_free(en);
 			atomic_dec(&tbl->entries);
 		}
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
 	}
 	tbl->rover = j;
 }
@@ -498,7 +498,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
 	for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
 		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
 		hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
 			if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
 				continue;
@@ -507,7 +507,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
 			atomic_dec(&tbl->entries);
 			goal--;
 		}
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
 		if (goal <= 0)
 			break;
 	}
@@ -678,7 +678,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		if (atomic_read(&en->set.size) > 1 &&
 		    time_after(jiffies, en->set.lastmod +
 				sysctl_lblcr_expiration(svc))) {
-			write_lock(&svc->sched_lock);
+			spin_lock(&svc->sched_lock);
 			if (atomic_read(&en->set.size) > 1) {
 				struct ip_vs_dest *m;
@@ -686,7 +686,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 				if (m)
 					ip_vs_dest_set_erase(&en->set, m);
 			}
-			write_unlock(&svc->sched_lock);
+			spin_unlock(&svc->sched_lock);
 		}
 		/* If the destination is not overloaded, use it */
@@ -701,10 +701,10 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		}
 		/* Update our cache entry */
-		write_lock(&svc->sched_lock);
+		spin_lock(&svc->sched_lock);
 		if (!tbl->dead)
 			ip_vs_dest_set_insert(&en->set, dest, true);
-		write_unlock(&svc->sched_lock);
+		spin_unlock(&svc->sched_lock);
 		goto out;
 	}
@@ -716,10 +716,10 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	}
 	/* If we fail to create a cache entry, we'll just use the valid dest */
-	write_lock(&svc->sched_lock);
+	spin_lock(&svc->sched_lock);
 	if (!tbl->dead)
 		ip_vs_lblcr_new(tbl, &iph.daddr, dest);
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);
 out:
 	IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
...
@@ -39,14 +39,14 @@ static int ip_vs_rr_del_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest)
 {
 	struct list_head *p;
-	write_lock_bh(&svc->sched_lock);
+	spin_lock_bh(&svc->sched_lock);
 	p = (struct list_head *) svc->sched_data;
 	/* dest is already unlinked, so p->prev is not valid but
 	 * p->next is valid, use it to reach previous entry.
 	 */
 	if (p == &dest->n_list)
 		svc->sched_data = p->next->prev;
-	write_unlock_bh(&svc->sched_lock);
+	spin_unlock_bh(&svc->sched_lock);
 	return 0;
 }
@@ -63,7 +63,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
-	write_lock(&svc->sched_lock);
+	spin_lock(&svc->sched_lock);
 	p = (struct list_head *) svc->sched_data;
 	last = dest = list_entry(p, struct ip_vs_dest, n_list);
@@ -85,13 +85,13 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	} while (pass < 2 && p != &svc->destinations);
 stop:
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);
 	ip_vs_scheduler_err(svc, "no destination available");
 	return NULL;
 out:
 	svc->sched_data = &dest->n_list;
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);
 	IP_VS_DBG_BUF(6, "RR: server %s:%u "
 		      "activeconns %d refcnt %d weight %d\n",
 		      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
...
@@ -145,7 +145,7 @@ static int ip_vs_wrr_dest_changed(struct ip_vs_service *svc,
 {
 	struct ip_vs_wrr_mark *mark = svc->sched_data;
-	write_lock_bh(&svc->sched_lock);
+	spin_lock_bh(&svc->sched_lock);
 	mark->cl = list_entry(&svc->destinations, struct ip_vs_dest, n_list);
 	mark->di = ip_vs_wrr_gcd_weight(svc);
 	mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1);
@@ -153,7 +153,7 @@ static int ip_vs_wrr_dest_changed(struct ip_vs_service *svc,
 		mark->cw = mark->mw;
 	else if (mark->di > 1)
 		mark->cw = (mark->cw / mark->di) * mark->di + 1;
-	write_unlock_bh(&svc->sched_lock);
+	spin_unlock_bh(&svc->sched_lock);
 	return 0;
 }
@@ -170,7 +170,7 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
-	write_lock(&svc->sched_lock);
+	spin_lock(&svc->sched_lock);
 	dest = mark->cl;
 	/* No available dests? */
 	if (mark->mw == 0)
@@ -222,7 +222,7 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		mark->cl = dest;
 out:
-	write_unlock(&svc->sched_lock);
+	spin_unlock(&svc->sched_lock);
 	return dest;
 err_noavail:
...