Commit 68d268d0 authored by David S. Miller

Merge branch 'net-try_cmpxchg-conversions'

Eric Dumazet says:

====================
net: more try_cmpxchg() conversions

Adopt try_cmpxchg() and friends in more places, as this
is preferred nowadays.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8c55face 4ebf802c
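
The pattern applied throughout this series is mechanical: read the atomic once before the loop and let a failed exchange refresh the expected value, instead of re-reading the variable and comparing cmpxchg()'s return value on every iteration. As a minimal sketch (not taken from the diff below; the counter and function names are hypothetical), the same idea in userspace C11, where atomic_compare_exchange_strong() plays the role of the kernel's atomic_try_cmpxchg():

/*
 * Sketch only: C11 stdatomic analogue of the kernel conversion.
 * atomic_compare_exchange_strong() writes the observed value back into
 * "old" on failure, exactly like try_cmpxchg(), so the retry loop does
 * not need to re-read the variable itself.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int counter;      /* hypothetical stand-in for e.g. netstamp_wanted */

/* Old style: re-read every iteration, then compare against the expected value. */
static bool inc_if_positive_old(void)
{
        int old;

        while (1) {
                old = atomic_load(&counter);
                if (old <= 0)
                        return false;
                if (atomic_compare_exchange_strong(&counter, &old, old + 1))
                        return true;
        }
}

/* New style: read once; a failed exchange refreshes "old" for the retry. */
static bool inc_if_positive_new(void)
{
        int old = atomic_load(&counter);

        while (old > 0) {
                if (atomic_compare_exchange_strong(&counter, &old, old + 1))
                        return true;
        }
        return false;
}

int main(void)
{
        atomic_store(&counter, 1);
        printf("inc ok: %d, counter now %d\n",
               inc_if_positive_new(), atomic_load(&counter));
        return 0;
}

The kernel hunks below follow the same shape with atomic_try_cmpxchg(), atomic_long_try_cmpxchg(), atomic64_try_cmpxchg() and try_cmpxchg().
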
@@ -2073,13 +2073,10 @@ static DECLARE_WORK(netstamp_work, netstamp_clear);
 void net_enable_timestamp(void)
 {
 #ifdef CONFIG_JUMP_LABEL
-        int wanted;
+        int wanted = atomic_read(&netstamp_wanted);
 
-        while (1) {
-                wanted = atomic_read(&netstamp_wanted);
-                if (wanted <= 0)
-                        break;
-                if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted)
+        while (wanted > 0) {
+                if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted + 1))
                         return;
         }
         atomic_inc(&netstamp_needed_deferred);
@@ -2093,13 +2090,10 @@ EXPORT_SYMBOL(net_enable_timestamp);
 void net_disable_timestamp(void)
 {
 #ifdef CONFIG_JUMP_LABEL
-        int wanted;
+        int wanted = atomic_read(&netstamp_wanted);
 
-        while (1) {
-                wanted = atomic_read(&netstamp_wanted);
-                if (wanted <= 1)
-                        break;
-                if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted)
+        while (wanted > 1) {
+                if (atomic_try_cmpxchg(&netstamp_wanted, &wanted, wanted - 1))
                         return;
         }
         atomic_dec(&netstamp_needed_deferred);
@@ -5985,10 +5979,9 @@ EXPORT_SYMBOL(__napi_schedule);
  */
 bool napi_schedule_prep(struct napi_struct *n)
 {
-        unsigned long val, new;
+        unsigned long new, val = READ_ONCE(n->state);
 
         do {
-                val = READ_ONCE(n->state);
                 if (unlikely(val & NAPIF_STATE_DISABLE))
                         return false;
                 new = val | NAPIF_STATE_SCHED;
@@ -6001,7 +5994,7 @@ bool napi_schedule_prep(struct napi_struct *n)
                  */
                 new |= (val & NAPIF_STATE_SCHED) / NAPIF_STATE_SCHED *
                                                    NAPIF_STATE_MISSED;
-        } while (cmpxchg(&n->state, val, new) != val);
+        } while (!try_cmpxchg(&n->state, &val, new));
 
         return !(val & NAPIF_STATE_SCHED);
 }
@@ -6069,9 +6062,8 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                 local_irq_restore(flags);
         }
 
+        val = READ_ONCE(n->state);
         do {
-                val = READ_ONCE(n->state);
-
                 WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
 
                 new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
@@ -6084,7 +6076,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                  */
                 new |= (val & NAPIF_STATE_MISSED) / NAPIF_STATE_MISSED *
                                                     NAPIF_STATE_SCHED;
-        } while (cmpxchg(&n->state, val, new) != val);
+        } while (!try_cmpxchg(&n->state, &val, new));
 
         if (unlikely(val & NAPIF_STATE_MISSED)) {
                 __napi_schedule(n);
@@ -6405,8 +6397,8 @@ void napi_disable(struct napi_struct *n)
         might_sleep();
         set_bit(NAPI_STATE_DISABLE, &n->state);
 
-        for ( ; ; ) {
-                val = READ_ONCE(n->state);
+        val = READ_ONCE(n->state);
+        do {
                 if (val & (NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC)) {
                         usleep_range(20, 200);
                         continue;
@@ -6414,10 +6406,7 @@ void napi_disable(struct napi_struct *n)
 
                 new = val | NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC;
                 new &= ~(NAPIF_STATE_THREADED | NAPIF_STATE_PREFER_BUSY_POLL);
-
-                if (cmpxchg(&n->state, val, new) == val)
-                        break;
-        }
+        } while (!try_cmpxchg(&n->state, &val, new));
 
         hrtimer_cancel(&n->timer);
@@ -6434,16 +6423,15 @@ EXPORT_SYMBOL(napi_disable);
  */
 void napi_enable(struct napi_struct *n)
 {
-        unsigned long val, new;
+        unsigned long new, val = READ_ONCE(n->state);
 
         do {
-                val = READ_ONCE(n->state);
                 BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
 
                 new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
                 if (n->dev->threaded && n->thread)
                         new |= NAPIF_STATE_THREADED;
-        } while (cmpxchg(&n->state, val, new) != val);
+        } while (!try_cmpxchg(&n->state, &val, new));
 }
 EXPORT_SYMBOL(napi_enable);
@@ -1267,13 +1267,12 @@ int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
         max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
         user = mmp->user ? : current_user();
 
+        old_pg = atomic_long_read(&user->locked_vm);
         do {
-                old_pg = atomic_long_read(&user->locked_vm);
                 new_pg = old_pg + num_pg;
                 if (new_pg > max_pg)
                         return -ENOBUFS;
-        } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
-                 old_pg);
+        } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg));
 
         if (!mmp->user) {
                 mmp->user = get_uid(user);
@@ -25,14 +25,14 @@ DEFINE_COOKIE(sock_cookie);
 
 u64 __sock_gen_cookie(struct sock *sk)
 {
-        while (1) {
-                u64 res = atomic64_read(&sk->sk_cookie);
+        u64 res = atomic64_read(&sk->sk_cookie);
 
-                if (res)
-                        return res;
-                res = gen_cookie_next(&sock_cookie);
-                atomic64_cmpxchg(&sk->sk_cookie, 0, res);
+        if (!res) {
+                u64 new = gen_cookie_next(&sock_cookie);
+
+                atomic64_try_cmpxchg(&sk->sk_cookie, &res, new);
         }
+        return res;
 }
 
 int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie)
@@ -91,13 +91,12 @@ static void fib6_walker_unlink(struct net *net, struct fib6_walker *w)
 
 static int fib6_new_sernum(struct net *net)
 {
-        int new, old;
+        int new, old = atomic_read(&net->ipv6.fib6_sernum);
 
         do {
-                old = atomic_read(&net->ipv6.fib6_sernum);
                 new = old < INT_MAX ? old + 1 : 1;
-        } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
-                        old, new) != old);
+        } while (!atomic_try_cmpxchg(&net->ipv6.fib6_sernum, &old, new));
         return new;
 }