Commit 1431996b authored by Hugh Dickins, committed by Andrew Morton

percpu_counter: extend _limited_add() to negative amounts

Though tmpfs does not need it, percpu_counter_limited_add() can be twice
as useful if it works sensibly with negative amounts (subs) - typically
decrements towards a limit of 0 or nearby: as suggested by Dave Chinner.

And in the course of that reworking, skip the percpu counter sum if it is
already obvious that the limit would be passed: as suggested by Tim Chen.

Extend the comment above __percpu_counter_limited_add(), defining the
behaviour with positive and negative amounts, allowing negative limits,
but not bothering about overflow beyond S64_MAX.

Link: https://lkml.kernel.org/r/8f86083b-c452-95d4-365b-f16a2e4ebcd4@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Carlos Maiolino <cem@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Darrick J. Wong <djwong@kernel.org>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Tim Chen <tim.c.chen@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent beb98686
...@@ -198,14 +198,21 @@ static inline bool ...@@ -198,14 +198,21 @@ static inline bool
percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount) percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
{ {
unsigned long flags; unsigned long flags;
bool good = false;
s64 count; s64 count;
if (amount == 0)
return true;
local_irq_save(flags); local_irq_save(flags);
count = fbc->count + amount; count = fbc->count + amount;
if (count <= limit) if ((amount > 0 && count <= limit) ||
(amount < 0 && count >= limit)) {
fbc->count = count; fbc->count = count;
good = true;
}
local_irq_restore(flags); local_irq_restore(flags);
return count <= limit; return good;
} }
/* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
......
...@@ -279,8 +279,16 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) ...@@ -279,8 +279,16 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
EXPORT_SYMBOL(__percpu_counter_compare); EXPORT_SYMBOL(__percpu_counter_compare);
/* /*
* Compare counter, and add amount if the total is within limit. * Compare counter, and add amount if total is: less than or equal to limit if
* Return true if amount was added, false if it would exceed limit. * amount is positive, or greater than or equal to limit if amount is negative.
* Return true if amount is added, or false if total would be beyond the limit.
*
* Negative limit is allowed, but unusual.
* When negative amounts (subs) are given to percpu_counter_limited_add(),
* the limit would most naturally be 0 - but other limits are also allowed.
*
* Overflow beyond S64_MAX is not allowed for: counter, limit and amount
* are all assumed to be sane (far from S64_MIN and S64_MAX).
*/ */
bool __percpu_counter_limited_add(struct percpu_counter *fbc, bool __percpu_counter_limited_add(struct percpu_counter *fbc,
s64 limit, s64 amount, s32 batch) s64 limit, s64 amount, s32 batch)
...@@ -288,10 +296,10 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc, ...@@ -288,10 +296,10 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
s64 count; s64 count;
s64 unknown; s64 unknown;
unsigned long flags; unsigned long flags;
bool good; bool good = false;
if (amount > limit) if (amount == 0)
return false; return true;
local_irq_save(flags); local_irq_save(flags);
unknown = batch * num_online_cpus(); unknown = batch * num_online_cpus();
...@@ -299,7 +307,8 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc, ...@@ -299,7 +307,8 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
/* Skip taking the lock when safe */ /* Skip taking the lock when safe */
if (abs(count + amount) <= batch && if (abs(count + amount) <= batch &&
fbc->count + unknown <= limit) { ((amount > 0 && fbc->count + unknown <= limit) ||
(amount < 0 && fbc->count - unknown >= limit))) {
this_cpu_add(*fbc->counters, amount); this_cpu_add(*fbc->counters, amount);
local_irq_restore(flags); local_irq_restore(flags);
return true; return true;
...@@ -309,7 +318,19 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc, ...@@ -309,7 +318,19 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
count = fbc->count + amount; count = fbc->count + amount;
/* Skip percpu_counter_sum() when safe */ /* Skip percpu_counter_sum() when safe */
if (count + unknown > limit) { if (amount > 0) {
if (count - unknown > limit)
goto out;
if (count + unknown <= limit)
good = true;
} else {
if (count + unknown < limit)
goto out;
if (count - unknown >= limit)
good = true;
}
if (!good) {
s32 *pcount; s32 *pcount;
int cpu; int cpu;
...@@ -317,15 +338,20 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc, ...@@ -317,15 +338,20 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
pcount = per_cpu_ptr(fbc->counters, cpu); pcount = per_cpu_ptr(fbc->counters, cpu);
count += *pcount; count += *pcount;
} }
if (amount > 0) {
if (count > limit)
goto out;
} else {
if (count < limit)
goto out;
}
good = true;
} }
good = count <= limit; count = __this_cpu_read(*fbc->counters);
if (good) { fbc->count += count + amount;
count = __this_cpu_read(*fbc->counters); __this_cpu_sub(*fbc->counters, count);
fbc->count += count + amount; out:
__this_cpu_sub(*fbc->counters, count);
}
raw_spin_unlock(&fbc->lock); raw_spin_unlock(&fbc->lock);
local_irq_restore(flags); local_irq_restore(flags);
return good; return good;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment