Commit 2fcd9005 authored by Matthew Wilcox, committed by Linus Torvalds

radix-tree: miscellaneous fixes

Typos, whitespace, grammar, line length, using the correct types, etc.
Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Jan Kara <jack@suse.com>
Cc: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b76ba4af
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -66,7 +66,7 @@ static struct kmem_cache *radix_tree_node_cachep;
  * Per-cpu pool of preloaded nodes
  */
 struct radix_tree_preload {
-	int nr;
+	unsigned nr;
 	/* nodes->private_data points to next preallocated node */
 	struct radix_tree_node *nodes;
 };
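
For context, this per-cpu pool backs radix_tree_preload(): callers preload before taking a spinlock so the insertion itself cannot fail on allocation. A minimal sketch of that documented calling convention (the lock and tree names are placeholders, not from this patch):

	if (radix_tree_preload(GFP_KERNEL))	/* may sleep; disables preemption on success */
		return -ENOMEM;
	spin_lock(&my_lock);
	err = radix_tree_insert(&my_tree, index, item);
	spin_unlock(&my_lock);
	radix_tree_preload_end();		/* re-enables preemption */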
@@ -147,7 +147,7 @@ static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
 	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
 }
 
-static inline void root_tag_clear(struct radix_tree_root *root, unsigned int tag)
+static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
 {
 	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
 }
@@ -159,7 +159,7 @@ static inline void root_tag_clear_all(struct radix_tree_root *root)
 
 static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
 {
-	return (__force unsigned)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
+	return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
 }
 
 static inline unsigned root_tags_get(struct radix_tree_root *root)
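
These root_tag_* helpers work because the tree root packs its per-root tag bits into the same word as the allocation flags: the low __GFP_BITS_SHIFT bits hold the gfp mask, and bit (__GFP_BITS_SHIFT + tag) records whether the root has that tag set. The companion accessor used later in this diff therefore masks the tag bits back off, roughly:

	static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
	{
		/* drop the tag bits stored above __GFP_BITS_SHIFT */
		return root->gfp_mask & __GFP_BITS_MASK;
	}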
@@ -173,7 +173,7 @@ static inline unsigned root_tags_get(struct radix_tree_root *root)
  */
 static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
 {
-	int idx;
+	unsigned idx;
 	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
 		if (node->tags[tag][idx])
 			return 1;
@@ -273,9 +273,9 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 	gfp_t gfp_mask = root_gfp_mask(root);
 
 	/*
-	 * Preload code isn't irq safe and it doesn't make sence to use
-	 * preloading in the interrupt anyway as all the allocations have to
-	 * be atomic. So just do normal allocation when in interrupt.
+	 * Preload code isn't irq safe and it doesn't make sense to use
+	 * preloading during an interrupt anyway as all the allocations have
+	 * to be atomic. So just do normal allocation when in interrupt.
 	 */
 	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
 		struct radix_tree_preload *rtp;
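
When the preload pool does apply, this branch pops a node off the per-cpu chain instead of calling into the slab allocator; in sketch form (matching the nodes->private_data chaining noted in the struct at the top of this diff):

		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->private_data;	/* unlink head of chain */
			rtp->nr--;
		}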
@@ -448,7 +448,6 @@ static unsigned radix_tree_load_root(struct radix_tree_root *root,
 static int radix_tree_extend(struct radix_tree_root *root,
 				unsigned long index)
 {
-	struct radix_tree_node *node;
 	struct radix_tree_node *slot;
 	unsigned int height;
 	int tag;
@@ -465,7 +464,9 @@ static int radix_tree_extend(struct radix_tree_root *root,
 	do {
 		unsigned int newheight;
-		if (!(node = radix_tree_node_alloc(root)))
+		struct radix_tree_node *node = radix_tree_node_alloc(root);
+
+		if (!node)
 			return -ENOMEM;
 
 		/* Propagate the aggregated tag info into the new root */
@@ -542,7 +543,8 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
 	while (shift > order) {
 		if (slot == NULL) {
 			/* Have to add a child node.  */
-			if (!(slot = radix_tree_node_alloc(root)))
+			slot = radix_tree_node_alloc(root);
+			if (!slot)
 				return -ENOMEM;
 			slot->path = height;
 			slot->parent = node;
@@ -770,8 +772,8 @@ EXPORT_SYMBOL(radix_tree_tag_set);
  * @tag:	tag index
  *
  * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
- * corresponding to @index in the radix tree.  If
- * this causes the leaf node to have no tags set then clear the tag in the
+ * corresponding to @index in the radix tree.  If this causes
+ * the leaf node to have no tags set then clear the tag in the
  * next-to-leaf node, etc.
  *
  * Returns the address of the tagged item on success, else NULL.  ie:
@@ -1035,7 +1037,7 @@ EXPORT_SYMBOL(radix_tree_next_chunk);
  * set is outside the range we are scanning. This reults in dangling tags and
  * can lead to problems with later tag operations (e.g. livelocks on lookups).
  *
- * The function returns number of leaves where the tag was set and sets
+ * The function returns the number of leaves where the tag was set and sets
  * *first_indexp to the first unscanned index.
  * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
  * be prepared to handle that.
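
That batched return value is what lets callers cover a large range without holding the tree lock throughout; tag_pages_for_writeback() in mm/page-writeback.c, for instance, loops roughly like this (a condensed sketch, not code from this patch):

	do {
		spin_lock_irq(&mapping->tree_lock);
		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, WRITEBACK_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		cond_resched();
		/* 'start' may have wrapped to 0 when end == ULONG_MAX */
	} while (tagged >= WRITEBACK_TAG_BATCH && start);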
@@ -1153,9 +1155,10 @@ EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
  *
  * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
  * rcu_read_lock. In this case, rather than the returned results being
- * an atomic snapshot of the tree at a single point in time, the semantics
- * of an RCU protected gang lookup are as though multiple radix_tree_lookups
- * have been issued in individual locks, and results stored in 'results'.
+ * an atomic snapshot of the tree at a single point in time, the
+ * semantics of an RCU protected gang lookup are as though multiple
+ * radix_tree_lookups have been issued in individual locks, and results
+ * stored in 'results'.
  */
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
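
Concretely, an RCU-side caller does something like the following sketch (the tree and index names are placeholders): each returned slot reflects some moment during the walk rather than one consistent snapshot, so items still need per-item validation before use.

	void *results[16];
	unsigned int nr, i;

	rcu_read_lock();
	nr = radix_tree_gang_lookup(&my_tree, results, first_index, 16);
	for (i = 0; i < nr; i++) {
		/* take a reference / revalidate results[i] before using it */
	}
	rcu_read_unlock();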
@@ -1460,7 +1463,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
 		 * their slot to become empty sooner or later.
 		 *
 		 * For example, lockless pagecache will look up a slot, deref
-		 * the page pointer, and if the page is 0 refcount it means it
+		 * the page pointer, and if the page has 0 refcount it means it
 		 * was concurrently deleted from pagecache so try the deref
 		 * again. Fortunately there is already a requirement for logic
 		 * to retry the entire slot lookup -- the indirect pointer
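
The retry protocol this comment refers to looks, in outline, like the pagecache's speculative-get loop (a condensed sketch in the style of find_get_entry(), not code from this patch):

	struct page *page;
repeat:
	page = rcu_dereference(*slot);
	if (!page)
		return NULL;
	if (!page_cache_get_speculative(page))
		goto repeat;		/* refcount hit 0: page is being freed */
	if (unlikely(page != *slot)) {	/* slot was retargeted under us */
		put_page(page);
		goto repeat;
	}
	return page;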
@@ -1649,14 +1652,13 @@ static __init void radix_tree_init_maxindex(void)
 }
 
 static int radix_tree_callback(struct notifier_block *nfb,
-			       unsigned long action,
-			       void *hcpu)
+			       unsigned long action, void *hcpu)
 {
 	int cpu = (long)hcpu;
 	struct radix_tree_preload *rtp;
 	struct radix_tree_node *node;
 
-	/* Free per-cpu pool of perloaded nodes */
+	/* Free per-cpu pool of preloaded nodes */
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
 		rtp = &per_cpu(radix_tree_preloads, cpu);
 		while (rtp->nr) {
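
On CPU_DEAD, the dead CPU's pool is drained; the body of this while loop (just past the end of the hunk) walks the private_data chain and returns each node to the slab cache, roughly:

		node = rtp->nodes;
		rtp->nodes = node->private_data;
		kmem_cache_free(radix_tree_node_cachep, node);
		rtp->nr--;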