Commit d3eb004b authored by David Woodhouse's avatar David Woodhouse

Merge infradead.org:/home/torvalds/BK/linux-2.5

into infradead.org:/home/dwmw2/BK/rbtree-2.5
parents 0e9387ab 972d3674
......@@ -54,7 +54,7 @@ struct vm_area_struct {
pgprot_t vm_page_prot; /* Access permissions of this VMA. */
unsigned long vm_flags; /* Flags, listed below. */
rb_node_t vm_rb;
struct rb_node vm_rb;
/*
* For areas with an address space and backing store,
......
......@@ -34,7 +34,7 @@
static inline struct page * rb_search_page_cache(struct inode * inode,
unsigned long offset)
{
rb_node_t * n = inode->i_rb_page_cache.rb_node;
struct rb_node * n = inode->i_rb_page_cache.rb_node;
struct page * page;
while (n)
......@@ -53,10 +53,10 @@ static inline struct page * rb_search_page_cache(struct inode * inode,
static inline struct page * __rb_insert_page_cache(struct inode * inode,
unsigned long offset,
rb_node_t * node)
struct rb_node * node)
{
rb_node_t ** p = &inode->i_rb_page_cache.rb_node;
rb_node_t * parent = NULL;
struct rb_node ** p = &inode->i_rb_page_cache.rb_node;
struct rb_node * parent = NULL;
struct page * page;
while (*p)
......@@ -79,7 +79,7 @@ static inline struct page * __rb_insert_page_cache(struct inode * inode,
static inline struct page * rb_insert_page_cache(struct inode * inode,
unsigned long offset,
rb_node_t * node)
struct rb_node * node)
{
struct page * ret;
if ((ret = __rb_insert_page_cache(inode, offset, node)))
......@@ -97,31 +97,38 @@ static inline struct page * rb_insert_page_cache(struct inode * inode,
#include <linux/kernel.h>
#include <linux/stddef.h>
typedef struct rb_node_s
struct rb_node
{
struct rb_node_s * rb_parent;
struct rb_node *rb_parent;
int rb_color;
#define RB_RED 0
#define RB_BLACK 1
struct rb_node_s * rb_right;
struct rb_node_s * rb_left;
}
rb_node_t;
struct rb_node *rb_right;
struct rb_node *rb_left;
};
typedef struct rb_root_s
struct rb_root
{
struct rb_node_s * rb_node;
}
rb_root_t;
struct rb_node *rb_node;
};
#define RB_ROOT (rb_root_t) { NULL, }
#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) \
((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))
extern void rb_insert_color(rb_node_t *, rb_root_t *);
extern void rb_erase(rb_node_t *, rb_root_t *);
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(struct rb_node *);
extern struct rb_node *rb_prev(struct rb_node *);
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
static inline void rb_link_node(rb_node_t * node, rb_node_t * parent, rb_node_t ** rb_link)
static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
struct rb_node ** rb_link)
{
node->rb_parent = parent;
node->rb_color = RB_RED;
......
......@@ -172,7 +172,7 @@ struct namespace;
struct kioctx;
struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
rb_root_t mm_rb;
struct rb_root mm_rb;
struct vm_area_struct * mmap_cache; /* last find_vma result */
pgd_t * pgd;
atomic_t mm_users; /* How many users with user space? */
......
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
(C) 2002 David Woodhouse <dwmw2@infradead.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
......@@ -22,9 +23,9 @@
#include <linux/rbtree.h>
#include <linux/module.h>
static void __rb_rotate_left(rb_node_t * node, rb_root_t * root)
static void __rb_rotate_left(struct rb_node *node, struct rb_root *root)
{
rb_node_t * right = node->rb_right;
struct rb_node *right = node->rb_right;
if ((node->rb_right = right->rb_left))
right->rb_left->rb_parent = node;
......@@ -42,9 +43,9 @@ static void __rb_rotate_left(rb_node_t * node, rb_root_t * root)
node->rb_parent = right;
}
static void __rb_rotate_right(rb_node_t * node, rb_root_t * root)
static void __rb_rotate_right(struct rb_node *node, struct rb_root *root)
{
rb_node_t * left = node->rb_left;
struct rb_node *left = node->rb_left;
if ((node->rb_left = left->rb_right))
left->rb_right->rb_parent = node;
......@@ -62,9 +63,9 @@ static void __rb_rotate_right(rb_node_t * node, rb_root_t * root)
node->rb_parent = left;
}
void rb_insert_color(rb_node_t * node, rb_root_t * root)
void rb_insert_color(struct rb_node *node, struct rb_root *root)
{
rb_node_t * parent, * gparent;
struct rb_node *parent, *gparent;
while ((parent = node->rb_parent) && parent->rb_color == RB_RED)
{
......@@ -73,7 +74,7 @@ void rb_insert_color(rb_node_t * node, rb_root_t * root)
if (parent == gparent->rb_left)
{
{
register rb_node_t * uncle = gparent->rb_right;
register struct rb_node *uncle = gparent->rb_right;
if (uncle && uncle->rb_color == RB_RED)
{
uncle->rb_color = RB_BLACK;
......@@ -86,7 +87,7 @@ void rb_insert_color(rb_node_t * node, rb_root_t * root)
if (parent->rb_right == node)
{
register rb_node_t * tmp;
register struct rb_node *tmp;
__rb_rotate_left(parent, root);
tmp = parent;
parent = node;
......@@ -98,7 +99,7 @@ void rb_insert_color(rb_node_t * node, rb_root_t * root)
__rb_rotate_right(gparent, root);
} else {
{
register rb_node_t * uncle = gparent->rb_left;
register struct rb_node *uncle = gparent->rb_left;
if (uncle && uncle->rb_color == RB_RED)
{
uncle->rb_color = RB_BLACK;
......@@ -111,7 +112,7 @@ void rb_insert_color(rb_node_t * node, rb_root_t * root)
if (parent->rb_left == node)
{
register rb_node_t * tmp;
register struct rb_node *tmp;
__rb_rotate_right(parent, root);
tmp = parent;
parent = node;
......@@ -128,10 +129,10 @@ void rb_insert_color(rb_node_t * node, rb_root_t * root)
}
EXPORT_SYMBOL(rb_insert_color);
static void __rb_erase_color(rb_node_t * node, rb_node_t * parent,
rb_root_t * root)
static void __rb_erase_color(struct rb_node *node, struct rb_node *parent,
struct rb_root *root)
{
rb_node_t * other;
struct rb_node *other;
while ((!node || node->rb_color == RB_BLACK) && node != root->rb_node)
{
......@@ -159,7 +160,7 @@ static void __rb_erase_color(rb_node_t * node, rb_node_t * parent,
if (!other->rb_right ||
other->rb_right->rb_color == RB_BLACK)
{
register rb_node_t * o_left;
register struct rb_node *o_left;
if ((o_left = other->rb_left))
o_left->rb_color = RB_BLACK;
other->rb_color = RB_RED;
......@@ -199,7 +200,7 @@ static void __rb_erase_color(rb_node_t * node, rb_node_t * parent,
if (!other->rb_left ||
other->rb_left->rb_color == RB_BLACK)
{
register rb_node_t * o_right;
register struct rb_node *o_right;
if ((o_right = other->rb_right))
o_right->rb_color = RB_BLACK;
other->rb_color = RB_RED;
......@@ -220,9 +221,9 @@ static void __rb_erase_color(rb_node_t * node, rb_node_t * parent,
node->rb_color = RB_BLACK;
}
void rb_erase(rb_node_t * node, rb_root_t * root)
void rb_erase(struct rb_node *node, struct rb_root *root)
{
rb_node_t * child, * parent;
struct rb_node *child, *parent;
int color;
if (!node->rb_left)
......@@ -231,7 +232,7 @@ void rb_erase(rb_node_t * node, rb_root_t * root)
child = node->rb_left;
else
{
rb_node_t * old = node, * left;
struct rb_node *old = node, *left;
node = node->rb_right;
while ((left = node->rb_left))
......@@ -294,3 +295,71 @@ void rb_erase(rb_node_t * node, rb_root_t * root)
__rb_erase_color(child, parent, root);
}
EXPORT_SYMBOL(rb_erase);
struct rb_node *rb_next(struct rb_node *node)
{
	struct rb_node *parent;

	/* A right subtree exists: the in-order successor is its
	   leftmost node. */
	if (node->rb_right) {
		for (node = node->rb_right; node->rb_left; node = node->rb_left)
			;
		return node;
	}

	/* No right subtree: everything below us is smaller, so climb
	   until we step up out of a left subtree.  The first ancestor
	   we reach from its left side is the successor; if we run off
	   the root, there is none and we return NULL. */
	while ((parent = node->rb_parent) && node == parent->rb_right)
		node = parent;

	return parent;
}
EXPORT_SYMBOL(rb_next);
struct rb_node *rb_prev(struct rb_node *node)
{
	struct rb_node *parent;

	/* A left subtree exists: the in-order predecessor is its
	   rightmost node. */
	if (node->rb_left) {
		for (node = node->rb_left; node->rb_right; node = node->rb_right)
			;
		return node;
	}

	/* No left subtree: climb until we step up out of a right
	   subtree.  The first ancestor reached from its right side is
	   the predecessor; NULL if we fall off the root. */
	while ((parent = node->rb_parent) && node == parent->rb_left)
		node = parent;

	return parent;
}
EXPORT_SYMBOL(rb_prev);
void rb_replace_node(struct rb_node *victim, struct rb_node *new,
		     struct rb_root *root)
{
	struct rb_node *parent = victim->rb_parent;

	/* Redirect the downward link that points at the victim: either
	   the parent's child pointer, or the root pointer itself. */
	if (!parent)
		root->rb_node = new;
	else if (parent->rb_left == victim)
		parent->rb_left = new;
	else
		parent->rb_right = new;

	/* Reparent the victim's children, if any, onto the replacement. */
	if (victim->rb_left)
		victim->rb_left->rb_parent = new;
	if (victim->rb_right)
		victim->rb_right->rb_parent = new;

	/* Finally take over the victim's links and colour wholesale;
	   no rebalancing is needed since the tree shape is unchanged. */
	*new = *victim;
}
EXPORT_SYMBOL(rb_replace_node);
......@@ -245,7 +245,7 @@ static inline unsigned long calc_vm_flags(unsigned long prot, unsigned long flag
}
#ifdef DEBUG_MM_RB
static int browse_rb(rb_node_t * rb_node) {
static int browse_rb(struct rb_node * rb_node) {
int i = 0;
if (rb_node) {
i++;
......@@ -277,10 +277,11 @@ static void validate_mm(struct mm_struct * mm) {
static struct vm_area_struct * find_vma_prepare(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct ** pprev,
rb_node_t *** rb_link, rb_node_t ** rb_parent)
struct rb_node *** rb_link,
struct rb_node ** rb_parent)
{
struct vm_area_struct * vma;
rb_node_t ** __rb_link, * __rb_parent, * rb_prev;
struct rb_node ** __rb_link, * __rb_parent, * rb_prev;
__rb_link = &mm->mm_rb.rb_node;
rb_prev = __rb_parent = NULL;
......@@ -311,8 +312,8 @@ static struct vm_area_struct * find_vma_prepare(struct mm_struct * mm, unsigned
return vma;
}
static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
rb_node_t * rb_parent)
static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct * vma,
struct vm_area_struct * prev, struct rb_node * rb_parent)
{
if (prev) {
vma->vm_next = prev->vm_next;
......@@ -327,7 +328,7 @@ static inline void __vma_link_list(struct mm_struct * mm, struct vm_area_struct
}
static inline void __vma_link_rb(struct mm_struct * mm, struct vm_area_struct * vma,
rb_node_t ** rb_link, rb_node_t * rb_parent)
struct rb_node ** rb_link, struct rb_node * rb_parent)
{
rb_link_node(&vma->vm_rb, rb_parent, rb_link);
rb_insert_color(&vma->vm_rb, &mm->mm_rb);
......@@ -353,7 +354,7 @@ static inline void __vma_link_file(struct vm_area_struct * vma)
}
static void __vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
rb_node_t ** rb_link, rb_node_t * rb_parent)
struct rb_node ** rb_link, struct rb_node * rb_parent)
{
__vma_link_list(mm, vma, prev, rb_parent);
__vma_link_rb(mm, vma, rb_link, rb_parent);
......@@ -361,7 +362,7 @@ static void __vma_link(struct mm_struct * mm, struct vm_area_struct * vma, stru
}
static inline void vma_link(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev,
rb_node_t ** rb_link, rb_node_t * rb_parent)
struct rb_node ** rb_link, struct rb_node * rb_parent)
{
spin_lock(&mm->page_table_lock);
lock_vma_mappings(vma);
......@@ -374,7 +375,8 @@ static inline void vma_link(struct mm_struct * mm, struct vm_area_struct * vma,
}
static int vma_merge(struct mm_struct * mm, struct vm_area_struct * prev,
rb_node_t * rb_parent, unsigned long addr, unsigned long end, unsigned long vm_flags)
struct rb_node * rb_parent, unsigned long addr,
unsigned long end, unsigned long vm_flags)
{
spinlock_t * lock = &mm->page_table_lock;
if (!prev) {
......@@ -426,7 +428,7 @@ unsigned long do_mmap_pgoff(struct file * file, unsigned long addr,
unsigned int vm_flags;
int correct_wcount = 0;
int error;
rb_node_t ** rb_link, * rb_parent;
struct rb_node ** rb_link, * rb_parent;
unsigned long charged = 0;
if (file && (!file->f_op || !file->f_op->mmap))
......@@ -698,7 +700,7 @@ struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
/* (Cache hit rate is typically around 35%.) */
vma = mm->mmap_cache;
if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
rb_node_t * rb_node;
struct rb_node * rb_node;
rb_node = mm->mm_rb.rb_node;
vma = NULL;
......@@ -728,7 +730,7 @@ struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev)
{
struct vm_area_struct *vma = NULL, *prev = NULL;
rb_node_t * rb_node;
struct rb_node * rb_node;
if (!mm)
goto out;
......@@ -1158,7 +1160,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
struct mm_struct * mm = current->mm;
struct vm_area_struct * vma, * prev;
unsigned long flags;
rb_node_t ** rb_link, * rb_parent;
struct rb_node ** rb_link, * rb_parent;
len = PAGE_ALIGN(len);
if (!len)
......@@ -1236,7 +1238,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
void build_mmap_rb(struct mm_struct * mm)
{
struct vm_area_struct * vma;
rb_node_t ** rb_link, * rb_parent;
struct rb_node ** rb_link, * rb_parent;
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
......@@ -1319,7 +1321,7 @@ void exit_mmap(struct mm_struct * mm)
void __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
{
struct vm_area_struct * __vma, * prev;
rb_node_t ** rb_link, * rb_parent;
struct rb_node ** rb_link, * rb_parent;
__vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
if (__vma && __vma->vm_start < vma->vm_end)
......@@ -1332,7 +1334,7 @@ void __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
void insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
{
struct vm_area_struct * __vma, * prev;
rb_node_t ** rb_link, * rb_parent;
struct rb_node ** rb_link, * rb_parent;
__vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
if (__vma && __vma->vm_start < vma->vm_end)
......
......@@ -162,12 +162,12 @@ struct htb_class
struct list_head drop_list;
} leaf;
struct htb_class_inner {
rb_root_t feed[TC_HTB_NUMPRIO]; /* feed trees */
rb_node_t *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
} inner;
} un;
rb_node_t node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
rb_node_t pq_node; /* node for event queue */
struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
struct rb_node pq_node; /* node for event queue */
unsigned long pq_key; /* the same type as jiffies global */
int prio_activity; /* for which prios are we active */
......@@ -207,12 +207,12 @@ struct htb_sched
struct list_head drops[TC_HTB_NUMPRIO]; /* active leaves (for drops) */
/* self list - roots of self generating tree */
rb_root_t row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
int row_mask[TC_HTB_MAXDEPTH];
rb_node_t *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
/* self wait list - roots of wait PQs per row */
rb_root_t wait_pq[TC_HTB_MAXDEPTH];
struct rb_root wait_pq[TC_HTB_MAXDEPTH];
/* time of nearest event per level (row) */
unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
......@@ -324,9 +324,9 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
}
#ifdef HTB_DEBUG
static void htb_next_rb_node(rb_node_t **n);
static void htb_next_rb_node(struct rb_node **n);
#define HTB_DUMTREE(root,memb) if(root) { \
rb_node_t *n = (root)->rb_node; \
struct rb_node *n = (root)->rb_node; \
while (n->rb_left) n = n->rb_left; \
while (n) { \
struct htb_class *cl = rb_entry(n, struct htb_class, memb); \
......@@ -375,10 +375,10 @@ static void htb_debug_dump (struct htb_sched *q)
* Routine adds class to the list (actually tree) sorted by classid.
* Make sure that class is not already on such list for given prio.
*/
static void htb_add_to_id_tree (HTB_ARGQ rb_root_t *root,
static void htb_add_to_id_tree (HTB_ARGQ struct rb_root *root,
struct htb_class *cl,int prio)
{
rb_node_t **p = &root->rb_node, *parent = NULL;
struct rb_node **p = &root->rb_node, *parent = NULL;
HTB_DBG(7,3,"htb_add_id_tree cl=%X prio=%d\n",cl->classid,prio);
#ifdef HTB_DEBUG
if (cl->node[prio].rb_color != -1) { BUG_TRAP(0); return; }
......@@ -411,7 +411,7 @@ static void htb_add_to_id_tree (HTB_ARGQ rb_root_t *root,
static void htb_add_to_wait_tree (struct htb_sched *q,
struct htb_class *cl,long delay,int debug_hint)
{
rb_node_t **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
HTB_DBG(7,3,"htb_add_wt cl=%X key=%lu\n",cl->classid,cl->pq_key);
#ifdef HTB_DEBUG
if (cl->pq_node.rb_color != -1) { BUG_TRAP(0); return; }
......@@ -447,20 +447,9 @@ static void htb_add_to_wait_tree (struct htb_sched *q,
* When we are past last key we return NULL.
* Average complexity is 2 steps per call.
*/
static void htb_next_rb_node(rb_node_t **n)
static void htb_next_rb_node(struct rb_node **n)
{
rb_node_t *p;
if ((*n)->rb_right) {
*n = (*n)->rb_right;
while ((*n)->rb_left)
*n = (*n)->rb_left;
return;
}
while ((p = (*n)->rb_parent) != NULL) {
if (p->rb_left == *n) break;
*n = p;
}
*n = p;
*n = rb_next(*n);
}
/**
......@@ -869,7 +858,7 @@ static long htb_do_events(struct htb_sched *q,int level)
for (i = 0; i < 500; i++) {
struct htb_class *cl;
long diff;
rb_node_t *p = q->wait_pq[level].rb_node;
struct rb_node *p = q->wait_pq[level].rb_node;
if (!p) return 0;
while (p->rb_left) p = p->rb_left;
......@@ -906,12 +895,12 @@ static long htb_do_events(struct htb_sched *q,int level)
* Find leaf where current feed pointers points to.
*/
static struct htb_class *
htb_lookup_leaf(rb_root_t *tree,int prio,rb_node_t **pptr)
htb_lookup_leaf(struct rb_root *tree,int prio,struct rb_node **pptr)
{
int i;
struct {
rb_node_t *root;
rb_node_t **pptr;
struct rb_node *root;
struct rb_node **pptr;
} stk[TC_HTB_MAXDEPTH],*sp = stk;
sp->root = tree->rb_node;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment