Commit 8e7a7362 authored by James Simmons, committed by Greg Kroah-Hartman

staging: lustre: header: remove assert from interval_set()

In the case of interval_tree.h only interval_set() uses
LASSERT, which this patch removes so that interval_set()
reports a real error instead. The libcfs.h header is then
no longer needed by interval_tree.h, so we can use the
standard linux kernel headers instead.
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6401
Reviewed-on: https://review.whamcloud.com/22522
Reviewed-on: https://review.whamcloud.com/24323
Reviewed-by: Frank Zago <fzago@cray.com>
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
Reviewed-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 098b325b
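
For context, a minimal sketch (not part of this patch) of the new calling
convention: interval_set() now returns 0 on success and -ERANGE for an
inverted range instead of asserting, so callers are expected to check the
result. The abbreviated struct definition and the setup_extent() helper
below are illustrative only, not code from the Lustre tree.

#include <linux/errno.h>
#include <linux/types.h>

/* Abbreviated stand-in for the real struct interval_node. */
struct interval_node {
	struct interval_node_extent {
		__u64 start;
		__u64 end;
	} in_extent;
	__u64 in_max_high;
};

/* Mirrors the patched helper: report -ERANGE instead of LASSERT(). */
static inline int interval_set(struct interval_node *node,
			       __u64 start, __u64 end)
{
	if (start > end)
		return -ERANGE;
	node->in_extent.start = start;
	node->in_extent.end = end;
	node->in_max_high = end;
	return 0;
}

/* Hypothetical caller: propagate the error rather than crashing. */
static int setup_extent(struct interval_node *node, __u64 start, __u64 end)
{
	int rc = interval_set(node, start, end);

	if (rc)
		return rc;	/* inverted [start, end] range */
	/* ... node may now be inserted into an interval tree ... */
	return 0;
}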
@@ -36,7 +36,9 @@
 #ifndef _INTERVAL_H__
 #define _INTERVAL_H__
 
-#include "../../include/linux/libcfs/libcfs.h"	/* LASSERT. */
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
 
 struct interval_node {
 	struct interval_node	*in_left;
@@ -73,13 +75,15 @@ static inline __u64 interval_high(struct interval_node *node)
 	return node->in_extent.end;
 }
 
-static inline void interval_set(struct interval_node *node,
-				__u64 start, __u64 end)
+static inline int interval_set(struct interval_node *node,
+			       __u64 start, __u64 end)
 {
-	LASSERT(start <= end);
+	if (start > end)
+		return -ERANGE;
 	node->in_extent.start = start;
 	node->in_extent.end = end;
 	node->in_max_high = end;
+	return 0;
 }
 
 /*
......
@@ -162,7 +162,7 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
 	struct interval_node *found, **root;
 	struct ldlm_interval *node;
 	struct ldlm_extent *extent;
-	int idx;
+	int idx, rc;
 
 	LASSERT(lock->l_granted_mode == lock->l_req_mode);
 
@@ -176,7 +176,8 @@ void ldlm_extent_add_lock(struct ldlm_resource *res,
 	/* node extent initialize */
 	extent = &lock->l_policy_data.l_extent;
-	interval_set(&node->li_node, extent->start, extent->end);
+	rc = interval_set(&node->li_node, extent->start, extent->end);
+	LASSERT(!rc);
 
 	root = &res->lr_itree[idx].lit_root;
 	found = interval_insert(&node->li_node, root);
 
......
@@ -61,17 +61,23 @@ void range_lock_tree_init(struct range_lock_tree *tree)
  * Pre:  Caller should have allocated the range lock node.
  * Post: The range lock node is meant to cover [start, end] region
  */
-void range_lock_init(struct range_lock *lock, __u64 start, __u64 end)
+int range_lock_init(struct range_lock *lock, __u64 start, __u64 end)
 {
+	int rc;
+
 	memset(&lock->rl_node, 0, sizeof(lock->rl_node));
 	if (end != LUSTRE_EOF)
 		end >>= PAGE_SHIFT;
-	interval_set(&lock->rl_node, start >> PAGE_SHIFT, end);
+	rc = interval_set(&lock->rl_node, start >> PAGE_SHIFT, end);
+	if (rc)
+		return rc;
+
 	INIT_LIST_HEAD(&lock->rl_next_lock);
 	lock->rl_task = NULL;
 	lock->rl_lock_count = 0;
 	lock->rl_blocking_ranges = 0;
 	lock->rl_sequence = 0;
+	return rc;
 }
 
 static inline struct range_lock *next_lock(struct range_lock *lock)
......
@@ -76,7 +76,7 @@ struct range_lock_tree {
 };
 
 void range_lock_tree_init(struct range_lock_tree *tree);
-void range_lock_init(struct range_lock *lock, __u64 start, __u64 end);
+int range_lock_init(struct range_lock *lock, __u64 start, __u64 end);
 int range_lock(struct range_lock_tree *tree, struct range_lock *lock);
 void range_unlock(struct range_lock_tree *tree, struct range_lock *lock);
 #endif
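
A hedged sketch of how an llite caller might adapt to the new
int-returning range_lock_init(): the lock_file_range() wrapper, its
variable names and the include path comment are hypothetical; only
range_lock_init(), range_lock() and the types come from range_lock.h
as shown above.

#include "range_lock.h"	/* assumed relative to the llite directory */

/* Hypothetical wrapper: fail cleanly on a malformed range instead of
 * tripping the old LASSERT() inside interval_set().
 */
static int lock_file_range(struct range_lock_tree *tree,
			   struct range_lock *range,
			   __u64 start, __u64 end)
{
	int rc;

	rc = range_lock_init(range, start, end);
	if (rc)
		return rc;	/* e.g. -ERANGE for start > end */

	return range_lock(tree, range);	/* may block until granted */
}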