Commit b6aaba5b authored by Jeff Layton, committed by Christian Brauner

filelock: convert fl_blocker to file_lock_core

Both locks and leases deal with fl_blocker. Switch the fl_blocker
pointer in struct file_lock_core to point to the file_lock_core of the
blocker instead of a file_lock structure.
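
As an aside for readers of the patch (not part of the change itself), the shape of the conversion is easy to see in a small standalone userspace sketch: the blocker link now lives entirely in the embedded file_lock_core, and callers that still need the surrounding file_lock convert back explicitly with a container_of()-style helper, analogous to the kernel's existing file_lock() helper. Names below are illustrative only.

/*
 * Illustrative userspace sketch, not kernel code: the blocker pointer is a
 * file_lock_core, and the outer file_lock is recovered on demand.
 */
#include <stddef.h>
#include <stdio.h>

struct file_lock_core {
	struct file_lock_core *flc_blocker;	/* core of the lock blocking us */
};

struct file_lock {
	struct file_lock_core c;		/* embedded core, as in the kernel */
	int fl_type;				/* stand-in for lock-only fields */
};

/* mirrors the kernel's file_lock() container_of() helper */
static struct file_lock *file_lock(struct file_lock_core *flc)
{
	return (struct file_lock *)((char *)flc - offsetof(struct file_lock, c));
}

int main(void)
{
	struct file_lock blocker = { .fl_type = 1 };
	struct file_lock waiter  = { .fl_type = 2 };

	/* store the blocker's core, not the file_lock itself */
	waiter.c.flc_blocker = &blocker.c;

	/* convert back only where the full file_lock is really needed */
	printf("blocker type = %d\n", file_lock(waiter.c.flc_blocker)->fl_type);
	return 0;
}
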
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Link: https://lore.kernel.org/r/20240131-flsplit-v3-26-c6129007ee8d@kernel.org
Reviewed-by: NeilBrown <neilb@suse.de>
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent b6be3714
diff --git a/fs/locks.c b/fs/locks.c
@@ -400,7 +400,7 @@ static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
 	/*
 	 * As ctx->flc_lock is held, new requests cannot be added to
-	 * ->fl_blocked_requests, so we don't need a lock to check if it
+	 * ->flc_blocked_requests, so we don't need a lock to check if it
 	 * is empty.
 	 */
 	if (list_empty(&fl->c.flc_blocked_requests))
@@ -410,7 +410,7 @@ static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
 			 &new->c.flc_blocked_requests);
 	list_for_each_entry(f, &new->c.flc_blocked_requests,
 			    c.flc_blocked_member)
-		f->c.flc_blocker = new;
+		f->c.flc_blocker = &new->c;
 	spin_unlock(&blocked_lock_lock);
 }
@@ -773,7 +773,7 @@ static void __locks_insert_block(struct file_lock *blocker_fl,
 			blocker = flc;
 			goto new_blocker;
 		}
-	waiter->flc_blocker = file_lock(blocker);
+	waiter->flc_blocker = blocker;
 	list_add_tail(&waiter->flc_blocked_member,
 		      &blocker->flc_blocked_requests);
@@ -996,7 +996,7 @@ static struct file_lock_core *what_owner_is_waiting_for(struct file_lock_core *blocker)
 	hash_for_each_possible(blocked_hash, flc, flc_link, posix_owner_key(blocker)) {
 		if (posix_same_owner(flc, blocker)) {
 			while (flc->flc_blocker)
-				flc = &flc->flc_blocker->c;
+				flc = flc->flc_blocker;
 			return flc;
 		}
 	}
@@ -2798,9 +2798,9 @@ static struct file_lock *get_next_blocked_member(struct file_lock *node)
 	/* Next member in the linked list could be itself */
 	tmp = list_next_entry(node, c.flc_blocked_member);
-	if (list_entry_is_head(tmp, &node->c.flc_blocker->c.flc_blocked_requests,
-			       c.flc_blocked_member)
-		|| tmp == node) {
+	if (list_entry_is_head(tmp, &node->c.flc_blocker->flc_blocked_requests,
+			       c.flc_blocked_member)
+		|| tmp == node) {
 		return NULL;
 	}
@@ -2841,7 +2841,7 @@ static int locks_show(struct seq_file *f, void *v)
 			tmp = get_next_blocked_member(cur);
 			/* Fall back to parent node */
 			while (tmp == NULL && cur->c.flc_blocker != NULL) {
-				cur = cur->c.flc_blocker;
+				cur = file_lock(cur->c.flc_blocker);
 				level--;
 				tmp = get_next_blocked_member(cur);
 			}
diff --git a/include/linux/filelock.h b/include/linux/filelock.h
@@ -87,7 +87,7 @@ bool opens_in_grace(struct net *);
  */
 struct file_lock_core {
-	struct file_lock *flc_blocker;		/* The lock that is blocking us */
+	struct file_lock_core *flc_blocker;	/* The lock that is blocking us */
 	struct list_head flc_list;		/* link into file_lock_context */
 	struct hlist_node flc_link;		/* node in global lists */
 	struct list_head flc_blocked_requests;	/* list of requests with
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(filelock_lock,
 		__field(struct file_lock *, fl)
 		__field(unsigned long, i_ino)
 		__field(dev_t, s_dev)
-		__field(struct file_lock *, blocker)
+		__field(struct file_lock_core *, blocker)
 		__field(fl_owner_t, owner)
 		__field(unsigned int, pid)
 		__field(unsigned int, flags)
@@ -125,7 +125,7 @@ DECLARE_EVENT_CLASS(filelock_lease,
 		__field(struct file_lock *, fl)
 		__field(unsigned long, i_ino)
 		__field(dev_t, s_dev)
-		__field(struct file_lock *, blocker)
+		__field(struct file_lock_core *, blocker)
 		__field(fl_owner_t, owner)
 		__field(unsigned int, flags)
 		__field(unsigned char, type)