Commit 8adc8302 authored by Trond Myklebust, committed by Anna Schumaker

pNFS: Add a flag argument to pnfs_destroy_layouts_byclid()

Change the bool argument to a flag so that we can add different modes
for doing bulk destroy of a layout. In particular, we will want the
ability to schedule return of all the layouts associated with a given
NFS server when it reboots.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 5d2db089
......@@ -323,9 +323,10 @@ static u32 initiate_bulk_draining(struct nfs_client *clp,
int stat;
if (args->cbl_recall_type == RETURN_FSID)
stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
stat = pnfs_layout_destroy_byfsid(clp, &args->cbl_fsid,
PNFS_LAYOUT_BULK_RETURN);
else
stat = pnfs_destroy_layouts_byclid(clp, true);
stat = pnfs_layout_destroy_byclid(clp, PNFS_LAYOUT_BULK_RETURN);
if (stat != 0)
return NFS4ERR_DELAY;
return NFS4ERR_NOMATCHING_LAYOUT;
......
......@@ -868,7 +868,7 @@ pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
bool is_bulk_recall)
enum pnfs_layout_destroy_mode mode)
{
struct pnfs_layout_hdr *lo;
struct inode *inode;
......@@ -887,7 +887,7 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
spin_lock(&inode->i_lock);
list_del_init(&lo->plh_bulk_destroy);
if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
if (is_bulk_recall)
if (mode == PNFS_LAYOUT_BULK_RETURN)
set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
ret = -EAGAIN;
}
......@@ -901,10 +901,8 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
return ret;
}
int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
struct nfs_fsid *fsid,
bool is_recall)
int pnfs_layout_destroy_byfsid(struct nfs_client *clp, struct nfs_fsid *fsid,
enum pnfs_layout_destroy_mode mode)
{
struct nfs_server *server;
LIST_HEAD(layout_list);
......@@ -923,12 +921,11 @@ pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
rcu_read_unlock();
spin_unlock(&clp->cl_lock);
return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
return pnfs_layout_free_bulk_destroy_list(&layout_list, mode);
}
int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
bool is_recall)
int pnfs_layout_destroy_byclid(struct nfs_client *clp,
enum pnfs_layout_destroy_mode mode)
{
struct nfs_server *server;
LIST_HEAD(layout_list);
......@@ -945,7 +942,7 @@ pnfs_destroy_layouts_byclid(struct nfs_client *clp,
rcu_read_unlock();
spin_unlock(&clp->cl_lock);
return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
return pnfs_layout_free_bulk_destroy_list(&layout_list, mode);
}
/*
......@@ -958,7 +955,7 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
nfs4_deviceid_mark_client_invalid(clp);
nfs4_deviceid_purge_client(clp);
pnfs_destroy_layouts_byclid(clp, false);
pnfs_layout_destroy_byclid(clp, PNFS_LAYOUT_INVALIDATE);
}
static void
......
......@@ -118,6 +118,11 @@ enum layoutdriver_policy_flags {
PNFS_LAYOUTGET_ON_OPEN = 1 << 3,
};
enum pnfs_layout_destroy_mode {
PNFS_LAYOUT_INVALIDATE = 0,
PNFS_LAYOUT_BULK_RETURN,
};
struct nfs4_deviceid_node;
/* Per-layout driver specific registration structure */
......@@ -273,11 +278,10 @@ void pnfs_free_lseg_list(struct list_head *tmp_list);
void pnfs_destroy_layout(struct nfs_inode *);
void pnfs_destroy_layout_final(struct nfs_inode *);
void pnfs_destroy_all_layouts(struct nfs_client *);
int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
struct nfs_fsid *fsid,
bool is_recall);
int pnfs_destroy_layouts_byclid(struct nfs_client *clp,
bool is_recall);
int pnfs_layout_destroy_byfsid(struct nfs_client *clp, struct nfs_fsid *fsid,
enum pnfs_layout_destroy_mode mode);
int pnfs_layout_destroy_byclid(struct nfs_client *clp,
enum pnfs_layout_destroy_mode mode);
bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
struct pnfs_layout_range *dst_range,
struct inode *inode);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment