Commit 8f18c8a4 authored by wang di, committed by Greg Kroah-Hartman

staging: lustre: lmv: separate master object with master stripe

Separate the master object from the master stripe, so that:
1. The stripe EA exists only on the master object.
2. Each sub-stripe object is inserted into the master object
as a sub-directory, and it can reach the master object via "..".

This removes the special handling of stripe 0 in LMV and LOD,
and also simplifies LFSCK, since the consistency check becomes
easier (a sketch follows below).
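
For illustration only (not part of this patch): with the new layout EA
shown further down, a slave (sub-stripe) object records the FID of its
master, so a consistency checker can walk from any stripe back to the
namespace-visible directory without treating stripe 0 specially. The
helper name below is made up; only the lmv_mds_md_v1 fields come from
this patch.

	/* sketch: verify that a slave stripe points back at its master */
	static int stripe_points_to_master(const struct lmv_mds_md_v1 *slave_ea,
					   const struct lu_fid *master_fid)
	{
		struct lu_fid fid;
		/* on a slave object this field holds the stripe index */
		__u32 stripe_idx = le32_to_cpu(slave_ea->lmv_master_mdt_index);

		fid_le_to_cpu(&fid, &slave_ea->lmv_master_fid);
		if (!lu_fid_eq(&fid, master_fid))
			return -ENOENT;

		return stripe_idx;
	}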

When the master object becomes an orphan, all of its sub-stripes
must be marked as dead objects as well; otherwise clients might
still be able to create files under those stripes.
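
A hedged sketch (not part of the patch) of how such a check could look
on the client side, using the LMV_HASH_FLAG_DEAD bit introduced below;
the helper name and the -ESTALE return value are assumptions made for
illustration:

	/* refuse to create entries under a stripe whose master is an orphan */
	static int lmv_check_stripe_alive(__u32 hash_type)
	{
		if (hash_type & LMV_HASH_FLAG_DEAD)
			return -ESTALE;

		return 0;
	}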

A few fixes for the striped directory layout lock:

 1. Stripe 0 should be locked as EX, the same as the other stripes.
 2. Acquire the layout lock on the directory when it is being unlinked.
Signed-off-by: wang di <di.wang@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-4690
Reviewed-on: http://review.whamcloud.com/9511
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 31c5e95e
@@ -2497,18 +2497,52 @@ struct lmv_desc {
 	struct obd_uuid ld_uuid;
 };
 
-/* lmv structures */
-#define LMV_MAGIC_V1	0x0CD10CD0	/* normal stripe lmv magic */
-#define LMV_USER_MAGIC	0x0CD20CD0	/* default lmv magic */
-#define LMV_MAGIC_MIGRATE	0x0CD30CD0	/* migrate stripe lmv magic */
-#define LMV_MAGIC	LMV_MAGIC_V1
+/* LMV layout EA, and it will be stored both in master and slave object */
+struct lmv_mds_md_v1 {
+	__u32 lmv_magic;
+	__u32 lmv_stripe_count;
+	__u32 lmv_master_mdt_index;	/* On master object, it is master
+					 * MDT index, on slave object, it
+					 * is stripe index of the slave obj
+					 */
+	__u32 lmv_hash_type;		/* dir stripe policy, i.e. indicate
+					 * which hash function to be used,
+					 * Note: only lower 16 bits is being
+					 * used for now. Higher 16 bits will
+					 * be used to mark the object status,
+					 * for example migrating or dead.
+					 */
+	__u32 lmv_layout_version;	/* Used for directory restriping */
+	__u32 lmv_padding;
+	struct lu_fid lmv_master_fid;	/* The FID of the master object, which
+					 * is the namespace-visible dir FID
+					 */
+	char lmv_pool_name[LOV_MAXPOOLNAME];	/* pool name */
+	struct lu_fid lmv_stripe_fids[0];	/* FIDs for each stripe */
+};
+
+#define LMV_MAGIC_V1	0x0CD20CD0	/* normal stripe lmv magic */
+#define LMV_MAGIC	LMV_MAGIC_V1
+
+/* #define LMV_USER_MAGIC 0x0CD30CD0 */
+#define LMV_MAGIC_STRIPE 0x0CD40CD0	/* magic for dir sub_stripe */
+
+/*
+ * Right now only the lower part (0-16 bits) of lmv_hash_type is being used,
+ * and the higher part will be the flag to indicate the status of object,
+ * for example the object is being migrated. And the hash function
+ * might be interpreted differently with different flags.
+ */
 enum lmv_hash_type {
 	LMV_HASH_TYPE_ALL_CHARS = 1,
 	LMV_HASH_TYPE_FNV_1A_64 = 2,
-	LMV_HASH_TYPE_MIGRATION = 3,
 };
 
+#define LMV_HASH_TYPE_MASK		0x0000ffff
+
+#define LMV_HASH_FLAG_MIGRATION		0x80000000
+#define LMV_HASH_FLAG_DEAD		0x40000000
+
 #define LMV_HASH_NAME_ALL_CHARS	"all_char"
 #define LMV_HASH_NAME_FNV_1A_64	"fnv_1a_64"

@@ -2540,19 +2574,6 @@ static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size)
 	return hash;
 }
 
-struct lmv_mds_md_v1 {
-	__u32 lmv_magic;
-	__u32 lmv_stripe_count;		/* stripe count */
-	__u32 lmv_master_mdt_index;	/* master MDT index */
-	__u32 lmv_hash_type;		/* dir stripe policy, i.e. indicate
-					 * which hash function to be used
-					 */
-	__u32 lmv_layout_version;	/* Used for directory restriping */
-	__u32 lmv_padding;
-	char lmv_pool_name[LOV_MAXPOOLNAME];	/* pool name */
-	struct lu_fid lmv_stripe_fids[0];	/* FIDs for each stripe */
-};
-
 union lmv_mds_md {
 	__u32 lmv_magic;
 	struct lmv_mds_md_v1 lmv_md_v1;

@@ -2566,8 +2587,7 @@ static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
 	ssize_t len = -EINVAL;
 
 	switch (lmm_magic) {
-	case LMV_MAGIC_V1:
-	case LMV_MAGIC_MIGRATE: {
+	case LMV_MAGIC_V1: {
 		struct lmv_mds_md_v1 *lmm1;
 
 		len = sizeof(*lmm1);

@@ -2583,7 +2603,6 @@ static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm)
 {
 	switch (le32_to_cpu(lmm->lmv_magic)) {
 	case LMV_MAGIC_V1:
-	case LMV_MAGIC_MIGRATE:
 		return le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count);
 	case LMV_USER_MAGIC:
 		return le32_to_cpu(lmm->lmv_user_md.lum_stripe_count);

@@ -2599,7 +2618,6 @@ static inline int lmv_mds_md_stripe_count_set(union lmv_mds_md *lmm,
 	switch (le32_to_cpu(lmm->lmv_magic)) {
 	case LMV_MAGIC_V1:
-	case LMV_MAGIC_MIGRATE:
 		lmm->lmv_md_v1.lmv_stripe_count = cpu_to_le32(stripe_count);
 		break;
 	case LMV_USER_MAGIC:
...
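
A minimal sketch (editorial, not part of the patch) of how the packed
lmv_hash_type introduced above is meant to be read: the low 16 bits select
the hash function and the high bits carry object status. The helper name
is hypothetical; the macros are the ones defined in the hunks above:

	static inline bool lmv_hash_is_plain(__u32 hash_type)
	{
		__u32 func = hash_type & LMV_HASH_TYPE_MASK;

		/* unknown hash function */
		if (func != LMV_HASH_TYPE_ALL_CHARS &&
		    func != LMV_HASH_TYPE_FNV_1A_64)
			return false;

		/* object is being migrated or belongs to an orphaned master */
		if (hash_type & (LMV_HASH_FLAG_MIGRATION | LMV_HASH_FLAG_DEAD))
			return false;

		return true;
	}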
@@ -269,8 +269,7 @@ struct ost_id {
 #define LOV_USER_MAGIC_JOIN_V1 0x0BD20BD0
 #define LOV_USER_MAGIC_V3 0x0BD30BD0
 
-#define LMV_MAGIC_V1	0x0CD10CD0	/* normal stripe lmv magic */
-#define LMV_USER_MAGIC	0x0CD20CD0	/* default lmv magic */
+#define LMV_USER_MAGIC	0x0CD30CD0	/* default lmv magic */
 
 #define LOV_PATTERN_RAID0 0x001
 #define LOV_PATTERN_RAID1 0x002
...
@@ -48,10 +48,33 @@ struct lmv_stripe_md {
 	__u32 lsm_md_layout_version;
 	__u32 lsm_md_default_count;
 	__u32 lsm_md_default_index;
+	struct lu_fid lsm_md_master_fid;
 	char lsm_md_pool_name[LOV_MAXPOOLNAME];
 	struct lmv_oinfo lsm_md_oinfo[0];
 };
 
+static inline bool
+lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
+{
+	int idx;
+
+	if (lsm1->lsm_md_magic != lsm2->lsm_md_magic ||
+	    lsm1->lsm_md_stripe_count != lsm2->lsm_md_stripe_count ||
+	    lsm1->lsm_md_master_mdt_index != lsm2->lsm_md_master_mdt_index ||
+	    lsm1->lsm_md_hash_type != lsm2->lsm_md_hash_type ||
+	    lsm1->lsm_md_layout_version != lsm2->lsm_md_layout_version ||
+	    !strcmp(lsm1->lsm_md_pool_name, lsm2->lsm_md_pool_name))
+		return false;
+
+	for (idx = 0; idx < lsm1->lsm_md_stripe_count; idx++) {
+		if (!lu_fid_eq(&lsm1->lsm_md_oinfo[idx].lmo_fid,
+			       &lsm2->lsm_md_oinfo[idx].lmo_fid))
+			return false;
+	}
+
+	return true;
+}
+
 union lmv_mds_md;
 
 int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,

@@ -106,7 +129,6 @@ static inline void lmv_cpu_to_le(union lmv_mds_md *lmv_dst,
 {
 	switch (lmv_src->lmv_magic) {
 	case LMV_MAGIC_V1:
-	case LMV_MAGIC_MIGRATE:
 		lmv1_cpu_to_le(&lmv_dst->lmv_md_v1, &lmv_src->lmv_md_v1);
 		break;
 	default:

@@ -119,7 +141,6 @@ static inline void lmv_le_to_cpu(union lmv_mds_md *lmv_dst,
 {
 	switch (le32_to_cpu(lmv_src->lmv_magic)) {
 	case LMV_MAGIC_V1:
-	case LMV_MAGIC_MIGRATE:
 		lmv1_le_to_cpu(&lmv_dst->lmv_md_v1, &lmv_src->lmv_md_v1);
 		break;
 	default:
...
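
Usage sketch (not from the patch): lsm_md_eq() above is intended for layout
revalidation, e.g. comparing the cached stripe metadata of a directory
against a freshly unpacked one, as ll_update_lsm_md() does later in this
patch; "old_lsm" and "new_lsm" are placeholder names:

	if (!lsm_md_eq(old_lsm, new_lsm))
		return -EIO;	/* striped dir layout changed unexpectedly */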
@@ -917,8 +917,8 @@ struct obd_ops {
 	int (*fid_fini)(struct obd_device *obd);
 
 	/* Allocate new fid according to passed @hint. */
-	int (*fid_alloc)(struct obd_export *exp, struct lu_fid *fid,
-			 struct md_op_data *op_data);
+	int (*fid_alloc)(const struct lu_env *env, struct obd_export *exp,
+			 struct lu_fid *fid, struct md_op_data *op_data);
 
 	/*
 	 * Object with @fid is getting deleted, we may want to do something
...
@@ -930,7 +930,8 @@ static inline int obd_fid_fini(struct obd_device *obd)
 	return rc;
 }
 
-static inline int obd_fid_alloc(struct obd_export *exp,
+static inline int obd_fid_alloc(const struct lu_env *env,
+				struct obd_export *exp,
 				struct lu_fid *fid,
 				struct md_op_data *op_data)
 {

@@ -939,7 +940,7 @@ static inline int obd_fid_alloc(struct obd_export *exp,
 	EXP_CHECK_DT_OP(exp, fid_alloc);
 	EXP_COUNTER_INCREMENT(exp, fid_alloc);
-	rc = OBP(exp->exp_obd, fid_alloc)(exp, fid, op_data);
+	rc = OBP(exp->exp_obd, fid_alloc)(env, exp, fid, op_data);
 
 	return rc;
 }
...
@@ -883,7 +883,6 @@ int ll_dir_getstripe(struct inode *inode, void **plmm, int *plmm_size,
 		lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
 		break;
 	case LMV_USER_MAGIC:
-	case LMV_MAGIC_MIGRATE:
 		if (cpu_to_le32(LMV_USER_MAGIC) != LMV_USER_MAGIC)
 			lustre_swab_lmv_user_md((struct lmv_user_md *)lmm);
 		break;

@@ -1471,7 +1470,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		rc = ll_dir_getstripe(inode, (void **)&lmm, &lmmsize, &request,
 				      valid);
-		if (rc && rc != -ENODATA)
+		if (rc)
 			goto finish_req;
 
 		/* Get default LMV EA */

@@ -1490,14 +1489,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			goto finish_req;
 		}
 
-		/* Get normal LMV EA */
-		if (rc == -ENODATA) {
-			stripe_count = 1;
-		} else {
-			LASSERT(lmm);
-			stripe_count = lmv_mds_md_stripe_count_get(lmm);
-		}
-
+		stripe_count = lmv_mds_md_stripe_count_get(lmm);
 		lum_size = lmv_user_md_size(stripe_count, LMV_MAGIC_V1);
 		tmp = kzalloc(lum_size, GFP_NOFS);
 		if (!tmp) {

@@ -1505,28 +1497,25 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 			goto finish_req;
 		}
 
-		tmp->lum_magic = LMV_MAGIC_V1;
-		tmp->lum_stripe_count = 1;
 		mdt_index = ll_get_mdt_idx(inode);
 		if (mdt_index < 0) {
 			rc = -ENOMEM;
 			goto out_tmp;
 		}
 
+		tmp->lum_magic = LMV_MAGIC_V1;
+		tmp->lum_stripe_count = 0;
 		tmp->lum_stripe_offset = mdt_index;
-		tmp->lum_objects[0].lum_mds = mdt_index;
-		tmp->lum_objects[0].lum_fid = *ll_inode2fid(inode);
-		for (i = 1; i < stripe_count; i++) {
-			struct lmv_mds_md_v1 *lmm1;
-
-			lmm1 = &lmm->lmv_md_v1;
-			mdt_index = ll_get_mdt_idx_by_fid(sbi,
-							  &lmm1->lmv_stripe_fids[i]);
+		for (i = 0; i < stripe_count; i++) {
+			struct lu_fid *fid;
+
+			fid = &lmm->lmv_md_v1.lmv_stripe_fids[i];
+			mdt_index = ll_get_mdt_idx_by_fid(sbi, fid);
 			if (mdt_index < 0) {
 				rc = mdt_index;
 				goto out_tmp;
 			}
 
 			tmp->lum_objects[i].lum_mds = mdt_index;
-			tmp->lum_objects[i].lum_fid = lmm1->lmv_stripe_fids[i];
+			tmp->lum_objects[i].lum_fid = *fid;
 			tmp->lum_stripe_count++;
 		}
...
@@ -1042,9 +1042,9 @@ static struct inode *ll_iget_anon_dir(struct super_block *sb,
 		ll_lli_init(lli);
 		LASSERT(lsm);
-		/* master stripe FID */
-		lli->lli_pfid = lsm->lsm_md_oinfo[0].lmo_fid;
-		CDEBUG(D_INODE, "lli %p master "DFID" slave "DFID"\n",
+		/* master object FID */
+		lli->lli_pfid = body->fid1;
+		CDEBUG(D_INODE, "lli %p slave "DFID" master "DFID"\n",
 		       lli, PFID(fid), PFID(&lli->lli_pfid));
 		unlock_new_inode(inode);
 	}

@@ -1067,23 +1067,24 @@ static int ll_init_lsm_md(struct inode *inode, struct lustre_md *md)
 	for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
 		fid = &lsm->lsm_md_oinfo[i].lmo_fid;
 		LASSERT(!lsm->lsm_md_oinfo[i].lmo_root);
-		if (!i) {
-			lsm->lsm_md_oinfo[i].lmo_root = inode;
-		} else {
-			/*
-			 * Unfortunately ll_iget will call ll_update_inode,
-			 * where the initialization of slave inode is slightly
-			 * different, so it reset lsm_md to NULL to avoid
-			 * initializing lsm for slave inode.
-			 */
-			lsm->lsm_md_oinfo[i].lmo_root =
-				ll_iget_anon_dir(inode->i_sb, fid, md);
-			if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
-				int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
-
-				lsm->lsm_md_oinfo[i].lmo_root = NULL;
-				return rc;
-			}
+		/* Unfortunately ll_iget will call ll_update_inode,
+		 * where the initialization of slave inode is slightly
+		 * different, so it reset lsm_md to NULL to avoid
+		 * initializing lsm for slave inode.
+		 */
+		/* For migrating inode, master stripe and master object will
+		 * be same, so we only need assign this inode
+		 */
+		if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION && !i)
+			lsm->lsm_md_oinfo[i].lmo_root = inode;
+		else
+			lsm->lsm_md_oinfo[i].lmo_root =
+				ll_iget_anon_dir(inode->i_sb, fid, md);
+
+		if (IS_ERR(lsm->lsm_md_oinfo[i].lmo_root)) {
+			int rc = PTR_ERR(lsm->lsm_md_oinfo[i].lmo_root);
+
+			lsm->lsm_md_oinfo[i].lmo_root = NULL;
+			return rc;
 		}
 	}

@@ -1113,7 +1114,7 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
 {
 	struct ll_inode_info *lli = ll_i2info(inode);
 	struct lmv_stripe_md *lsm = md->lmv;
-	int idx, rc;
+	int rc;
 
 	LASSERT(S_ISDIR(inode->i_mode));
 	CDEBUG(D_INODE, "update lsm %p of "DFID"\n", lli->lli_lsm_md,

@@ -1123,7 +1124,8 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
 	if (!lsm) {
 		if (!lli->lli_lsm_md) {
 			return 0;
-		} else if (lli->lli_lsm_md->lsm_md_magic == LMV_MAGIC_MIGRATE) {
+		} else if (lli->lli_lsm_md->lsm_md_hash_type &
+			   LMV_HASH_FLAG_MIGRATION) {
 			/*
 			 * migration is done, the temporay MIGRATE layout has
 			 * been removed

@@ -1160,43 +1162,40 @@ static int ll_update_lsm_md(struct inode *inode, struct lustre_md *md)
 	}
 
 	/* Compare the old and new stripe information */
-	if (!lli_lsm_md_eq(lli->lli_lsm_md, lsm)) {
-		CERROR("inode %p %lu mismatch\n"
-		       " new(%p) vs lli_lsm_md(%p):\n"
-		       " magic: %x %x\n"
-		       " count: %x %x\n"
-		       " master: %x %x\n"
-		       " hash_type: %x %x\n"
-		       " layout: %x %x\n"
-		       " pool: %s %s\n",
-		       inode, inode->i_ino, lsm, lli->lli_lsm_md,
-		       lsm->lsm_md_magic, lli->lli_lsm_md->lsm_md_magic,
+	if (!lsm_md_eq(lli->lli_lsm_md, lsm)) {
+		struct lmv_stripe_md *old_lsm = lli->lli_lsm_md;
+		int idx;
+
+		CERROR("%s: inode "DFID"(%p)'s lmv layout mismatch (%p)/(%p) magic:0x%x/0x%x stripe count: %d/%d master_mdt: %d/%d hash_type:0x%x/0x%x layout: 0x%x/0x%x pool:%s/%s\n",
+		       ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid),
+		       inode, lsm, old_lsm,
+		       lsm->lsm_md_magic, old_lsm->lsm_md_magic,
 		       lsm->lsm_md_stripe_count,
-		       lli->lli_lsm_md->lsm_md_stripe_count,
+		       old_lsm->lsm_md_stripe_count,
 		       lsm->lsm_md_master_mdt_index,
-		       lli->lli_lsm_md->lsm_md_master_mdt_index,
-		       lsm->lsm_md_hash_type, lli->lli_lsm_md->lsm_md_hash_type,
+		       old_lsm->lsm_md_master_mdt_index,
+		       lsm->lsm_md_hash_type, old_lsm->lsm_md_hash_type,
 		       lsm->lsm_md_layout_version,
-		       lli->lli_lsm_md->lsm_md_layout_version,
+		       old_lsm->lsm_md_layout_version,
 		       lsm->lsm_md_pool_name,
-		       lli->lli_lsm_md->lsm_md_pool_name);
-		return -EIO;
-	}
+		       old_lsm->lsm_md_pool_name);
 
-	for (idx = 0; idx < lli->lli_lsm_md->lsm_md_stripe_count; idx++) {
-		if (!lu_fid_eq(&lli->lli_lsm_md->lsm_md_oinfo[idx].lmo_fid,
-			       &lsm->lsm_md_oinfo[idx].lmo_fid)) {
-			CERROR("%s: FID in lsm mismatch idx %d, old: "DFID" new:"DFID"\n",
-			       ll_get_fsname(inode->i_sb, NULL, 0), idx,
-			       PFID(&lli->lli_lsm_md->lsm_md_oinfo[idx].lmo_fid),
-			       PFID(&lsm->lsm_md_oinfo[idx].lmo_fid));
-			return -EIO;
+		for (idx = 0; idx < old_lsm->lsm_md_stripe_count; idx++) {
+			CERROR("%s: sub FIDs in old lsm idx %d, old: "DFID"\n",
+			       ll_get_fsname(inode->i_sb, NULL, 0), idx,
+			       PFID(&old_lsm->lsm_md_oinfo[idx].lmo_fid));
 		}
+
+		for (idx = 0; idx < lsm->lsm_md_stripe_count; idx++) {
+			CERROR("%s: sub FIDs in new lsm idx %d, new: "DFID"\n",
+			       ll_get_fsname(inode->i_sb, NULL, 0), idx,
+			       PFID(&lsm->lsm_md_oinfo[idx].lmo_fid));
+		}
+
+		return -EIO;
 	}
 
-	rc = md_update_lsm_md(ll_i2mdexp(inode), ll_i2info(inode)->lli_lsm_md,
-			      md->body, ll_md_blocking_ast);
-
-	return rc;
+	return 0;
 }
 
 void ll_clear_inode(struct inode *inode)
...
@@ -173,9 +173,6 @@ int lmv_revalidate_slaves(struct obd_export *exp, struct mdt_body *mbody,
 	 * revalidate slaves has some problems, temporarily return,
 	 * we may not need that
 	 */
-	if (lsm->lsm_md_stripe_count <= 1)
-		return 0;
-
 	op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
 	if (!op_data)
 		return -ENOMEM;

@@ -194,14 +191,6 @@ int lmv_revalidate_slaves(struct obd_export *exp, struct mdt_body *mbody,
 		fid = lsm->lsm_md_oinfo[i].lmo_fid;
 		inode = lsm->lsm_md_oinfo[i].lmo_root;
-		if (!i) {
-			if (mbody) {
-				body = mbody;
-				goto update;
-			} else {
-				goto release_lock;
-			}
-		}
 
 		/*
 		 * Prepare op_data for revalidating. Note that @fid2 shluld be

@@ -237,7 +226,7 @@ int lmv_revalidate_slaves(struct obd_export *exp, struct mdt_body *mbody,
 			body = req_capsule_server_get(&req->rq_pill,
 						      &RMF_MDT_BODY);
 			LASSERT(body);
-update:
+
 			if (unlikely(body->nlink < 2)) {
 				CERROR("%s: nlink %d < 2 corrupt stripe %d "DFID":" DFID"\n",
 				       obd->obd_name, body->nlink, i,

@@ -256,10 +245,6 @@ int lmv_revalidate_slaves(struct obd_export *exp, struct mdt_body *mbody,
 				goto cleanup;
 			}
 
-			if (i)
-				md_set_lock_data(tgt->ltd_exp, &lockh->cookie,
-						 inode, NULL);
-
 			i_size_write(inode, body->size);
 			set_nlink(inode, body->nlink);
 			LTIME_S(inode->i_atime) = body->atime;

@@ -269,8 +254,8 @@ int lmv_revalidate_slaves(struct obd_export *exp, struct mdt_body *mbody,
 			if (req)
 				ptlrpc_req_finished(req);
 		}
-release_lock:
-		size += i_size_read(inode);
+
+		md_set_lock_data(tgt->ltd_exp, &lockh->cookie, inode, NULL);
 
 		if (i != 0)
 			nlink += inode->i_nlink - 2;

@@ -361,7 +346,7 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
 		 * fid and setup FLD for it.
 		 */
 		op_data->op_fid3 = op_data->op_fid2;
-		rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
+		rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
 		if (rc != 0)
 			return rc;
 	}

@@ -453,7 +438,7 @@ static int lmv_intent_lookup(struct obd_export *exp,
 		}
 		return rc;
 	} else if (it_disposition(it, DISP_LOOKUP_NEG) && lsm &&
-		   lsm->lsm_md_magic == LMV_MAGIC_MIGRATE) {
+		   lsm->lsm_md_magic & LMV_HASH_FLAG_MIGRATION) {
 		/*
 		 * For migrating directory, if it can not find the child in
 		 * the source directory(master stripe), try the targeting
...
@@ -52,8 +52,8 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
 int lmv_fld_lookup(struct lmv_obd *lmv, const struct lu_fid *fid, u32 *mds);
 int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds);
-int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
-		  struct md_op_data *op_data);
+int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+		  struct lu_fid *fid, struct md_op_data *op_data);
 
 int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
 		  const union lmv_mds_md *lmm, int stripe_count);
...
@@ -80,41 +80,35 @@ lmv_hash_fnv1a(unsigned int count, const char *name, int namelen)
 	return do_div(hash, count);
 }
 
-int lmv_name_to_stripe_index(enum lmv_hash_type hashtype,
-			     unsigned int max_mdt_index,
+int lmv_name_to_stripe_index(__u32 lmv_hash_type, unsigned int stripe_count,
 			     const char *name, int namelen)
 {
+	__u32 hash_type = lmv_hash_type & LMV_HASH_TYPE_MASK;
 	int idx;
 
 	LASSERT(namelen > 0);
-	if (max_mdt_index <= 1)
+	if (stripe_count <= 1)
 		return 0;
 
-	switch (hashtype) {
+	/* for migrating object, always start from 0 stripe */
+	if (lmv_hash_type & LMV_HASH_FLAG_MIGRATION)
+		return 0;
+
+	switch (hash_type) {
 	case LMV_HASH_TYPE_ALL_CHARS:
-		idx = lmv_hash_all_chars(max_mdt_index, name, namelen);
+		idx = lmv_hash_all_chars(stripe_count, name, namelen);
 		break;
 	case LMV_HASH_TYPE_FNV_1A_64:
-		idx = lmv_hash_fnv1a(max_mdt_index, name, namelen);
+		idx = lmv_hash_fnv1a(stripe_count, name, namelen);
 		break;
-	/*
-	 * LMV_HASH_TYPE_MIGRATION means the file is being migrated,
-	 * and the file should be accessed by client, except for
-	 * lookup(see lmv_intent_lookup), return -EACCES here
-	 */
-	case LMV_HASH_TYPE_MIGRATION:
-		CERROR("%.*s is being migrated: rc = %d\n", namelen,
-		       name, -EACCES);
-		return -EACCES;
 	default:
-		CERROR("Unknown hash type 0x%x\n", hashtype);
+		CERROR("Unknown hash type 0x%x\n", hash_type);
 		return -EINVAL;
 	}
 
 	CDEBUG(D_INFO, "name %.*s hash_type %d idx %d\n", namelen, name,
-	       hashtype, idx);
-
-	LASSERT(idx < max_mdt_index);
+	       hash_type, idx);
 
 	return idx;
 }

@@ -1287,7 +1281,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds)
 	/*
 	 * Asking underlaying tgt layer to allocate new fid.
 	 */
-	rc = obd_fid_alloc(tgt->ltd_exp, fid, NULL);
+	rc = obd_fid_alloc(NULL, tgt->ltd_exp, fid, NULL);
 	if (rc > 0) {
 		LASSERT(fid_is_sane(fid));
 		rc = 0;

@@ -1298,8 +1292,8 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds)
 	return rc;
 }
 
-int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
-		  struct md_op_data *op_data)
+int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+		  struct lu_fid *fid, struct md_op_data *op_data)
 {
 	struct obd_device *obd = class_exp2obd(exp);
 	struct lmv_obd *lmv = &obd->u.lmv;

@@ -1695,9 +1689,7 @@ struct lmv_tgt_desc
 	struct lmv_stripe_md *lsm = op_data->op_mea1;
 	struct lmv_tgt_desc *tgt;
 
-	if (!lsm || lsm->lsm_md_stripe_count <= 1 ||
-	    !op_data->op_namelen ||
-	    lsm->lsm_md_magic == LMV_MAGIC_MIGRATE) {
+	if (!lsm || !op_data->op_namelen) {
 		tgt = lmv_find_target(lmv, fid);
 		if (IS_ERR(tgt))
 			return tgt;

@@ -1737,7 +1729,7 @@ static int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
 	       op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
 	       op_data->op_mds);
 
-	rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
+	rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
 	if (rc)
 		return rc;

@@ -2060,7 +2052,7 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
 	if (op_data->op_cli_flags & CLI_MIGRATE) {
 		LASSERTF(fid_is_sane(&op_data->op_fid3), "invalid FID "DFID"\n",
 			 PFID(&op_data->op_fid3));
-		rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
+		rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
 		if (rc)
 			return rc;
 		src_tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid3);

@@ -2365,8 +2357,7 @@ static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
 		return PTR_ERR(tgt);
 
 	/* For striped dir, we need to locate the parent as well */
-	if (op_data->op_mea1 &&
-	    op_data->op_mea1->lsm_md_stripe_count > 1) {
+	if (op_data->op_mea1) {
 		struct lmv_tgt_desc *tmp;
 
 		LASSERT(op_data->op_name && op_data->op_namelen);

@@ -2679,9 +2670,13 @@ static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
 	lsm->lsm_md_master_mdt_index = le32_to_cpu(lmm1->lmv_master_mdt_index);
 	lsm->lsm_md_hash_type = le32_to_cpu(lmm1->lmv_hash_type);
 	lsm->lsm_md_layout_version = le32_to_cpu(lmm1->lmv_layout_version);
+	fid_le_to_cpu(&lsm->lsm_md_master_fid, &lmm1->lmv_master_fid);
 	cplen = strlcpy(lsm->lsm_md_pool_name, lmm1->lmv_pool_name,
 			sizeof(lsm->lsm_md_pool_name));
 
+	if (!fid_is_sane(&lsm->lsm_md_master_fid))
+		return -EPROTO;
+
 	if (cplen >= sizeof(lsm->lsm_md_pool_name))
 		return -E2BIG;

@@ -2719,7 +2714,13 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
 		int i;
 
 		for (i = 1; i < lsm->lsm_md_stripe_count; i++) {
-			if (lsm->lsm_md_oinfo[i].lmo_root)
+			/*
+			 * For migrating inode, the master stripe and master
+			 * object will be the same, so do not need iput, see
+			 * ll_update_lsm_md
+			 */
+			if (!(lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION &&
+			      !i) && lsm->lsm_md_oinfo[i].lmo_root)
 				iput(lsm->lsm_md_oinfo[i].lmo_root);
 		}

@@ -2739,9 +2740,11 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
 		return 0;
 	}
 
+	if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE)
+		return -EPERM;
+
 	/* Unpack memmd */
 	if (le32_to_cpu(lmm->lmv_magic) != LMV_MAGIC_V1 &&
-	    le32_to_cpu(lmm->lmv_magic) != LMV_MAGIC_MIGRATE &&
 	    le32_to_cpu(lmm->lmv_magic) != LMV_USER_MAGIC) {
 		CERROR("%s: invalid lmv magic %x: rc = %d\n",
 		       exp->exp_obd->obd_name, le32_to_cpu(lmm->lmv_magic),

@@ -2749,8 +2752,7 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
 		return -EIO;
 	}
 
-	if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_V1 ||
-	    le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_MIGRATE)
+	if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_V1)
 		lsm_size = lmv_stripe_md_size(lmv_mds_md_stripe_count_get(lmm));
 	else
 		/**

@@ -2769,7 +2771,6 @@ int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp,
 	switch (le32_to_cpu(lmm->lmv_magic)) {
 	case LMV_MAGIC_V1:
-	case LMV_MAGIC_MIGRATE:
 		rc = lmv_unpack_md_v1(exp, lsm, &lmm->lmv_md_v1);
 		break;
 	default:

@@ -3067,9 +3068,6 @@ static int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
 int lmv_update_lsm_md(struct obd_export *exp, struct lmv_stripe_md *lsm,
 		      struct mdt_body *body, ldlm_blocking_callback cb_blocking)
 {
-	if (lsm->lsm_md_stripe_count <= 1)
-		return 0;
-
 	return lmv_revalidate_slaves(exp, body, lsm, cb_blocking, 0);
 }
...
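
Illustrative only: with the new signature of lmv_name_to_stripe_index(),
a caller hands in the raw hash type and the stripe count, and a migrating
directory always resolves to stripe 0. "lsm", "name" and "namelen" are
assumed to be in scope as in the callers above:

	int idx = lmv_name_to_stripe_index(lsm->lsm_md_hash_type,
					   lsm->lsm_md_stripe_count,
					   name, namelen);

	if (idx >= 0)
		fid = lsm->lsm_md_oinfo[idx].lmo_fid;	/* stripe that owns "name" */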
@@ -87,8 +87,8 @@ int mdc_resource_get_unused(struct obd_export *exp, const struct lu_fid *fid,
 			    struct list_head *cancels, enum ldlm_mode mode,
 			    __u64 bits);
 /* mdc/mdc_request.c */
-int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
-		  struct md_op_data *op_data);
+int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+		  struct lu_fid *fid, struct md_op_data *op_data);
 
 struct obd_client_handle;
 int mdc_set_open_replay_data(struct obd_export *exp,
...
@@ -1144,7 +1144,7 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
 	/* For case if upper layer did not alloc fid, do it now. */
 	if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
-		rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
+		rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
 		if (rc < 0) {
 			CERROR("Can't alloc new fid, rc %d\n", rc);
 			return rc;
...
@@ -214,11 +214,9 @@ int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
 		 * mdc_fid_alloc() may return errno 1 in case of switch to new
 		 * sequence, handle this.
 		 */
-		rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
-		if (rc < 0) {
-			CERROR("Can't alloc new fid, rc %d\n", rc);
+		rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
+		if (rc < 0)
 			return rc;
-		}
 	}
 
 rebuild:
...
@@ -765,7 +765,7 @@ static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
 		req_fmt = &RQF_MDS_RELEASE_CLOSE;
 
 		/* allocate a FID for volatile file */
-		rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
+		rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
 		if (rc < 0) {
 			CERROR("%s: "DFID" failed to allocate FID: %d\n",
 			       obd->obd_name, PFID(&op_data->op_fid1), rc);

@@ -2203,13 +2203,13 @@ static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
 	return rc;
 }
 
-int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
-		  struct md_op_data *op_data)
+int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
+		  struct lu_fid *fid, struct md_op_data *op_data)
 {
 	struct client_obd *cli = &exp->exp_obd->u.cli;
 	struct lu_client_seq *seq = cli->cl_seq;
 
-	return seq_client_alloc_fid(NULL, seq, fid);
+	return seq_client_alloc_fid(env, seq, fid);
 }
 
 static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
...