Commit 7208c984 authored by Linus Torvalds

Merge tag 'gfs2-v5.18-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

 - Clean up the allocation of glocks that have an address space attached

 - Quota locking fix and quota iomap conversion

 - Fix the FITRIM error reporting

 - Some list iterator cleanups

* tag 'gfs2-v5.18-rc6-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  gfs2: Convert function bh_get to use iomap
  gfs2: use i_lock spin_lock for inode qadata
  gfs2: Return more useful errors from gfs2_rgrp_send_discards()
  gfs2: Use container_of() for gfs2_glock(aspace)
  gfs2: Explain some direct I/O oddities
  gfs2: replace 'found' with dedicated list iterator variable
parents bd1b7c13 c360abbb
@@ -840,6 +840,7 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
 	pagefault_enable();
 	if (ret <= 0 && ret != -EFAULT)
 		goto out_unlock;
+	/* No increment (+=) because iomap_dio_rw returns a cumulative value. */
 	if (ret > 0)
 		read = ret;
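
Note: the new comment documents that iomap_dio_rw() reports a running total across retries, so the caller assigns rather than accumulates. A minimal userspace sketch of the difference, with do_io() as a hypothetical stand-in for iomap_dio_rw():

	#include <stdio.h>

	static long total;                /* progress kept across retries */

	static long do_io(long count)     /* hypothetical stand-in */
	{
		total += count / 2;       /* each attempt makes partial progress */
		return total;             /* cumulative, like iomap_dio_rw() */
	}

	int main(void)
	{
		long read = 0, ret;
		int attempt;

		for (attempt = 0; attempt < 2; attempt++) {
			ret = do_io(100);
			if (ret > 0)
				read = ret;   /* read += ret would double-count */
		}
		printf("read=%ld\n", read);   /* prints 100, not 150 */
		return 0;
	}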
@@ -854,6 +855,7 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
 		gfs2_glock_dq(gh);
 out_uninit:
 	gfs2_holder_uninit(gh);
+	/* User space doesn't expect partial success. */
 	if (ret < 0)
 		return ret;
 	return read;
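
The second new comment pins down the return convention: once ret is negative, the error wins even if earlier attempts transferred bytes, because user space takes a short direct-I/O return as final. A tiny sketch of that convention (dio_result() is hypothetical):

	#include <stdio.h>

	/* Mirrors the tail of gfs2_file_direct_read()/_write() above. */
	static long dio_result(long ret, long transferred)
	{
		if (ret < 0)
			return ret;	/* user space doesn't expect partial success */
		return transferred;
	}

	int main(void)
	{
		printf("%ld\n", dio_result(-14, 4096));	/* -14 (-EFAULT), not 4096 */
		return 0;
	}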
@@ -906,6 +908,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
 		if (ret != -EFAULT)
 			goto out_unlock;
 	}
+	/* No increment (+=) because iomap_dio_rw returns a cumulative value. */
 	if (ret > 0)
 		written = ret;
@@ -920,6 +923,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
 		gfs2_glock_dq(gh);
 out_uninit:
 	gfs2_holder_uninit(gh);
+	/* User space doesn't expect partial success. */
 	if (ret < 0)
 		return ret;
 	return written;
...
@@ -127,9 +127,11 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
 	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
 
 	kfree(gl->gl_lksb.sb_lvbptr);
-	if (gl->gl_ops->go_flags & GLOF_ASPACE)
-		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
-	else
+	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+		struct gfs2_glock_aspace *gla =
+			container_of(gl, struct gfs2_glock_aspace, glock);
+		kmem_cache_free(gfs2_glock_aspace_cachep, gla);
+	} else
 		kmem_cache_free(gfs2_glock_cachep, gl);
 }
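
The dealloc path now recovers the enclosing object with container_of() instead of assuming the glock sits at the start of the slab object. A runnable userspace sketch of the pattern, with simplified stand-in types and the standard container_of() definition:

	#include <stddef.h>
	#include <stdlib.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct glock { int state; };          /* stand-in for struct gfs2_glock */
	struct glock_aspace {                 /* mirrors struct gfs2_glock_aspace */
		struct glock glock;
		long mapping;                 /* stand-in for struct address_space */
	};

	static void glock_dealloc(struct glock *gl, int has_aspace)
	{
		if (has_aspace) {
			struct glock_aspace *gla =
				container_of(gl, struct glock_aspace, glock);
			free(gla);            /* free the enclosing allocation */
		} else {
			free(gl);
		}
	}

	int main(void)
	{
		struct glock_aspace *gla = calloc(1, sizeof(*gla));

		if (gla)
			glock_dealloc(&gla->glock, 1);
		return 0;
	}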
@@ -1159,7 +1161,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 			.ln_sbd = sdp };
 	struct gfs2_glock *gl, *tmp;
 	struct address_space *mapping;
-	struct kmem_cache *cachep;
 	int ret = 0;
 
 	gl = find_insert_glock(&name, NULL);
@@ -1170,20 +1171,24 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	if (!create)
 		return -ENOENT;
 
-	if (glops->go_flags & GLOF_ASPACE)
-		cachep = gfs2_glock_aspace_cachep;
-	else
-		cachep = gfs2_glock_cachep;
-	gl = kmem_cache_alloc(cachep, GFP_NOFS);
-	if (!gl)
-		return -ENOMEM;
+	if (glops->go_flags & GLOF_ASPACE) {
+		struct gfs2_glock_aspace *gla =
+			kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_NOFS);
+		if (!gla)
+			return -ENOMEM;
+		gl = &gla->glock;
+	} else {
+		gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_NOFS);
+		if (!gl)
+			return -ENOMEM;
+	}
 
 	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
+	gl->gl_ops = glops;
 
 	if (glops->go_flags & GLOF_LVB) {
 		gl->gl_lksb.sb_lvbptr = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
 		if (!gl->gl_lksb.sb_lvbptr) {
-			kmem_cache_free(cachep, gl);
+			gfs2_glock_dealloc(&gl->gl_rcu);
 			return -ENOMEM;
 		}
 	}
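
Allocation is the mirror image of the dealloc change: the slab object is the enclosing struct when an address space is attached, and only the embedded glock pointer escapes. Note also that gl->gl_ops is now set before the GLOF_LVB error path, so gfs2_glock_dealloc() can consult go_flags to pick the right cache. A userspace sketch under the same simplified types (calloc stands in for kmem_cache_alloc):

	#include <stdlib.h>

	struct glock { int state; };
	struct glock_aspace {
		struct glock glock;
		long mapping;
	};

	static struct glock *glock_alloc(int wants_aspace)
	{
		if (wants_aspace) {
			struct glock_aspace *gla = calloc(1, sizeof(*gla));

			if (!gla)
				return NULL;
			return &gla->glock;   /* callers only ever see the glock */
		}
		return calloc(1, sizeof(struct glock));
	}

	int main(void)
	{
		struct glock *gl = glock_alloc(1);

		if (gl) {
			/* the real code frees via container_of(); plain free(gl)
			 * only works here because glock is the first member */
			free(gl);
		}
		return 0;
	}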
@@ -1197,7 +1202,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	gl->gl_state = LM_ST_UNLOCKED;
 	gl->gl_target = LM_ST_UNLOCKED;
 	gl->gl_demote_state = LM_ST_EXCLUSIVE;
-	gl->gl_ops = glops;
 	gl->gl_dstamp = 0;
 	preempt_disable();
 	/* We use the global stats to estimate the initial per-glock stats */
@@ -1234,8 +1238,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	*glp = tmp;
 
 out_free:
-	kfree(gl->gl_lksb.sb_lvbptr);
-	kmem_cache_free(cachep, gl);
+	gfs2_glock_dealloc(&gl->gl_rcu);
 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 		wake_up(&sdp->sd_glock_wait);
...
@@ -138,6 +138,11 @@ struct lm_lockops {
 	const match_table_t *lm_tokens;
 };
 
+struct gfs2_glock_aspace {
+	struct gfs2_glock glock;
+	struct address_space mapping;
+};
+
 extern struct workqueue_struct *gfs2_delete_workqueue;
 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 {
@@ -179,8 +184,11 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
 static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
 {
-	if (gl->gl_ops->go_flags & GLOF_ASPACE)
-		return (struct address_space *)(gl + 1);
+	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+		struct gfs2_glock_aspace *gla =
+			container_of(gl, struct gfs2_glock_aspace, glock);
+		return &gla->mapping;
+	}
 	return NULL;
 }
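
gfs2_glock2aspace() previously relied on the mapping being laid out immediately after the glock ((gl + 1)); container_of() derives the same address from the new struct definition instead, so it stays correct if the layout ever changes. A runnable sketch showing the two forms agree only by layout accident:

	#include <assert.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct glock { long state; };
	struct glock_aspace {
		struct glock glock;
		long mapping;
	};

	int main(void)
	{
		struct glock_aspace gla;
		struct glock *gl = &gla.glock;

		/* old style: assumes mapping starts exactly at gl + 1 */
		long *via_cast = (long *)(gl + 1);
		/* new style: derived from the struct definition itself */
		long *via_member =
			&container_of(gl, struct glock_aspace, glock)->mapping;

		assert(via_cast == via_member);   /* true here, but only by layout */
		return 0;
	}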
...
@@ -62,11 +62,10 @@ static void gfs2_init_glock_once(void *foo)
 
 static void gfs2_init_gl_aspace_once(void *foo)
 {
-	struct gfs2_glock *gl = foo;
-	struct address_space *mapping = (struct address_space *)(gl + 1);
+	struct gfs2_glock_aspace *gla = foo;
 
-	gfs2_init_glock_once(gl);
-	address_space_init_once(mapping);
+	gfs2_init_glock_once(&gla->glock);
+	address_space_init_once(&gla->mapping);
 }
 
 /**
@@ -104,8 +103,7 @@ static int __init init_gfs2_fs(void)
 		goto fail_cachep1;
 
 	gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock(aspace)",
-					sizeof(struct gfs2_glock) +
-					sizeof(struct address_space),
+					sizeof(struct gfs2_glock_aspace),
 					0, 0, gfs2_init_gl_aspace_once);
 	if (!gfs2_glock_aspace_cachep)
...
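
The slab size is now sizeof(struct gfs2_glock_aspace) rather than the sum of the member sizes; sizeof() on the enclosing struct also covers any alignment padding between the members, which a sum can miss. A tiny demonstration with deliberately mis-sized stand-ins:

	#include <stdio.h>

	struct glock { char state; };            /* deliberately oddly sized */
	struct address_space { long host; };
	struct glock_aspace {
		struct glock glock;
		struct address_space mapping;    /* alignment inserts padding */
	};

	int main(void)
	{
		printf("sum=%zu sizeof=%zu\n",
		       sizeof(struct glock) + sizeof(struct address_space),
		       sizeof(struct glock_aspace));   /* e.g. sum=9 sizeof=16 */
		return 0;
	}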
@@ -40,9 +40,11 @@ extern const struct address_space_operations gfs2_rgrp_aops;
 static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
 {
 	struct inode *inode = mapping->host;
-	if (mapping->a_ops == &gfs2_meta_aops)
-		return (((struct gfs2_glock *)mapping) - 1)->gl_name.ln_sbd;
-	else if (mapping->a_ops == &gfs2_rgrp_aops)
+	if (mapping->a_ops == &gfs2_meta_aops) {
+		struct gfs2_glock_aspace *gla =
+			container_of(mapping, struct gfs2_glock_aspace, mapping);
+		return gla->glock.gl_name.ln_sbd;
+	} else if (mapping->a_ops == &gfs2_rgrp_aops)
 		return container_of(mapping, struct gfs2_sbd, sd_aspace);
 	else
 		return inode->i_sb->s_fs_info;
...
@@ -365,11 +365,12 @@ static void slot_put(struct gfs2_quota_data *qd)
 static int bh_get(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
-	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
+	struct inode *inode = sdp->sd_qc_inode;
+	struct gfs2_inode *ip = GFS2_I(inode);
 	unsigned int block, offset;
 	struct buffer_head *bh;
+	struct iomap iomap = { };
 	int error;
-	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 
 	mutex_lock(&sdp->sd_quota_mutex);
@@ -381,11 +382,17 @@ static int bh_get(struct gfs2_quota_data *qd)
 	block = qd->qd_slot / sdp->sd_qc_per_block;
 	offset = qd->qd_slot % sdp->sd_qc_per_block;
 
-	bh_map.b_size = BIT(ip->i_inode.i_blkbits);
-	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
+	error = gfs2_iomap_get(inode,
+			       (loff_t)block << inode->i_blkbits,
+			       i_blocksize(inode), &iomap);
 	if (error)
 		goto fail;
-	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, 0, &bh);
+	error = -ENOENT;
+	if (iomap.type != IOMAP_MAPPED)
+		goto fail;
+	error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits,
+			       DIO_WAIT, 0, &bh);
 	if (error)
 		goto fail;
 	error = -EIO;
@@ -443,9 +450,8 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 {
-	struct gfs2_quota_data *qd = NULL;
+	struct gfs2_quota_data *qd = NULL, *iter;
 	int error;
-	int found = 0;
 
 	*qdp = NULL;
@@ -454,15 +460,13 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 	spin_lock(&qd_lock);
 
-	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
-		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
-		if (found)
+	list_for_each_entry(iter, &sdp->sd_quota_list, qd_list) {
+		if (qd_check_sync(sdp, iter, &sdp->sd_quota_sync_gen)) {
+			qd = iter;
 			break;
+		}
 	}
 
-	if (!found)
-		qd = NULL;
-
 	spin_unlock(&qd_lock);
 
 	if (qd) {
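
Replacing the 'found' flag with a dedicated iterator means the result pointer is either NULL or a real entry after the loop, never the bogus head-embedded value that list_for_each_entry() leaves in its cursor after a full traversal. The same shape on a plain linked list, as a runnable sketch:

	#include <stdio.h>

	struct node { int key; struct node *next; };

	static struct node *find(struct node *head, int key)
	{
		struct node *result = NULL;   /* set only on a match */

		for (struct node *iter = head; iter; iter = iter->next) {
			if (iter->key == key) {
				result = iter;
				break;
			}
		}
		return result;                /* NULL or valid, nothing in between */
	}

	int main(void)
	{
		struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

		printf("found key 2: %s\n", find(&a, 2) ? "yes" : "no");
		return 0;
	}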
@@ -531,34 +535,42 @@ static void qdsb_put(struct gfs2_quota_data *qd)
  */
 int gfs2_qa_get(struct gfs2_inode *ip)
 {
-	int error = 0;
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	struct inode *inode = &ip->i_inode;
 
 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 		return 0;
 
-	down_write(&ip->i_rw_mutex);
+	spin_lock(&inode->i_lock);
 	if (ip->i_qadata == NULL) {
-		ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
-		if (!ip->i_qadata) {
-			error = -ENOMEM;
-			goto out;
-		}
+		struct gfs2_qadata *tmp;
+
+		spin_unlock(&inode->i_lock);
+		tmp = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
+		if (!tmp)
+			return -ENOMEM;
+
+		spin_lock(&inode->i_lock);
+		if (ip->i_qadata == NULL)
+			ip->i_qadata = tmp;
+		else
+			kmem_cache_free(gfs2_qadata_cachep, tmp);
 	}
 	ip->i_qadata->qa_ref++;
-out:
-	up_write(&ip->i_rw_mutex);
-	return error;
+	spin_unlock(&inode->i_lock);
+	return 0;
 }
 
 void gfs2_qa_put(struct gfs2_inode *ip)
 {
-	down_write(&ip->i_rw_mutex);
+	struct inode *inode = &ip->i_inode;
+
+	spin_lock(&inode->i_lock);
 	if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
 		kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
 		ip->i_qadata = NULL;
 	}
-	up_write(&ip->i_rw_mutex);
+	spin_unlock(&inode->i_lock);
 }
 
 int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
...
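
The qadata conversion trades i_rw_mutex for the inode's i_lock spinlock. Since a spinlock cannot be held across a sleeping allocation, the new gfs2_qa_get() drops the lock, allocates, retakes it, and rechecks in case a racing thread initialized i_qadata in the meantime. A userspace sketch of that pattern, with a pthread spinlock standing in for i_lock:

	#include <pthread.h>
	#include <stdlib.h>

	static pthread_spinlock_t lock;     /* stands in for inode->i_lock */
	static int *qadata;                 /* stands in for ip->i_qadata */
	static int qa_ref;

	static int qa_get(void)
	{
		pthread_spin_lock(&lock);
		if (qadata == NULL) {
			int *tmp;

			pthread_spin_unlock(&lock);  /* can't allocate under a spinlock */
			tmp = calloc(1, sizeof(*tmp));
			if (!tmp)
				return -1;
			pthread_spin_lock(&lock);
			if (qadata == NULL)
				qadata = tmp;        /* we won the race */
			else
				free(tmp);           /* a racing thread beat us */
		}
		qa_ref++;
		pthread_spin_unlock(&lock);
		return 0;
	}

	int main(void)
	{
		pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
		return qa_get() ? 1 : 0;
	}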
@@ -55,17 +55,16 @@ int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
 int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
 {
 	struct list_head *head = &jd->jd_revoke_list;
-	struct gfs2_revoke_replay *rr;
-	int found = 0;
+	struct gfs2_revoke_replay *rr = NULL, *iter;
 
-	list_for_each_entry(rr, head, rr_list) {
-		if (rr->rr_blkno == blkno) {
-			found = 1;
+	list_for_each_entry(iter, head, rr_list) {
+		if (iter->rr_blkno == blkno) {
+			rr = iter;
 			break;
 		}
 	}
 
-	if (found) {
+	if (rr) {
 		rr->rr_where = where;
 		return 0;
 	}
@@ -83,18 +82,17 @@ int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
 int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
 {
-	struct gfs2_revoke_replay *rr;
+	struct gfs2_revoke_replay *rr = NULL, *iter;
 	int wrap, a, b, revoke;
-	int found = 0;
 
-	list_for_each_entry(rr, &jd->jd_revoke_list, rr_list) {
-		if (rr->rr_blkno == blkno) {
-			found = 1;
+	list_for_each_entry(iter, &jd->jd_revoke_list, rr_list) {
+		if (iter->rr_blkno == blkno) {
+			rr = iter;
 			break;
 		}
 	}
 
-	if (!found)
+	if (!rr)
 		return 0;
 
 	wrap = (rr->rr_where < jd->jd_replay_tail);
...
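
gfs2_revoke_add() follows the same iterator conversion and has an update-or-insert shape: reuse the entry for the block if one exists, otherwise (in the elided tail of the hunk) add a new one. A sketch of that shape on a plain list; the return values here are illustrative, not the kernel's:

	#include <stdlib.h>

	struct revoke {
		unsigned long long blkno;
		unsigned int where;
		struct revoke *next;
	};

	static int revoke_add(struct revoke **head, unsigned long long blkno,
			      unsigned int where)
	{
		struct revoke *rr = NULL;

		for (struct revoke *iter = *head; iter; iter = iter->next) {
			if (iter->blkno == blkno) {
				rr = iter;
				break;
			}
		}
		if (rr) {
			rr->where = where;   /* a later revoke supersedes the old one */
			return 0;
		}
		rr = malloc(sizeof(*rr));
		if (!rr)
			return -1;           /* illustrative; the kernel returns -ENOMEM */
		rr->blkno = blkno;
		rr->where = where;
		rr->next = *head;
		*head = rr;
		return 0;
	}

	int main(void)
	{
		struct revoke *head = NULL;

		revoke_add(&head, 7, 1);
		return revoke_add(&head, 7, 2);   /* updates in place, returns 0 */
	}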
@@ -1315,7 +1315,7 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
 	u64 blk;
 	sector_t start = 0;
 	sector_t nr_blks = 0;
-	int rv;
+	int rv = -EIO;
 	unsigned int x;
 	u32 trimmed = 0;
 	u8 diff;
@@ -1371,7 +1371,7 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
 	if (sdp->sd_args.ar_discard)
 		fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv);
 	sdp->sd_args.ar_discard = 0;
-	return -EIO;
+	return rv;
 }
 
 /**
...
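
Initializing rv to -EIO and returning rv preserves the real errno from the failed discard request (for example -EOPNOTSUPP from a device without discard support) instead of flattening every failure to EIO; -EIO remains only as the default for failures before any request is issued. A sketch of the fix, where discard() is a hypothetical stand-in for the block-layer call:

	#include <errno.h>
	#include <stdio.h>

	static int discard(void)            /* hypothetical stand-in */
	{
		return -EOPNOTSUPP;         /* device doesn't support discard */
	}

	static int send_discards(void)
	{
		int rv = -EIO;              /* default if we fail before any request */

		rv = discard();
		if (rv)
			goto fail;
		return 0;
	fail:
		fprintf(stderr, "error %d on discard request\n", rv);
		return rv;                  /* was: return -EIO */
	}

	int main(void)
	{
		return send_discards() ? 1 : 0;
	}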