Commit a03167ae authored by Abhijith Das, committed by Greg Kroah-Hartman

GFS2: Fix writing to non-page aligned gfs2_quota structures

commit 7e619bc3 upstream.

This is the upstream fix for this bug. This patch differs
from the RHEL5 fix (Red Hat bz #555754) which simply writes to the 8-byte
value field of the quota. In upstream quota code, we're
required to write the entire quota (88 bytes) which can be split
across a page boundary. We check for such quotas, and read/write
the two parts from/to the corresponding pages holding these parts.

With this patch, I don't see the bug anymore using the reproducer
in Red Hat bz 555754. I successfully ran a couple of simple tests/mounts/
umounts and it doesn't seem like this patch breaks anything else.
Signed-off-by: Abhi Das <adas@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
[Backported to 2.6.32 by dann frazier <dannf@debian.org>]
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 120011ea
...@@ -633,14 +633,29 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, ...@@ -633,14 +633,29 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
unsigned blocksize, iblock, pos; unsigned blocksize, iblock, pos;
struct buffer_head *bh; struct buffer_head *bh;
struct page *page; struct page *page;
void *kaddr; void *kaddr, *ptr;
struct gfs2_quota *qp; struct gfs2_quota q, *qp;
s64 value; int err, nbytes;
int err = -EIO;
if (gfs2_is_stuffed(ip)) if (gfs2_is_stuffed(ip))
gfs2_unstuff_dinode(ip, NULL); gfs2_unstuff_dinode(ip, NULL);
memset(&q, 0, sizeof(struct gfs2_quota));
err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
if (err < 0)
return err;
err = -EIO;
qp = &q;
qp->qu_value = be64_to_cpu(qp->qu_value);
qp->qu_value += change;
qp->qu_value = cpu_to_be64(qp->qu_value);
qd->qd_qb.qb_value = qp->qu_value;
/* Write the quota into the quota file on disk */
ptr = qp;
nbytes = sizeof(struct gfs2_quota);
get_a_page:
page = grab_cache_page(mapping, index); page = grab_cache_page(mapping, index);
if (!page) if (!page)
return -ENOMEM; return -ENOMEM;
...@@ -662,7 +677,12 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, ...@@ -662,7 +677,12 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
if (!buffer_mapped(bh)) { if (!buffer_mapped(bh)) {
gfs2_block_map(inode, iblock, bh, 1); gfs2_block_map(inode, iblock, bh, 1);
if (!buffer_mapped(bh)) if (!buffer_mapped(bh))
goto unlock; goto unlock_out;
/* If it's a newly allocated disk block for quota, zero it */
if (buffer_new(bh)) {
memset(bh->b_data, 0, bh->b_size);
set_buffer_uptodate(bh);
}
} }
if (PageUptodate(page)) if (PageUptodate(page))
...@@ -672,20 +692,32 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, ...@@ -672,20 +692,32 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
ll_rw_block(READ_META, 1, &bh); ll_rw_block(READ_META, 1, &bh);
wait_on_buffer(bh); wait_on_buffer(bh);
if (!buffer_uptodate(bh)) if (!buffer_uptodate(bh))
goto unlock; goto unlock_out;
} }
gfs2_trans_add_bh(ip->i_gl, bh, 0); gfs2_trans_add_bh(ip->i_gl, bh, 0);
kaddr = kmap_atomic(page, KM_USER0); kaddr = kmap_atomic(page, KM_USER0);
qp = kaddr + offset; if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
value = (s64)be64_to_cpu(qp->qu_value) + change; nbytes = PAGE_CACHE_SIZE - offset;
qp->qu_value = cpu_to_be64(value); memcpy(kaddr + offset, ptr, nbytes);
qd->qd_qb.qb_value = qp->qu_value;
flush_dcache_page(page); flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr, KM_USER0);
unlock_page(page);
page_cache_release(page);
/* If quota straddles page boundary, we need to update the rest of the
* quota at the beginning of the next page */
if (offset != 0) { /* first page, offset is closer to PAGE_CACHE_SIZE */
ptr = ptr + nbytes;
nbytes = sizeof(struct gfs2_quota) - nbytes;
offset = 0;
index++;
goto get_a_page;
}
err = 0; err = 0;
unlock: return err;
unlock_out:
unlock_page(page); unlock_page(page);
page_cache_release(page); page_cache_release(page);
return err; return err;
...@@ -748,8 +780,10 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) ...@@ -748,8 +780,10 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
* rgrp since it won't be allocated during the transaction * rgrp since it won't be allocated during the transaction
*/ */
al->al_requested = 1; al->al_requested = 1;
/* +1 in the end for block requested above for unstuffing */ /* +3 in the end for unstuffing block, inode size update block
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1; * and another block in case quota straddles page boundary and
* two blocks need to be updated instead of 1 */
blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;
if (nalloc) if (nalloc)
al->al_requested += nalloc * (data_blocks + ind_blocks); al->al_requested += nalloc * (data_blocks + ind_blocks);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment