Commit 3219e8cf authored by Bill O'Donnell, committed by Darrick J. Wong

xfs: assure zeroed memory buffers for certain kmem allocations

Guarantee zeroed memory buffers for cases where a potential memory
leak to disk can occur. In these cases, kmem_alloc is used and does
not zero the buffer, opening the possibility of information leakage
to disk.

Use existing infrastructure (xfs_buf_allocate_memory) to obtain the
already zeroed buffer from kernel memory.

This solution avoids the performance penalty that a wholesale
replacement of kmem_alloc with kmem_zalloc would incur.
Signed-off-by: Bill O'Donnell <billodo@redhat.com>
[darrick: fix bitwise complaint about kmflag_mask]
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent d5cc14d9
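
The KM_ZERO flag used in the hunks below only produces zeroed memory because the XFS kmem wrappers translate it into __GFP_ZERO before calling the allocator. The following is a simplified sketch of that translation, loosely modeled on the kmem_flags_convert() helper in fs/xfs/kmem.h; it is not the literal tree code, and the exact flag handling may differ between kernel versions.

/*
 * Simplified sketch (not the code touched by this commit): translate an
 * xfs_km_flags_t mask into GFP flags so that KM_ZERO yields zeroed memory.
 */
static inline gfp_t
example_kmem_flags_to_gfp(xfs_km_flags_t flags)
{
	gfp_t	lflags = GFP_KERNEL | __GFP_NOWARN;

	if (flags & KM_NOFS)		/* avoid filesystem reentry from reclaim */
		lflags &= ~__GFP_FS;
	if (flags & KM_MAYFAIL)		/* let the allocation fail rather than loop */
		lflags |= __GFP_RETRY_MAYFAIL;
	if (flags & KM_ZERO)		/* hand back zeroed memory */
		lflags |= __GFP_ZERO;

	return lflags;
}
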
@@ -345,6 +345,15 @@ xfs_buf_allocate_memory(
 	unsigned short		page_count, i;
 	xfs_off_t		start, end;
 	int			error;
+	xfs_km_flags_t		kmflag_mask = 0;
+
+	/*
+	 * assure zeroed buffer for non-read cases.
+	 */
+	if (!(flags & XBF_READ)) {
+		kmflag_mask |= KM_ZERO;
+		gfp_mask |= __GFP_ZERO;
+	}
 
 	/*
 	 * for buffers that are contained within a single page, just allocate
@@ -354,7 +363,8 @@ xfs_buf_allocate_memory(
 	size = BBTOB(bp->b_length);
 	if (size < PAGE_SIZE) {
 		int	align_mask = xfs_buftarg_dma_alignment(bp->b_target);
-		bp->b_addr = kmem_alloc_io(size, align_mask, KM_NOFS);
+		bp->b_addr = kmem_alloc_io(size, align_mask,
+					   KM_NOFS | kmflag_mask);
 		if (!bp->b_addr) {
 			/* low memory - use alloc_page loop instead */
 			goto use_alloc_page;
...
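
The hunk above ORs __GFP_ZERO into gfp_mask in addition to setting KM_ZERO in kmflag_mask: when the small-buffer kmem_alloc_io() attempt fails, xfs_buf_allocate_memory() falls back to building the buffer from individual pages, and that path needs the same zeroing guarantee. The snippet below is an illustrative sketch of such a page-by-page fallback, not the literal buffer-cache code; the struct xfs_buf fields are real, but the error handling is simplified.

/*
 * Illustrative sketch: with __GFP_ZERO set in gfp_mask for non-read
 * buffers, pages allocated in the fallback path come back zeroed,
 * matching the kmem_alloc_io() path above.
 */
static int
example_fill_buf_pages(struct xfs_buf *bp, gfp_t gfp_mask)
{
	unsigned short	i;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page = alloc_page(gfp_mask);

		if (!page)
			return -ENOMEM;	/* real code retries under memory pressure */
		bp->b_pages[i] = page;
	}
	return 0;
}
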
@@ -1443,7 +1443,7 @@ xlog_alloc_log(
 		prev_iclog = iclog;
 
 		iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
-						KM_MAYFAIL);
+						KM_MAYFAIL | KM_ZERO);
 		if (!iclog->ic_data)
 			goto out_free_iclog;
 #ifdef DEBUG
...
@@ -127,7 +127,7 @@ xlog_alloc_buffer(
 	if (nbblks > 1 && log->l_sectBBsize > 1)
 		nbblks += log->l_sectBBsize;
 	nbblks = round_up(nbblks, log->l_sectBBsize);
-	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL);
+	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
 }
 
 /*
...