Commit a3ac1414 authored by Cong Wang, committed by Cong Wang

ntfs: remove the second argument of k[un]map_atomic()

Signed-off-by: Cong Wang <amwang@redhat.com>
parent 7b9c0976
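
For context, this is part of the tree-wide kmap_atomic() interface change: the old API made the caller name a fixed per-CPU slot (KM_USER0, KM_BIO_SRC_IRQ, ...), while the stack-based implementation manages slots itself, so the second argument to kmap_atomic() and kunmap_atomic() simply goes away. A minimal before/after sketch (illustrative only, not part of this commit; the zero_page_*() helpers are hypothetical):

#include <linux/highmem.h>

/* Old calling convention: the caller picked, and had to repeat, a slot. */
static void zero_page_old(struct page *page)
{
	u8 *kaddr = kmap_atomic(page, KM_USER0);

	memset(kaddr, 0, PAGE_SIZE);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
}

/* New calling convention: only the page, and later the address, is passed. */
static void zero_page_new(struct page *page)
{
	u8 *kaddr = kmap_atomic(page);

	memset(kaddr, 0, PAGE_SIZE);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
}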
@@ -94,11 +94,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 			if (file_ofs < init_size)
 				ofs = init_size - file_ofs;
 			local_irq_save(flags);
-			kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+			kaddr = kmap_atomic(page);
 			memset(kaddr + bh_offset(bh) + ofs, 0,
 					bh->b_size - ofs);
 			flush_dcache_page(page);
-			kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+			kunmap_atomic(kaddr);
 			local_irq_restore(flags);
 		}
 	} else {
@@ -147,11 +147,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		/* Should have been verified before we got here... */
 		BUG_ON(!recs);
 		local_irq_save(flags);
-		kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+		kaddr = kmap_atomic(page);
 		for (i = 0; i < recs; i++)
 			post_read_mst_fixup((NTFS_RECORD*)(kaddr +
 					i * rec_size), rec_size);
-		kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+		kunmap_atomic(kaddr);
 		local_irq_restore(flags);
 		flush_dcache_page(page);
 		if (likely(page_uptodate && !PageError(page)))
@@ -504,7 +504,7 @@ static int ntfs_readpage(struct file *file, struct page *page)
 		/* Race with shrinking truncate. */
 		attr_len = i_size;
 	}
-	addr = kmap_atomic(page, KM_USER0);
+	addr = kmap_atomic(page);
 	/* Copy the data to the page. */
 	memcpy(addr, (u8*)ctx->attr +
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
@@ -512,7 +512,7 @@ static int ntfs_readpage(struct file *file, struct page *page)
 	/* Zero the remainder of the page. */
 	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
 	flush_dcache_page(page);
-	kunmap_atomic(addr, KM_USER0);
+	kunmap_atomic(addr);
 put_unm_err_out:
 	ntfs_attr_put_search_ctx(ctx);
 unm_err_out:
@@ -746,14 +746,14 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
 				unsigned long *bpos, *bend;
 
 				/* Check if the buffer is zero. */
-				kaddr = kmap_atomic(page, KM_USER0);
+				kaddr = kmap_atomic(page);
 				bpos = (unsigned long *)(kaddr + bh_offset(bh));
 				bend = (unsigned long *)((u8*)bpos + blocksize);
 				do {
 					if (unlikely(*bpos))
 						break;
 				} while (likely(++bpos < bend));
-				kunmap_atomic(kaddr, KM_USER0);
+				kunmap_atomic(kaddr);
 				if (bpos == bend) {
 					/*
 					 * Buffer is zero and sparse, no need to write
@@ -1495,14 +1495,14 @@ static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
 		/* Shrinking cannot fail. */
 		BUG_ON(err);
 	}
-	addr = kmap_atomic(page, KM_USER0);
+	addr = kmap_atomic(page);
 	/* Copy the data from the page to the mft record. */
 	memcpy((u8*)ctx->attr +
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
 			addr, attr_len);
 	/* Zero out of bounds area in the page cache page. */
 	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
-	kunmap_atomic(addr, KM_USER0);
+	kunmap_atomic(addr);
 	flush_dcache_page(page);
 	flush_dcache_mft_record_page(ctx->ntfs_ino);
 	/* We are done with the page. */
...
@@ -1656,12 +1656,12 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
 	attr_size = le32_to_cpu(a->data.resident.value_length);
 	BUG_ON(attr_size != data_size);
 	if (page && !PageUptodate(page)) {
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memcpy(kaddr, (u8*)a +
 				le16_to_cpu(a->data.resident.value_offset),
 				attr_size);
 		memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		flush_dcache_page(page);
 		SetPageUptodate(page);
 	}
@@ -1806,9 +1806,9 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
 			sizeof(a->data.resident.reserved));
 	/* Copy the data from the page back to the attribute value. */
 	if (page) {
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memcpy((u8*)a + mp_ofs, kaddr, attr_size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 	}
 	/* Setup the allocated size in the ntfs inode in case it changed. */
 	write_lock_irqsave(&ni->size_lock, flags);
@@ -2540,10 +2540,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 		size = PAGE_CACHE_SIZE;
 		if (idx == end)
 			size = end_ofs;
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memset(kaddr + start_ofs, val, size - start_ofs);
 		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		set_page_dirty(page);
 		page_cache_release(page);
 		balance_dirty_pages_ratelimited(mapping);
@@ -2561,10 +2561,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 					"page (index 0x%lx).", idx);
 			return -ENOMEM;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memset(kaddr, val, PAGE_CACHE_SIZE);
 		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		/*
 		 * If the page has buffers, mark them uptodate since buffer
 		 * state and not page state is definitive in 2.6 kernels.
@@ -2598,10 +2598,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 				"(error, index 0x%lx).", idx);
 		return PTR_ERR(page);
 	}
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memset(kaddr, val, end_ofs);
 	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	set_page_dirty(page);
 	page_cache_release(page);
 	balance_dirty_pages_ratelimited(mapping);
...
@@ -704,7 +704,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
 				u8 *kaddr;
 				unsigned pofs;
 
-				kaddr = kmap_atomic(page, KM_USER0);
+				kaddr = kmap_atomic(page);
 				if (bh_pos < pos) {
 					pofs = bh_pos & ~PAGE_CACHE_MASK;
 					memset(kaddr + pofs, 0, pos - bh_pos);
@@ -713,7 +713,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
 					pofs = end & ~PAGE_CACHE_MASK;
 					memset(kaddr + pofs, 0, bh_end - end);
 				}
-				kunmap_atomic(kaddr, KM_USER0);
+				kunmap_atomic(kaddr);
 				flush_dcache_page(page);
 			}
 			continue;
@@ -1287,9 +1287,9 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
 		len = PAGE_CACHE_SIZE - ofs;
 		if (len > bytes)
 			len = bytes;
-		addr = kmap_atomic(*pages, KM_USER0);
+		addr = kmap_atomic(*pages);
 		left = __copy_from_user_inatomic(addr + ofs, buf, len);
-		kunmap_atomic(addr, KM_USER0);
+		kunmap_atomic(addr);
 		if (unlikely(left)) {
 			/* Do it the slow way. */
 			addr = kmap(*pages);
@@ -1401,10 +1401,10 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
 		len = PAGE_CACHE_SIZE - ofs;
 		if (len > bytes)
 			len = bytes;
-		addr = kmap_atomic(*pages, KM_USER0);
+		addr = kmap_atomic(*pages);
 		copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
 				*iov, *iov_ofs, len);
-		kunmap_atomic(addr, KM_USER0);
+		kunmap_atomic(addr);
 		if (unlikely(copied != len)) {
 			/* Do it the slow way. */
 			addr = kmap(*pages);
@@ -1691,7 +1691,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
 	BUG_ON(end > le32_to_cpu(a->length) -
 			le16_to_cpu(a->data.resident.value_offset));
 	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	/* Copy the received data from the page to the mft record. */
 	memcpy(kattr + pos, kaddr + pos, bytes);
 	/* Update the attribute length if necessary. */
@@ -1713,7 +1713,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
 		flush_dcache_page(page);
 		SetPageUptodate(page);
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	/* Update initialized_size/i_size if necessary. */
 	read_lock_irqsave(&ni->size_lock, flags);
 	initialized_size = ni->initialized_size;
...
@@ -2473,7 +2473,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		/*
 		 * Subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
@@ -2483,7 +2483,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 		 */
 		nr_free -= bitmap_weight(kaddr,
 				PAGE_CACHE_SIZE * BITS_PER_BYTE);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		page_cache_release(page);
 	}
 	ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
@@ -2544,7 +2544,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		/*
 		 * Subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
@@ -2554,7 +2554,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 		 */
 		nr_free -= bitmap_weight(kaddr,
 				PAGE_CACHE_SIZE * BITS_PER_BYTE);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		page_cache_release(page);
 	}
 	ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
...
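
Worth noting, as a reading of the diff rather than anything the commit message states: the conversion is purely mechanical, and the local_irq_save()/local_irq_restore() pairs around the former KM_BIO_SRC_IRQ mappings in ntfs_end_buffer_async_read() stay put. With fixed slots, interrupts had to be disabled so an interrupt handler could not reuse the slot in flight; the stack-based kmap_atomic() has no such requirement, which leaves those pairs as candidates for a separate cleanup. The new API also nests naturally, as in this hypothetical sketch (copy_highmem_page() is not a real kernel function):

#include <linux/highmem.h>

/*
 * Hypothetical sketch, not from this commit: stack-based atomic kmaps
 * nest without slot bookkeeping, as long as they are unmapped in
 * reverse (LIFO) order.
 */
static void copy_highmem_page(struct page *dst, struct page *src)
{
	u8 *vto = kmap_atomic(dst);
	u8 *vfrom = kmap_atomic(src);	/* nests on top of the first mapping */

	memcpy(vto, vfrom, PAGE_SIZE);
	kunmap_atomic(vfrom);		/* innermost mapping goes first */
	kunmap_atomic(vto);
	flush_dcache_page(dst);
}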