Commit a969e903 authored by Christoph Hellwig, committed by Linus Torvalds

kill generic_file_direct_IO()

generic_file_direct_IO() is a common helper around the invocation of
->direct_IO().  But there's almost nothing shared between the read and write
sides, so we're better off without this helper.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 75353bed
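
A condensed sketch of the two open-coded paths this patch produces (distilled from the hunks below; locking, error handling, and the O_SYNC tail are elided, and 'start'/'end' stand in for the page-index bounds used in the write path):

	/* Read path, generic_file_aio_read() (sketch): */
	retval = filemap_write_and_wait(mapping);	/* flush dirty pagecache first */
	if (!retval)
		retval = mapping->a_ops->direct_IO(READ, iocb, iov, pos, nr_segs);

	/* Write path, generic_file_direct_write() (sketch): */
	unmap_mapping_range(mapping, pos, write_len, 0);	/* propagate pte dirty bits */
	written = filemap_write_and_wait(mapping);		/* write back dirty pages */
	written = invalidate_inode_pages2_range(mapping, start, end);	/* pre-write shootdown */
	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
	invalidate_inode_pages2_range(mapping, start, end);	/* post-write, best effort */

Only the filemap_write_and_wait() call is common to both sides, which is why a single rw-switching helper bought so little.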
@@ -42,9 +42,6 @@
 #include <asm/mman.h>
 
-static ssize_t
-generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs);
-
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
@@ -1205,8 +1202,11 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 			goto out; /* skip atime */
 		size = i_size_read(inode);
 		if (pos < size) {
-			retval = generic_file_direct_IO(READ, iocb,
-						iov, pos, nr_segs);
+			retval = filemap_write_and_wait(mapping);
+			if (!retval) {
+				retval = mapping->a_ops->direct_IO(READ, iocb,
+							iov, pos, nr_segs);
+			}
 			if (retval > 0)
 				*ppos = pos + retval;
 		}
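
The filemap_write_and_wait() ahead of ->direct_IO(READ, ...) is what keeps O_DIRECT reads coherent with the pagecache: direct I/O bypasses the cache entirely, so a buffered write still sitting dirty in memory would otherwise be invisible to the read. A sketch of the hazard from userspace (illustrative only; buffered_fd and direct_fd are hypothetical descriptors for the same file, one opened normally and one with O_DIRECT):

	/* Hazard sketch, assuming no write-back between the two calls: */
	write(buffered_fd, "new", 3);	/* lands dirty in the pagecache       */
	read(direct_fd, buf, 512);	/* reads blocks straight from disk -- */
					/* stale data without the preceding   */
					/* filemap_write_and_wait()           */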
@@ -2004,11 +2004,55 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	struct address_space *mapping = file->f_mapping;
 	struct inode *inode = mapping->host;
 	ssize_t written;
+	size_t write_len;
+	pgoff_t end;
 
 	if (count != ocount)
 		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
 
-	written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+	/*
+	 * Unmap all mmappings of the file up-front.
+	 *
+	 * This will cause any pte dirty bits to be propagated into the
+	 * pageframes for the subsequent filemap_write_and_wait().
+	 */
+	write_len = iov_length(iov, *nr_segs);
+	end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
+	if (mapping_mapped(mapping))
+		unmap_mapping_range(mapping, pos, write_len, 0);
+
+	written = filemap_write_and_wait(mapping);
+	if (written)
+		goto out;
+
+	/*
+	 * After a write we want buffered reads to be sure to go to disk to get
+	 * the new data.  We invalidate clean cached page from the region we're
+	 * about to write.  We do this *before* the write so that we can return
+	 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
+	 */
+	if (mapping->nrpages) {
+		written = invalidate_inode_pages2_range(mapping,
+					pos >> PAGE_CACHE_SHIFT, end);
+		if (written)
+			goto out;
+	}
+
+	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+
+	/*
+	 * Finally, try again to invalidate clean pages which might have been
+	 * cached by non-direct readahead, or faulted in by get_user_pages()
+	 * if the source of the write was an mmap'ed region of the file
+	 * we're writing.  Either one is a pretty crazy thing to do,
+	 * so we don't support it 100%.  If this invalidation
+	 * fails, tough, the write still worked...
+	 */
+	if (mapping->nrpages) {
+		invalidate_inode_pages2_range(mapping,
+					pos >> PAGE_CACHE_SHIFT, end);
+	}
+
 	if (written > 0) {
 		loff_t end = pos + written;
 		if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
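
The split of the two invalidate_inode_pages2_range() calls around ->direct_IO() encodes the error-reporting rule spelled out in the comments above: a failed shootdown before the transfer may fail the whole write with -EIO, but after ->direct_IO() has run, 'written' may hold -EIOCBQUEUED for an in-flight AIO request and must not be clobbered. Condensed (names as in the hunk; 'start' is shorthand for pos >> PAGE_CACHE_SHIFT):

	written = invalidate_inode_pages2_range(mapping, start, end);
	if (written)
		goto out;	/* nothing submitted yet, -EIO is safe to return */

	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);

	/* 'written' may now be -EIOCBQUEUED; the second, best-effort
	 * invalidation deliberately ignores its own return value so it
	 * cannot overwrite the result of the transfer. */
	invalidate_inode_pages2_range(mapping, start, end);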
@@ -2024,6 +2068,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	 * i_mutex is held, which protects generic_osync_inode() from
 	 * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
 	 */
+out:
 	if ((written >= 0 || written == -EIOCBQUEUED) &&
 	    ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
 		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
@@ -2511,66 +2556,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_file_aio_write);
 
-/*
- * Called under i_mutex for writes to S_ISREG files.  Returns -EIO if something
- * went wrong during pagecache shootdown.
- */
-static ssize_t
-generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs)
-{
-	struct file *file = iocb->ki_filp;
-	struct address_space *mapping = file->f_mapping;
-	ssize_t retval;
-	size_t write_len;
-	pgoff_t end = 0; /* silence gcc */
-
-	/*
-	 * If it's a write, unmap all mmappings of the file up-front.  This
-	 * will cause any pte dirty bits to be propagated into the pageframes
-	 * for the subsequent filemap_write_and_wait().
-	 */
-	if (rw == WRITE) {
-		write_len = iov_length(iov, nr_segs);
-		end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
-		if (mapping_mapped(mapping))
-			unmap_mapping_range(mapping, offset, write_len, 0);
-	}
-
-	retval = filemap_write_and_wait(mapping);
-	if (retval)
-		goto out;
-
-	/*
-	 * After a write we want buffered reads to be sure to go to disk to get
-	 * the new data.  We invalidate clean cached page from the region we're
-	 * about to write.  We do this *before* the write so that we can return
-	 * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
-	 */
-	if (rw == WRITE && mapping->nrpages) {
-		retval = invalidate_inode_pages2_range(mapping,
-					offset >> PAGE_CACHE_SHIFT, end);
-		if (retval)
-			goto out;
-	}
-
-	retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
-
-	/*
-	 * Finally, try again to invalidate clean pages which might have been
-	 * cached by non-direct readahead, or faulted in by get_user_pages()
-	 * if the source of the write was an mmap'ed region of the file
-	 * we're writing.  Either one is a pretty crazy thing to do,
-	 * so we don't support it 100%.  If this invalidation
-	 * fails, tough, the write still worked...
-	 */
-	if (rw == WRITE && mapping->nrpages) {
-		invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
-	}
-out:
-	return retval;
-}
-
 /**
  * try_to_release_page() - release old fs-specific metadata on a page
  *