Commit 00e23707 authored by David Howells

iov_iter: Use accessor function

Use accessor functions to access an iterator's type and direction.  This
allows for the possibility of using some other method of determining the
type of iterator than if-chains with bitwise-AND conditions.
Signed-off-by: David Howells <dhowells@redhat.com>
parent 1fcb748d
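
For reference, the pattern the new helpers encode can be seen in a minimal standalone sketch (plain userspace C, not part of this commit; demo_iter, its constants and main() are illustrative only): the iterator's direction bit and its type value share one field, and callers ask the helpers instead of testing bits directly.

/* Standalone illustration, not kernel code: direction and iterator type
 * share one 'type' field, and callers use helpers rather than open-coded
 * bitwise tests. demo_iter and main() are hypothetical. */
#include <stdbool.h>
#include <stdio.h>

#define READ  0
#define WRITE 1

enum iter_type {
        ITER_IOVEC = 0,
        ITER_KVEC = 2,
        ITER_BVEC = 4,
        ITER_PIPE = 8,
};

struct demo_iter {
        unsigned int type;              /* direction bit ORed with iter_type */
};

static inline enum iter_type demo_iter_type(const struct demo_iter *i)
{
        return i->type & ~(READ | WRITE);       /* strip the direction bit */
}

static inline unsigned char demo_iter_rw(const struct demo_iter *i)
{
        return i->type & (READ | WRITE);        /* keep only the direction */
}

static inline bool demo_iter_is_pipe(const struct demo_iter *i)
{
        return demo_iter_type(i) == ITER_PIPE;
}

int main(void)
{
        struct demo_iter it = { .type = ITER_PIPE | WRITE };

        /* Old style would test "it.type & ITER_PIPE"; the accessors hide the layout. */
        if (demo_iter_is_pipe(&it) && demo_iter_rw(&it) == WRITE)
                printf("pipe iterator, WRITE direction\n");
        return 0;
}
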
@@ -1255,7 +1255,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
         /*
          * success
          */
-        if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
+        if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
             (map_data && map_data->from_user)) {
                 ret = bio_copy_from_iter(bio, iter);
                 if (ret)
@@ -349,7 +349,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
         dio->size = 0;
         dio->multi_bio = false;
-        dio->should_dirty = is_read && (iter->type == ITER_IOVEC);
+        dio->should_dirty = is_read && iter_is_iovec(iter);
         blk_start_plug(&plug);
         for (;;) {
@@ -658,7 +658,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
                 if (ret < 0)
                         return ret;
-                if (unlikely(to->type & ITER_PIPE)) {
+                if (unlikely(iov_iter_is_pipe(to))) {
                         size_t page_off;
                         ret = iov_iter_get_pages_alloc(to, &pages, len,
                                                        &page_off);
@@ -2990,7 +2990,7 @@ cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
                 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
                 size_t written;
-                if (unlikely(iter->type & ITER_PIPE)) {
+                if (unlikely(iov_iter_is_pipe(iter))) {
                         void *addr = kmap_atomic(page);
                         written = copy_to_iter(addr, copy, iter);
@@ -3302,7 +3302,7 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
         if (!is_sync_kiocb(iocb))
                 ctx->iocb = iocb;
-        if (to->type == ITER_IOVEC)
+        if (iter_is_iovec(to))
                 ctx->should_dirty = true;
         rc = setup_aio_ctx_iter(ctx, to, READ);
@@ -786,7 +786,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
         struct page **pages = NULL;
         struct bio_vec *bv = NULL;
-        if (iter->type & ITER_KVEC) {
+        if (iov_iter_is_kvec(iter)) {
                 memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
                 ctx->len = count;
                 iov_iter_advance(iter, count);
@@ -2054,14 +2054,22 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
         info->smbd_recv_pending++;
-        switch (msg->msg_iter.type) {
-        case READ | ITER_KVEC:
+        if (iov_iter_rw(&msg->msg_iter) == WRITE) {
+                /* It's a bug in upper layer to get there */
+                cifs_dbg(VFS, "CIFS: invalid msg iter dir %u\n",
+                         iov_iter_rw(&msg->msg_iter));
+                rc = -EINVAL;
+                goto out;
+        }
+
+        switch (iov_iter_type(&msg->msg_iter)) {
+        case ITER_KVEC:
                 buf = msg->msg_iter.kvec->iov_base;
                 to_read = msg->msg_iter.kvec->iov_len;
                 rc = smbd_recv_buf(info, buf, to_read);
                 break;
-        case READ | ITER_BVEC:
+        case ITER_BVEC:
                 page = msg->msg_iter.bvec->bv_page;
                 page_offset = msg->msg_iter.bvec->bv_offset;
                 to_read = msg->msg_iter.bvec->bv_len;
@@ -2071,10 +2079,11 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
         default:
                 /* It's a bug in upper layer to get there */
                 cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
-                        msg->msg_iter.type);
+                        iov_iter_type(&msg->msg_iter));
                 rc = -EINVAL;
         }
+out:
         info->smbd_recv_pending--;
         wake_up(&info->wait_smbd_recv_pending);
@@ -1313,7 +1313,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
         spin_lock_init(&dio->bio_lock);
         dio->refcount = 1;
-        dio->should_dirty = (iter->type == ITER_IOVEC);
+        dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
         sdio.iter = iter;
         sdio.final_block_in_request = end >> blkbits;
@@ -1271,7 +1271,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
         ssize_t ret = 0;
         /* Special case for kernel I/O: can copy directly into the buffer */
-        if (ii->type & ITER_KVEC) {
+        if (iov_iter_is_kvec(ii)) {
                 unsigned long user_addr = fuse_get_user_addr(ii);
                 size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
@@ -1795,7 +1795,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                 if (pos >= dio->i_size)
                         goto out_free_dio;
-                if (iter->type == ITER_IOVEC)
+                if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
                         dio->flags |= IOMAP_DIO_DIRTY;
         } else {
                 flags |= IOMAP_WRITE;
@@ -21,7 +21,7 @@ struct kvec {
         size_t iov_len;
 };
 
-enum {
+enum iter_type {
         ITER_IOVEC = 0,
         ITER_KVEC = 2,
         ITER_BVEC = 4,
@@ -47,6 +47,36 @@ struct iov_iter {
         };
 };
 
+static inline enum iter_type iov_iter_type(const struct iov_iter *i)
+{
+        return i->type & ~(READ | WRITE);
+}
+
+static inline bool iter_is_iovec(const struct iov_iter *i)
+{
+        return iov_iter_type(i) == ITER_IOVEC;
+}
+
+static inline bool iov_iter_is_kvec(const struct iov_iter *i)
+{
+        return iov_iter_type(i) == ITER_KVEC;
+}
+
+static inline bool iov_iter_is_bvec(const struct iov_iter *i)
+{
+        return iov_iter_type(i) == ITER_BVEC;
+}
+
+static inline bool iov_iter_is_pipe(const struct iov_iter *i)
+{
+        return iov_iter_type(i) == ITER_PIPE;
+}
+
+static inline unsigned char iov_iter_rw(const struct iov_iter *i)
+{
+        return i->type & (READ | WRITE);
+}
+
 /*
  * Total number of bytes covered by an iovec.
  *
@@ -74,7 +104,8 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
 }
 
 #define iov_for_each(iov, iter, start)                          \
-        if (!((start).type & (ITER_BVEC | ITER_PIPE)))          \
+        if (iov_iter_type(start) == ITER_IOVEC ||               \
+            iov_iter_type(start) == ITER_KVEC)                  \
         for (iter = (start);                                    \
              (iter).count &&                                    \
              ((iov = iov_iter_iovec(&(iter))), 1);              \
@@ -202,19 +233,6 @@ static inline size_t iov_iter_count(const struct iov_iter *i)
         return i->count;
 }
 
-static inline bool iter_is_iovec(const struct iov_iter *i)
-{
-        return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
-}
-
-/*
- * Get one of READ or WRITE out of iter->type without any other flags OR'd in
- * with it.
- *
- * The ?: is just for type safety.
- */
-#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))
-
 /*
  * Cap the iov_iter by given limit; note that the second argument is
  * *not* the new size - it's upper limit for such. Passing it a value
@@ -558,7 +558,7 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
         const char *from = addr;
-        if (unlikely(i->type & ITER_PIPE))
+        if (unlikely(iov_iter_is_pipe(i)))
                 return copy_pipe_to_iter(addr, bytes, i);
         if (iter_is_iovec(i))
                 might_fault();
@@ -658,7 +658,7 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
         const char *from = addr;
         unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
-        if (unlikely(i->type & ITER_PIPE))
+        if (unlikely(iov_iter_is_pipe(i)))
                 return copy_pipe_to_iter_mcsafe(addr, bytes, i);
         if (iter_is_iovec(i))
                 might_fault();
@@ -692,7 +692,7 @@ EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 WARN_ON(1);
                 return 0;
         }
@@ -712,7 +712,7 @@ EXPORT_SYMBOL(_copy_from_iter);
 bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 WARN_ON(1);
                 return false;
         }
@@ -739,7 +739,7 @@ EXPORT_SYMBOL(_copy_from_iter_full);
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 WARN_ON(1);
                 return 0;
         }
@@ -773,7 +773,7 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 WARN_ON(1);
                 return 0;
         }
@@ -794,7 +794,7 @@ EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
 bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 WARN_ON(1);
                 return false;
         }
@@ -836,7 +836,7 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                 size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                 kunmap_atomic(kaddr);
                 return wanted;
-        } else if (likely(!(i->type & ITER_PIPE)))
+        } else if (likely(!iov_iter_is_pipe(i)))
                 return copy_page_to_iter_iovec(page, offset, bytes, i);
         else
                 return copy_page_to_iter_pipe(page, offset, bytes, i);
@@ -848,7 +848,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 {
         if (unlikely(!page_copy_sane(page, offset, bytes)))
                 return 0;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 WARN_ON(1);
                 return 0;
         }
@@ -888,7 +888,7 @@ static size_t pipe_zero(size_t bytes, struct iov_iter *i)
 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 {
-        if (unlikely(i->type & ITER_PIPE))
+        if (unlikely(iov_iter_is_pipe(i)))
                 return pipe_zero(bytes, i);
         iterate_and_advance(i, bytes, v,
                 clear_user(v.iov_base, v.iov_len),
@@ -908,7 +908,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                 kunmap_atomic(kaddr);
                 return 0;
         }
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 kunmap_atomic(kaddr);
                 WARN_ON(1);
                 return 0;
@@ -972,7 +972,7 @@ static void pipe_advance(struct iov_iter *i, size_t size)
 void iov_iter_advance(struct iov_iter *i, size_t size)
 {
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 pipe_advance(i, size);
                 return;
         }
@@ -987,7 +987,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
         if (WARN_ON(unroll > MAX_RW_COUNT))
                 return;
         i->count += unroll;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 struct pipe_inode_info *pipe = i->pipe;
                 int idx = i->idx;
                 size_t off = i->iov_offset;
@@ -1016,7 +1016,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
                 return;
         }
         unroll -= i->iov_offset;
-        if (i->type & ITER_BVEC) {
+        if (iov_iter_is_bvec(i)) {
                 const struct bio_vec *bvec = i->bvec;
                 while (1) {
                         size_t n = (--bvec)->bv_len;
@@ -1049,11 +1049,11 @@ EXPORT_SYMBOL(iov_iter_revert);
  */
 size_t iov_iter_single_seg_count(const struct iov_iter *i)
 {
-        if (unlikely(i->type & ITER_PIPE))
+        if (unlikely(iov_iter_is_pipe(i)))
                 return i->count;        // it is a silly place, anyway
         if (i->nr_segs == 1)
                 return i->count;
-        else if (i->type & ITER_BVEC)
+        else if (iov_iter_is_bvec(i))
                 return min(i->count, i->bvec->bv_len - i->iov_offset);
         else
                 return min(i->count, i->iov->iov_len - i->iov_offset);
@@ -1106,7 +1106,7 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
         unsigned long res = 0;
         size_t size = i->count;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
                         return size | i->iov_offset;
                 return size;
@@ -1125,7 +1125,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
         unsigned long res = 0;
         size_t size = i->count;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 WARN_ON(1);
                 return ~0U;
         }
@@ -1193,7 +1193,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
         if (maxsize > i->count)
                 maxsize = i->count;
-        if (unlikely(i->type & ITER_PIPE))
+        if (unlikely(iov_iter_is_pipe(i)))
                 return pipe_get_pages(i, pages, maxsize, maxpages, start);
         iterate_all_kinds(i, maxsize, v, ({
                 unsigned long addr = (unsigned long)v.iov_base;
@@ -1205,7 +1205,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
                 len = maxpages * PAGE_SIZE;
                 addr &= ~(PAGE_SIZE - 1);
                 n = DIV_ROUND_UP(len, PAGE_SIZE);
-                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
+                res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
                 if (unlikely(res < 0))
                         return res;
                 return (res == n ? len : res * PAGE_SIZE) - *start;
@@ -1270,7 +1270,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
         if (maxsize > i->count)
                 maxsize = i->count;
-        if (unlikely(i->type & ITER_PIPE))
+        if (unlikely(iov_iter_is_pipe(i)))
                 return pipe_get_pages_alloc(i, pages, maxsize, start);
         iterate_all_kinds(i, maxsize, v, ({
                 unsigned long addr = (unsigned long)v.iov_base;
@@ -1283,7 +1283,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                 p = get_pages_array(n);
                 if (!p)
                         return -ENOMEM;
-                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
+                res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
                 if (unlikely(res < 0)) {
                         kvfree(p);
                         return res;
@@ -1313,7 +1313,7 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
         __wsum sum, next;
         size_t off = 0;
         sum = *csum;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 WARN_ON(1);
                 return 0;
         }
@@ -1355,7 +1355,7 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
         __wsum sum, next;
         size_t off = 0;
         sum = *csum;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 WARN_ON(1);
                 return false;
         }
@@ -1400,7 +1400,7 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
         __wsum sum, next;
         size_t off = 0;
         sum = *csum;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 WARN_ON(1);     /* for now */
                 return 0;
         }
@@ -1443,7 +1443,7 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
         if (!size)
                 return 0;
-        if (unlikely(i->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(i))) {
                 struct pipe_inode_info *pipe = i->pipe;
                 size_t off;
                 int idx;
@@ -1481,11 +1481,11 @@ EXPORT_SYMBOL(iov_iter_npages);
 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
 {
         *new = *old;
-        if (unlikely(new->type & ITER_PIPE)) {
+        if (unlikely(iov_iter_is_pipe(new))) {
                 WARN_ON(1);
                 return NULL;
         }
-        if (new->type & ITER_BVEC)
+        if (iov_iter_is_bvec(new))
                 return new->bvec = kmemdup(new->bvec,
                                            new->nr_segs * sizeof(struct bio_vec),
                                            flags);
@@ -2122,7 +2122,7 @@ static ssize_t generic_file_buffered_read(struct kiocb *iocb,
                                 !mapping->a_ops->is_partially_uptodate)
                         goto page_not_up_to_date;
                 /* pipes can't handle partially uptodate pages */
-                if (unlikely(iter->type & ITER_PIPE))
+                if (unlikely(iov_iter_is_pipe(iter)))
                         goto page_not_up_to_date;
                 if (!trylock_page(page))
                         goto page_not_up_to_date;
@@ -322,7 +322,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
         if (!iov_iter_count(data))
                 return 0;
-        if (!(data->type & ITER_KVEC)) {
+        if (!iov_iter_is_kvec(data)) {
                 int n;
                 /*
                  * We allow only p9_max_pages pinned. We wait for the
@@ -799,7 +799,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
         struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
         bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
         unsigned char record_type = TLS_RECORD_TYPE_DATA;
-        bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+        bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
         bool eor = !(msg->msg_flags & MSG_MORE);
         size_t try_to_copy, copied = 0;
         struct sk_msg *msg_pl, *msg_en;
@@ -1457,7 +1457,7 @@ int tls_sw_recvmsg(struct sock *sk,
         bool cmsg = false;
         int target, err = 0;
         long timeo;
-        bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+        bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
         int num_async = 0;
         flags |= nonblock;