Commit dbc2fba3 authored by Linus Torvalds

Merge branch 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull iov_iter updates from Al Viro:
 "A couple of iov_iter patches - Christoph's crapectomy (the last
  remaining user of iov_for_each() went away with lustre, IIRC) and
  Eric's optimization of sanity checks"

* 'work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  iov_iter: optimize page_copy_sane()
  uio: remove the unused iov_for_each macro
parents 5f739e4a 6daef95b
@@ -290,7 +290,6 @@ ForEachMacros:
   - 'idr_for_each_entry_ul'
   - 'inet_bind_bucket_for_each'
   - 'inet_lhash2_for_each_icsk_rcu'
-  - 'iov_for_each'
   - 'key_for_each'
   - 'key_for_each_safe'
   - 'klp_for_each_func'
......
@@ -110,14 +110,6 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
 	};
 }
 
-#define iov_for_each(iov, iter, start)				\
-	if (iov_iter_type(start) == ITER_IOVEC ||		\
-			iov_iter_type(start) == ITER_KVEC)	\
-		for (iter = (start);				\
-		     (iter).count &&				\
-		     ((iov = iov_iter_iovec(&(iter))), 1);	\
-		     iov_iter_advance(&(iter), (iov).iov_len))
-
 size_t iov_iter_copy_from_user_atomic(struct page *page,
 		struct iov_iter *i, unsigned long offset, size_t bytes);
 void iov_iter_advance(struct iov_iter *i, size_t bytes);
......
@@ -861,8 +861,21 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache);
 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 {
-	struct page *head = compound_head(page);
-	size_t v = n + offset + page_address(page) - page_address(head);
+	struct page *head;
+	size_t v = n + offset;
+
+	/*
+	 * The general case needs to access the page order in order
+	 * to compute the page size.
+	 * However, we mostly deal with order-0 pages and thus can
+	 * avoid a possible cache line miss for requests that fit all
+	 * page orders.
+	 */
+	if (n <= v && v <= PAGE_SIZE)
+		return true;
+
+	head = compound_head(page);
+	v += (page - head) << PAGE_SHIFT;
 	if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
 		return true;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment