Kirill Smelkov / linux · Commits

commit 4f18cd31
Author: Al Viro <viro@zeniv.linux.org.uk>
Date:   Feb 05, 2014
Parent: 4bafbec7

    take iov_iter stuff to mm/iov_iter.c

    Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Showing 3 changed files with 225 additions and 222 deletions (+225 -222):
  mm/Makefile      +1    -1
  mm/filemap.c     +0  -221
  mm/iov_iter.c  +224    -0
mm/Makefile
@@ -17,7 +17,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
 			   util.o mmzone.o vmstat.o backing-dev.o \
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o balloon_compaction.o \
-			   interval_tree.o list_lru.o $(mmu-y)
+			   interval_tree.o list_lru.o iov_iter.o $(mmu-y)
 
 obj-y += init-mm.o
mm/filemap.c
@@ -1085,84 +1085,6 @@ static void shrink_readahead_size_eio(struct file *filp,
 	ra->ra_pages /= 4;
 }
 
-size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
-{
-	size_t skip, copy, left, wanted;
-	const struct iovec *iov;
-	char __user *buf;
-	void *kaddr, *from;
-
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
-	wanted = bytes;
-	iov = i->iov;
-	skip = i->iov_offset;
-	buf = iov->iov_base + skip;
-	copy = min(bytes, iov->iov_len - skip);
-
-	if (!fault_in_pages_writeable(buf, copy)) {
-		kaddr = kmap_atomic(page);
-		from = kaddr + offset;
-
-		/* first chunk, usually the only one */
-		left = __copy_to_user_inatomic(buf, from, copy);
-		copy -= left;
-		skip += copy;
-		from += copy;
-		bytes -= copy;
-
-		while (unlikely(!left && bytes)) {
-			iov++;
-			buf = iov->iov_base;
-			copy = min(bytes, iov->iov_len);
-			left = __copy_to_user_inatomic(buf, from, copy);
-			copy -= left;
-			skip = copy;
-			from += copy;
-			bytes -= copy;
-		}
-		if (likely(!bytes)) {
-			kunmap_atomic(kaddr);
-			goto done;
-		}
-		offset = from - kaddr;
-		buf += copy;
-		kunmap_atomic(kaddr);
-		copy = min(bytes, iov->iov_len - skip);
-	}
-	/* Too bad - revert to non-atomic kmap */
-	kaddr = kmap(page);
-	from = kaddr + offset;
-	left = __copy_to_user(buf, from, copy);
-	copy -= left;
-	skip += copy;
-	from += copy;
-	bytes -= copy;
-	while (unlikely(!left && bytes)) {
-		iov++;
-		buf = iov->iov_base;
-		copy = min(bytes, iov->iov_len);
-		left = __copy_to_user(buf, from, copy);
-		copy -= left;
-		skip = copy;
-		from += copy;
-		bytes -= copy;
-	}
-	kunmap(page);
-done:
-	i->count -= wanted - bytes;
-	i->nr_segs -= iov - i->iov;
-	i->iov = iov;
-	i->iov_offset = skip;
-	return wanted - bytes;
-}
-EXPORT_SYMBOL(copy_page_to_iter);
-
 /**
  * do_generic_file_read - generic file read routine
  * @filp:	the file to read
@@ -1957,149 +1879,6 @@ struct page *read_cache_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page);
 
-static size_t __iovec_copy_from_user_inatomic(char *vaddr,
-			const struct iovec *iov, size_t base, size_t bytes)
-{
-	size_t copied = 0, left = 0;
-
-	while (bytes) {
-		char __user *buf = iov->iov_base + base;
-		int copy = min(bytes, iov->iov_len - base);
-
-		base = 0;
-		left = __copy_from_user_inatomic(vaddr, buf, copy);
-		copied += copy;
-		bytes -= copy;
-		vaddr += copy;
-		iov++;
-
-		if (unlikely(left))
-			break;
-	}
-	return copied - left;
-}
-
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were successfully copied. If a fault is encountered then return the number of
- * bytes which were copied.
- */
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	kaddr = kmap_atomic(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-						i->iov, i->iov_offset, bytes);
-	}
-	kunmap_atomic(kaddr);
-
-	return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
-
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-		struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-	char *kaddr;
-	size_t copied;
-
-	kaddr = kmap(page);
-	if (likely(i->nr_segs == 1)) {
-		int left;
-		char __user *buf = i->iov->iov_base + i->iov_offset;
-		left = __copy_from_user(kaddr + offset, buf, bytes);
-		copied = bytes - left;
-	} else {
-		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-						i->iov, i->iov_offset, bytes);
-	}
-	kunmap(page);
-	return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
-	BUG_ON(i->count < bytes);
-
-	if (likely(i->nr_segs == 1)) {
-		i->iov_offset += bytes;
-		i->count -= bytes;
-	} else {
-		const struct iovec *iov = i->iov;
-		size_t base = i->iov_offset;
-		unsigned long nr_segs = i->nr_segs;
-
-		/*
-		 * The !iov->iov_len check ensures we skip over unlikely
-		 * zero-length segments (without overruning the iovec).
-		 */
-		while (bytes || unlikely(i->count && !iov->iov_len)) {
-			int copy;
-
-			copy = min(bytes, iov->iov_len - base);
-			BUG_ON(!i->count || i->count < copy);
-			i->count -= copy;
-			bytes -= copy;
-			base += copy;
-			if (iov->iov_len == base) {
-				iov++;
-				nr_segs--;
-				base = 0;
-			}
-		}
-		i->iov = iov;
-		i->iov_offset = base;
-		i->nr_segs = nr_segs;
-	}
-}
-EXPORT_SYMBOL(iov_iter_advance);
-
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
-	char __user *buf = i->iov->iov_base + i->iov_offset;
-	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-	return fault_in_pages_readable(buf, bytes);
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
-/*
- * Return the count of just the current iov_iter segment.
- */
-size_t iov_iter_single_seg_count(const struct iov_iter *i)
-{
-	const struct iovec *iov = i->iov;
-	if (i->nr_segs == 1)
-		return i->count;
-	else
-		return min(i->count, iov->iov_len - i->iov_offset);
-}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
-
 /*
  * Performs necessary checks before doing a write
  *
mm/iov_iter.c (new file, 0 → 100644)

#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
EXPORT_SYMBOL(copy_page_to_iter);
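
copy_page_to_iter() walks the destination iovec one segment at a time, first under kmap_atomic() and, if a segment would fault, again under plain kmap(). The kernel-only pieces (kmap_atomic, __copy_to_user_inatomic, struct iov_iter) cannot run outside the kernel, so here is a minimal userspace sketch of just the segment walk, assuming a fault-free memcpy in place of the user-copy primitives; scatter_to_iovec() and every name in it are hypothetical, not part of the commit:

/* Userspace model of copy_page_to_iter()'s segment walk; fault-free,
 * so memcpy stands in for __copy_to_user_inatomic(). Illustration only. */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Scatter 'bytes' from 'from' across iov[0..nr_segs), starting 'skip'
 * bytes into the first segment; returns the number of bytes copied. */
static size_t scatter_to_iovec(const char *from, size_t bytes,
			       const struct iovec *iov, size_t nr_segs,
			       size_t skip)
{
	size_t wanted = bytes;

	while (bytes && nr_segs) {
		size_t copy = iov->iov_len - skip;
		if (copy > bytes)
			copy = bytes;
		memcpy((char *)iov->iov_base + skip, from, copy);
		from += copy;
		bytes -= copy;
		skip = 0;		/* later segments start at offset 0 */
		iov++;
		nr_segs--;
	}
	return wanted - bytes;		/* same "wanted - bytes" accounting */
}

int main(void)
{
	char page[] = "abcdefghij";	/* stands in for the kmapped page */
	char a[4] = {0}, b[8] = {0};
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof a },
		{ .iov_base = b, .iov_len = sizeof b },
	};
	size_t done = scatter_to_iovec(page, 10, iov, 2, 1); /* skip = 1 */

	/* prints: copied 10: a+1="abc" b="defghij" */
	printf("copied %zu: a+1=\"%.3s\" b=\"%.7s\"\n", done, a + 1, b);
	return 0;
}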

static size_t __iovec_copy_from_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_from_user_inatomic(vaddr, buf, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}
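
The subtle point in __iovec_copy_from_user_inatomic() is its accounting: copied is bumped by the full chunk size before the fault check, and the short copy is backed out once at the end as copied - left. Here is a small userspace sketch of that accounting, with a hypothetical flaky_copy() standing in for __copy_from_user_inatomic() (which, like it, returns the number of bytes it failed to copy):

/* Userspace model of __iovec_copy_from_user_inatomic()'s accounting. */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Pretend-copy that "faults" once *budget total bytes have been moved;
 * returns the bytes NOT copied, like __copy_from_user_inatomic(). */
static size_t flaky_copy(char *dst, const char *src, size_t n, size_t *budget)
{
	size_t can = n < *budget ? n : *budget;
	memcpy(dst, src, can);
	*budget -= can;
	return n - can;
}

static size_t gather_from_iovec(char *vaddr, const struct iovec *iov,
				size_t base, size_t bytes, size_t budget)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		const char *buf = (const char *)iov->iov_base + base;
		size_t copy = bytes < iov->iov_len - base
				? bytes : iov->iov_len - base;

		base = 0;
		left = flaky_copy(vaddr, buf, copy, &budget);
		copied += copy;		/* optimistically counts 'copy'... */
		bytes -= copy;
		vaddr += copy;
		iov++;
		if (left)		/* ...then backs out what faulted */
			break;
	}
	return copied - left;
}

int main(void)
{
	char dst[16] = {0};
	struct iovec iov[2] = {
		{ .iov_base = "hello", .iov_len = 5 },
		{ .iov_base = "world", .iov_len = 5 },
	};

	/* "fault" after 7 bytes: all of segment 0, 2 of 5 from segment 1;
	 * prints: gathered 7: "hellowo" */
	printf("gathered %zu: \"%s\"\n",
	       gather_from_iovec(dst, iov, 0, 10, 7), dst);
	return 0;
}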

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied. If a fault is encountered then return the number of
 * bytes which were copied.
 */
size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap_atomic(kaddr);

	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

/*
 * This has the same sideeffects and return value as
 * iov_iter_copy_from_user_atomic().
 * The difference is that it attempts to resolve faults.
 * Page must not be locked.
 */
size_t iov_iter_copy_from_user(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = i->iov->iov_base + i->iov_offset;
		left = __copy_from_user(kaddr + offset, buf, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
						i->iov, i->iov_offset, bytes);
	}
	kunmap(page);
	return copied;
}
EXPORT_SYMBOL(iov_iter_copy_from_user);

void iov_iter_advance(struct iov_iter *i, size_t bytes)
{
	BUG_ON(i->count < bytes);

	if (likely(i->nr_segs == 1)) {
		i->iov_offset += bytes;
		i->count -= bytes;
	} else {
		const struct iovec *iov = i->iov;
		size_t base = i->iov_offset;
		unsigned long nr_segs = i->nr_segs;

		/*
		 * The !iov->iov_len check ensures we skip over unlikely
		 * zero-length segments (without overruning the iovec).
		 */
		while (bytes || unlikely(i->count && !iov->iov_len)) {
			int copy;

			copy = min(bytes, iov->iov_len - base);
			BUG_ON(!i->count || i->count < copy);
			i->count -= copy;
			bytes -= copy;
			base += copy;
			if (iov->iov_len == base) {
				iov++;
				nr_segs--;
				base = 0;
			}
		}
		i->iov = iov;
		i->iov_offset = base;
		i->nr_segs = nr_segs;
	}
}
EXPORT_SYMBOL(iov_iter_advance);
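
The multi-segment branch of iov_iter_advance() both consumes bytes and hops over zero-length segments, so the iterator never parks on one. A minimal userspace sketch of that loop, with a cut-down struct iter standing in for struct iov_iter (all names here are hypothetical):

/* Userspace model of iov_iter_advance()'s multi-segment branch. */
#include <stdio.h>
#include <sys/uio.h>

struct iter {			/* minimal stand-in for struct iov_iter */
	const struct iovec *iov;
	unsigned long nr_segs;
	size_t iov_offset;
	size_t count;
};

static void advance(struct iter *i, size_t bytes)
{
	const struct iovec *iov = i->iov;
	size_t base = i->iov_offset;
	unsigned long nr_segs = i->nr_segs;

	/* like the kernel loop, this also steps over zero-length segments */
	while (bytes || (i->count && !iov->iov_len)) {
		size_t copy = bytes < iov->iov_len - base
				? bytes : iov->iov_len - base;

		i->count -= copy;
		bytes -= copy;
		base += copy;
		if (iov->iov_len == base) {	/* segment exhausted */
			iov++;
			nr_segs--;
			base = 0;
		}
	}
	i->iov = iov;
	i->iov_offset = base;
	i->nr_segs = nr_segs;
}

int main(void)
{
	char a[3], z[1], b[4];
	struct iovec iov[3] = {
		{ a, sizeof a },
		{ z, 0 },	/* zero-length segment in the middle */
		{ b, sizeof b },
	};
	struct iter i = { iov, 3, 0, 7 };

	advance(&i, 3);		/* consume all of a, hop over z */
	/* prints: nr_segs=1 offset=0 count=4 */
	printf("nr_segs=%lu offset=%zu count=%zu\n",
	       i.nr_segs, i.iov_offset, i.count);
	return 0;
}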

/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	char __user *buf = i->iov->iov_base + i->iov_offset;
	bytes = min(bytes, i->iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	const struct iovec *iov = i->iov;
	if (i->nr_segs == 1)
		return i->count;
	else
		return min(i->count, iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);
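
iov_iter_single_seg_count() deliberately reports only the remainder of the current segment when there are several, capped by the iterator's total count. A tiny userspace sketch of that rule (function and names are hypothetical):

/* Userspace model of iov_iter_single_seg_count(). */
#include <stdio.h>
#include <sys/uio.h>

static size_t single_seg_count(const struct iovec *iov,
			       unsigned long nr_segs,
			       size_t iov_offset, size_t count)
{
	if (nr_segs == 1)
		return count;			/* whole remainder */
	return count < iov->iov_len - iov_offset
		? count
		: iov->iov_len - iov_offset;	/* current segment only */
}

int main(void)
{
	char a[8], b[8];
	struct iovec iov[2] = { { a, sizeof a }, { b, sizeof b } };

	/* 2 segments, 3 bytes into the first, 13 bytes still to go:
	 * only the 5 remaining bytes of the current segment are reported.
	 * prints: 5 */
	printf("%zu\n", single_seg_count(iov, 2, 3, 13));
	return 0;
}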