Commit 24237d11 authored Mar 07, 2002 by Linus Torvalds
mm cleanup: split out mincore() system call from filemap.c
parent 893e544c
Showing 3 changed files with 176 additions and 155 deletions (+176 -155)

mm/Makefile    +1    -1
mm/filemap.c   +0    -154
mm/mincore.c   +175  -0
mm/Makefile
...
@@ -14,6 +14,6 @@ export-objs := shmem.o filemap.o mempool.o page_alloc.o
 obj-y    := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
             vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
             page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \
-            shmem.o highmem.o mempool.o msync.o
+            shmem.o highmem.o mempool.o msync.o mincore.o

 include $(TOPDIR)/Rules.make
mm/filemap.c
...
@@ -2455,160 +2455,6 @@ asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior)
         return error;
 }
 
-/*
- * Later we can get more picky about what "in core" means precisely.
- * For now, simply check to see if the page is in the page cache,
- * and is up to date; i.e. that no page-in operation would be required
- * at this time if an application were to map and access this page.
- */
-static unsigned char mincore_page(struct vm_area_struct * vma,
-        unsigned long pgoff)
-{
-        unsigned char present = 0;
-        struct address_space * as = vma->vm_file->f_dentry->d_inode->i_mapping;
-        struct page * page, ** hash = page_hash(as, pgoff);
-
-        spin_lock(&pagecache_lock);
-        page = __find_page_nolock(as, pgoff, *hash);
-        if ((page) && (Page_Uptodate(page)))
-                present = 1;
-        spin_unlock(&pagecache_lock);
-
-        return present;
-}
-
-static long mincore_vma(struct vm_area_struct * vma,
-        unsigned long start, unsigned long end, unsigned char * vec)
-{
-        long error, i, remaining;
-        unsigned char * tmp;
-
-        error = -ENOMEM;
-        if (!vma->vm_file)
-                return error;
-
-        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-        if (end > vma->vm_end)
-                end = vma->vm_end;
-        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-
-        error = -EAGAIN;
-        tmp = (unsigned char *) __get_free_page(GFP_KERNEL);
-        if (!tmp)
-                return error;
-
-        /* (end - start) is # of pages, and also # of bytes in "vec */
-        remaining = (end - start),
-
-        error = 0;
-        for (i = 0; remaining > 0; remaining -= PAGE_SIZE, i++) {
-                int j = 0;
-                long thispiece = (remaining < PAGE_SIZE) ?
-                        remaining : PAGE_SIZE;
-
-                while (j < thispiece)
-                        tmp[j++] = mincore_page(vma, start++);
-
-                if (copy_to_user(vec + PAGE_SIZE * i, tmp, thispiece)) {
-                        error = -EFAULT;
-                        break;
-                }
-        }
-
-        free_page((unsigned long) tmp);
-        return error;
-}
-
-/*
- * The mincore(2) system call.
- *
- * mincore() returns the memory residency status of the pages in the
- * current process's address space specified by [addr, addr + len).
- * The status is returned in a vector of bytes. The least significant
- * bit of each byte is 1 if the referenced page is in memory, otherwise
- * it is zero.
- *
- * Because the status of a page can change after mincore() checks it
- * but before it returns to the application, the returned vector may
- * contain stale information. Only locked pages are guaranteed to
- * remain in memory.
- *
- * return values:
- *  zero    - success
- *  -EFAULT - vec points to an illegal address
- *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE,
- *            or len has a nonpositive value
- *  -ENOMEM - Addresses in the range [addr, addr + len] are
- *            invalid for the address space of this process, or
- *            specify one or more pages which are not currently
- *            mapped
- *  -EAGAIN - A kernel resource was temporarily unavailable.
- */
-asmlinkage long sys_mincore(unsigned long start, size_t len,
-        unsigned char * vec)
-{
-        int index = 0;
-        unsigned long end;
-        struct vm_area_struct * vma;
-        int unmapped_error = 0;
-        long error = -EINVAL;
-
-        down_read(&current->mm->mmap_sem);
-
-        if (start & ~PAGE_CACHE_MASK)
-                goto out;
-        len = (len + ~PAGE_CACHE_MASK) & PAGE_CACHE_MASK;
-        end = start + len;
-        if (end < start)
-                goto out;
-
-        error = 0;
-        if (end == start)
-                goto out;
-
-        /*
-         * If the interval [start,end) covers some unmapped address
-         * ranges, just ignore them, but return -ENOMEM at the end.
-         */
-        vma = find_vma(current->mm, start);
-        for (;;) {
-                /* Still start < end. */
-                error = -ENOMEM;
-                if (!vma)
-                        goto out;
-
-                /* Here start < vma->vm_end. */
-                if (start < vma->vm_start) {
-                        unmapped_error = -ENOMEM;
-                        start = vma->vm_start;
-                }
-
-                /* Here vma->vm_start <= start < vma->vm_end. */
-                if (end <= vma->vm_end) {
-                        if (start < end) {
-                                error = mincore_vma(vma, start, end,
-                                                        &vec[index]);
-                                if (error)
-                                        goto out;
-                        }
-                        error = unmapped_error;
-                        goto out;
-                }
-
-                /* Here vma->vm_start <= start < vma->vm_end < end. */
-                error = mincore_vma(vma, start, vma->vm_end, &vec[index]);
-                if (error)
-                        goto out;
-                index += (vma->vm_end - start) >> PAGE_CACHE_SHIFT;
-                start = vma->vm_end;
-                vma = vma->vm_next;
-        }
-
-out:
-        up_read(&current->mm->mmap_sem);
-        return error;
-}
-
 static inline
 struct page *__read_cache_page(struct address_space *mapping,
                                 unsigned long index,
...
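The block removed above reappears almost verbatim in the new mm/mincore.c below; the only behavioural difference visible in the diff is in mincore_page(), which drops the explicit pagecache_lock / __find_page_nolock() pair in favour of __find_get_page() followed by page_cache_release(). The mincore(2) comment block also spells out the user-visible contract: start must be page-cache aligned, len is rounded up to whole pages, and the caller supplies one status byte per page of the range. A minimal user-space sketch of that sizing rule follows; the helper name mincore_vec_len() and the use of sysconf() are illustrative assumptions, not part of this commit.

#include <stddef.h>
#include <unistd.h>

/* Sketch only: number of status bytes a mincore() caller must provide
 * for a query of `len` bytes starting at a page-aligned address, i.e.
 * one byte per page after rounding `len` up to a whole page. */
static size_t mincore_vec_len(size_t len)
{
        size_t page = (size_t) sysconf(_SC_PAGESIZE);

        return (len + page - 1) / page;
}

On a 4 KiB-page machine a 10 KiB query therefore needs a 3-byte vector, and a start address that is not page aligned is rejected with -EINVAL by the start & ~PAGE_CACHE_MASK check in sys_mincore().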
mm/mincore.c
0 → 100644
/*
 *      linux/mm/mincore.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/mman.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct vm_area_struct * vma,
        unsigned long pgoff)
{
        unsigned char present = 0;
        struct address_space * as = vma->vm_file->f_dentry->d_inode->i_mapping;
        struct page * page, ** hash = page_hash(as, pgoff);

        page = __find_get_page(as, pgoff, hash);
        if (page) {
                present = Page_Uptodate(page);
                page_cache_release(page);
        }

        return present;
}

static long mincore_vma(struct vm_area_struct * vma,
        unsigned long start, unsigned long end, unsigned char * vec)
{
        long error, i, remaining;
        unsigned char * tmp;

        error = -ENOMEM;
        if (!vma->vm_file)
                return error;

        start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        if (end > vma->vm_end)
                end = vma->vm_end;
        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

        error = -EAGAIN;
        tmp = (unsigned char *) __get_free_page(GFP_KERNEL);
        if (!tmp)
                return error;

        /* (end - start) is # of pages, and also # of bytes in "vec */
        remaining = (end - start),

        error = 0;
        for (i = 0; remaining > 0; remaining -= PAGE_SIZE, i++) {
                int j = 0;
                long thispiece = (remaining < PAGE_SIZE) ?
                        remaining : PAGE_SIZE;

                while (j < thispiece)
                        tmp[j++] = mincore_page(vma, start++);

                if (copy_to_user(vec + PAGE_SIZE * i, tmp, thispiece)) {
                        error = -EFAULT;
                        break;
                }
        }

        free_page((unsigned long) tmp);
        return error;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes. The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information. Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE,
 *            or len has a nonpositive value
 *  -ENOMEM - Addresses in the range [addr, addr + len] are
 *            invalid for the address space of this process, or
 *            specify one or more pages which are not currently
 *            mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
asmlinkage long sys_mincore(unsigned long start, size_t len,
        unsigned char * vec)
{
        int index = 0;
        unsigned long end;
        struct vm_area_struct * vma;
        int unmapped_error = 0;
        long error = -EINVAL;

        down_read(&current->mm->mmap_sem);

        if (start & ~PAGE_CACHE_MASK)
                goto out;
        len = (len + ~PAGE_CACHE_MASK) & PAGE_CACHE_MASK;
        end = start + len;
        if (end < start)
                goto out;

        error = -EFAULT;
        if (!access_ok(VERIFY_WRITE, (unsigned long) vec, len >> PAGE_SHIFT))
                goto out;

        error = 0;
        if (end == start)
                goto out;

        /*
         * If the interval [start,end) covers some unmapped address
         * ranges, just ignore them, but return -ENOMEM at the end.
         */
        vma = find_vma(current->mm, start);
        for (;;) {
                /* Still start < end. */
                error = -ENOMEM;
                if (!vma)
                        goto out;

                /* Here start < vma->vm_end. */
                if (start < vma->vm_start) {
                        unmapped_error = -ENOMEM;
                        start = vma->vm_start;
                }

                /* Here vma->vm_start <= start < vma->vm_end. */
                if (end <= vma->vm_end) {
                        if (start < end) {
                                error = mincore_vma(vma, start, end,
                                                        &vec[index]);
                                if (error)
                                        goto out;
                        }
                        error = unmapped_error;
                        goto out;
                }

                /* Here vma->vm_start <= start < vma->vm_end < end. */
                error = mincore_vma(vma, start, vma->vm_end, &vec[index]);
                if (error)
                        goto out;
                index += (vma->vm_end - start) >> PAGE_CACHE_SHIFT;
                start = vma->vm_end;
                vma = vma->vm_next;
        }

out:
        up_read(&current->mm->mmap_sem);
        return error;
}
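To close the loop from user space, here is a small, self-contained sketch of the call whose implementation now lives in mm/mincore.c: map a file read-only, ask mincore() for the residency vector, and test bit 0 of each byte as the comment above specifies. The default test path, the _DEFAULT_SOURCE define for the glibc prototype, and the error-handling style are illustrative assumptions, not part of the commit.

/* Illustrative sketch only: exercises mincore(2) from user space. */
#define _DEFAULT_SOURCE         /* assumed: exposes the mincore() prototype in glibc */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : "/etc/hosts"; /* assumed test file */
        long page = sysconf(_SC_PAGESIZE);
        struct stat st;
        int fd = open(path, O_RDONLY);

        if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0) {
                perror(path);
                return 1;
        }

        size_t npages = (st.st_size + page - 1) / page;
        void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
        unsigned char *vec = malloc(npages);

        if (map == MAP_FAILED || !vec) {
                perror("mmap/malloc");
                return 1;
        }

        /* One status byte per page; bit 0 set means the page is in core. */
        if (mincore(map, st.st_size, vec) < 0) {
                perror("mincore");
                return 1;
        }

        size_t resident = 0;
        for (size_t i = 0; i < npages; i++)
                resident += vec[i] & 1;

        printf("%s: %zu of %zu pages resident\n", path, resident, npages);
        free(vec);
        close(fd);
        return 0;
}

Because page residency can change at any moment, as the comment in mm/mincore.c warns, repeated runs may report different counts; only mlock()ed pages are guaranteed to stay resident.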