nexedi / linux / Commits

Commit 4cb92025
authored Oct 23, 2003 by David S. Miller
committed by Linus Torvalds, Oct 23, 2003
[SPARC64]: Get hugetlb support back into working shape.
parent 59488495

Showing 6 changed files with 365 additions and 287 deletions:
  arch/sparc64/Kconfig           +16  -0
  arch/sparc64/defconfig         +10  -4
  arch/sparc64/mm/hugetlbpage.c  +319 -249
  arch/sparc64/mm/init.c         +4   -33
  include/asm-sparc64/page.h     +6   -0
  include/asm-sparc64/pgtable.h  +10  -1
arch/sparc64/Kconfig
@@ -211,6 +211,22 @@ config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
 
+choice
+	prompt "SPARC64 Huge TLB Page Size"
+	depends on HUGETLB_PAGE
+	default HUGETLB_PAGE_SIZE_4MB
+
+config HUGETLB_PAGE_SIZE_4MB
+	bool "4MB"
+
+config HUGETLB_PAGE_SIZE_512K
+	bool "512K"
+
+config HUGETLB_PAGE_SIZE_64K
+	bool "64K"
+
+endchoice
+
 config GENERIC_ISA_DMA
 	bool
 	default y
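
The selected size feeds the HPAGE_SHIFT definitions added to include/asm-sparc64/page.h below. A minimal sketch (not part of the patch; it assumes sparc64's 8K base pages, PAGE_SHIFT == 13, and the usual HUGETLB_PAGE_ORDER definition from page.h) of the constants the rest of the diff relies on:

	/* Sketch: derived huge page constants, assuming PAGE_SHIFT == 13. */
	#define HPAGE_SIZE          (1UL << HPAGE_SHIFT)
	#define HPAGE_MASK          (~(HPAGE_SIZE - 1UL))
	#define HUGETLB_PAGE_ORDER  (HPAGE_SHIFT - PAGE_SHIFT)
	/*
	 * 4MB  choice: HPAGE_SHIFT 22, order 9 -> 512 base PTEs per huge page
	 * 512K choice: HPAGE_SHIFT 19, order 6 ->  64 base PTEs per huge page
	 * 64K  choice: HPAGE_SHIFT 16, order 3 ->   8 base PTEs per huge page
	 */

This is why loops such as for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) in hugetlbpage.c below touch one base PTE for every 8K page backing a huge mapping.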
arch/sparc64/defconfig
@@ -62,6 +62,9 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=m
 CONFIG_SPARC64=y
 CONFIG_HOTPLUG=y
 CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_HUGETLB_PAGE_SIZE_4MB=y
+# CONFIG_HUGETLB_PAGE_SIZE_512K is not set
+# CONFIG_HUGETLB_PAGE_SIZE_64K is not set
 CONFIG_GENERIC_ISA_DMA=y
 CONFIG_SBUS=y
 CONFIG_SBUSCHAR=y
@@ -315,6 +318,11 @@ CONFIG_AIC79XX_DEBUG_MASK=0
 # CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
 # CONFIG_SCSI_ADVANSYS is not set
 # CONFIG_SCSI_MEGARAID is not set
+CONFIG_SCSI_SATA=y
+CONFIG_SCSI_SATA_SVW=m
+CONFIG_SCSI_ATA_PIIX=m
+CONFIG_SCSI_SATA_PROMISE=m
+CONFIG_SCSI_SATA_VIA=m
 # CONFIG_SCSI_BUSLOGIC is not set
 # CONFIG_SCSI_CPQFCTS is not set
 CONFIG_SCSI_DMX3191D=m
@@ -713,7 +721,6 @@ CONFIG_SIS900=m
 CONFIG_EPIC100=m
 CONFIG_SUNDANCE=m
 CONFIG_SUNDANCE_MMIO=y
-# CONFIG_TLAN is not set
 CONFIG_VIA_RHINE=m
 # CONFIG_VIA_RHINE_MMIO is not set
@@ -784,7 +791,6 @@ CONFIG_NET_WIRELESS=y
 #
 # CONFIG_TR is not set
 CONFIG_NET_FC=y
-# CONFIG_RCPCI is not set
 CONFIG_SHAPER=m
 
 #
@@ -1115,8 +1121,8 @@ CONFIG_DEVPTS_FS=y
 CONFIG_DEVPTS_FS_XATTR=y
 # CONFIG_DEVPTS_FS_SECURITY is not set
 # CONFIG_TMPFS is not set
-# CONFIG_HUGETLBFS is not set
-# CONFIG_HUGETLB_PAGE is not set
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
 CONFIG_RAMFS=y
 
 #
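
The defconfig now enables CONFIG_HUGETLBFS and CONFIG_HUGETLB_PAGE, so a hugetlbfs mount becomes the user-visible entry point. A hedged userspace sketch (the mount point /mnt/huge is an illustrative assumption, not from the patch):

	/* Sketch: mount hugetlbfs; requires CAP_SYS_ADMIN. */
	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* "hugetlbfs" is the filesystem type registered by fs/hugetlbfs. */
		if (mount("none", "/mnt/huge", "hugetlbfs", 0, NULL) != 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}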
arch/sparc64/mm/hugetlbpage.c
 /*
  * SPARC64 Huge TLB page support.
  *
- * Copyright (C) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
  */
 
 #include <linux/config.h>
@@ -12,6 +12,7 @@
 #include <linux/pagemap.h>
 #include <linux/smp_lock.h>
 #include <linux/slab.h>
+#include <linux/sysctl.h>
 
 #include <asm/mman.h>
 #include <asm/pgalloc.h>
@@ -19,87 +20,68 @@
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 
-static struct vm_operations_struct hugetlb_vm_ops;
-struct list_head htlbpage_freelist;
-spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
-extern long htlbpagemem;
-
-static void zap_hugetlb_resources(struct vm_area_struct *);
-
-void free_huge_page(struct page *page);
-
-#define MAX_ID	32
-struct htlbpagekey {
-	struct inode *in;
-	int key;
-} htlbpagek[MAX_ID];
-
-static struct inode *find_key_inode(int key)
-{
-	int i;
-
-	for (i = 0; i < MAX_ID; i++) {
-		if (htlbpagek[i].key == key)
-			return htlbpagek[i].in;
-	}
-	return NULL;
-}
-
-static struct page *alloc_hugetlb_page(void)
-{
-	struct list_head *curr, *head;
-	struct page *page;
-
-	spin_lock(&htlbpage_lock);
-
-	head = &htlbpage_freelist;
-	curr = head->next;
-
-	if (curr == head) {
-		spin_unlock(&htlbpage_lock);
-		return NULL;
-	}
-	page = list_entry(curr, struct page, list);
-	list_del(curr);
+static long	htlbpagemem;
+int		htlbpage_max;
+static long	htlbzone_pages;
+
+static struct list_head hugepage_freelists[MAX_NUMNODES];
+static spinlock_t htlbpage_lock = SPIN_LOCK_UNLOCKED;
+
+static void enqueue_huge_page(struct page *page)
+{
+	list_add(&page->list,
+		 &hugepage_freelists[page_zone(page)->zone_pgdat->node_id]);
+}
+
+static struct page *dequeue_huge_page(void)
+{
+	int nid = numa_node_id();
+	struct page *page = NULL;
+
+	if (list_empty(&hugepage_freelists[nid])) {
+		for (nid = 0; nid < MAX_NUMNODES; ++nid)
+			if (!list_empty(&hugepage_freelists[nid]))
+				break;
+	}
+	if (nid >= 0 && nid < MAX_NUMNODES &&
+	    !list_empty(&hugepage_freelists[nid])) {
+		page = list_entry(hugepage_freelists[nid].next,
+				  struct page, list);
+		list_del(&page->list);
+	}
+	return page;
+}
+
+static struct page *alloc_fresh_huge_page(void)
+{
+	static int nid = 0;
+	struct page *page;
+
+	page = alloc_pages_node(nid, GFP_HIGHUSER, HUGETLB_PAGE_ORDER);
+	nid = (nid + 1) % numnodes;
+	return page;
+}
+
+static void free_huge_page(struct page *page);
+
+static struct page *alloc_hugetlb_page(void)
+{
+	struct page *page;
+
+	spin_lock(&htlbpage_lock);
+	page = dequeue_huge_page();
+	if (!page) {
+		spin_unlock(&htlbpage_lock);
+		return NULL;
+	}
+
 	htlbpagemem--;
 	spin_unlock(&htlbpage_lock);
 	set_page_count(page, 1);
 	page->lru.prev = (void *)free_huge_page;
 	memset(page_address(page), 0, HPAGE_SIZE);
 	return page;
 }
 
-static void free_hugetlb_page(struct page *page)
-{
-	spin_lock(&htlbpage_lock);
-	if ((page->mapping != NULL) && (page_count(page) == 2)) {
-		struct inode *inode = page->mapping->host;
-		int i;
-
-		ClearPageDirty(page);
-		remove_from_page_cache(page);
-		set_page_count(page, 1);
-		if ((inode->i_size -= HPAGE_SIZE) == 0) {
-			for (i = 0; i < MAX_ID; i++) {
-				if (htlbpagek[i].key == inode->i_ino) {
-					htlbpagek[i].key = 0;
-					htlbpagek[i].in = NULL;
-					break;
-				}
-			}
-			kfree(inode);
-		}
-	}
-	if (put_page_testzero(page)) {
-		list_add(&page->list, &htlbpage_freelist);
-		htlbpagemem++;
-	}
-	spin_unlock(&htlbpage_lock);
-}
-
-static pte_t *huge_pte_alloc_map(struct mm_struct *mm, unsigned long addr)
+static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -114,7 +96,7 @@ static pte_t *huge_pte_alloc_map(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-static pte_t *huge_pte_offset_map(struct mm_struct *mm, unsigned long addr)
+static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -129,32 +111,16 @@ static pte_t *huge_pte_offset_map(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-static pte_t *huge_pte_offset_map_nested(struct mm_struct *mm, unsigned long addr)
-{
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte = NULL;
-
-	pgd = pgd_offset(mm, addr);
-	if (pgd) {
-		pmd = pmd_offset(pgd, addr);
-		if (pmd)
-			pte = pte_offset_map_nested(pmd, addr);
-	}
-	return pte;
-}
-
-#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZ4MB; } while (0)
+#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
 
 static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 			 struct page *page, pte_t *page_table, int write_access)
 {
-	pte_t entry;
 	unsigned long i;
+	pte_t entry;
 
 	mm->rss += (HPAGE_SIZE / PAGE_SIZE);
-	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		if (write_access)
-			entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
-							vma->vm_page_prot)));
+
+	if (write_access)
+		entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
+						       vma->vm_page_prot)));
@@ -162,76 +128,13 @@ static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	else
 		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
 	entry = pte_mkyoung(entry);
 	mk_pte_huge(entry);
-	pte_val(entry) += (i << PAGE_SHIFT);
-	set_pte(page_table, entry);
-	page_table++;
-	}
-}
-
-static int anon_get_hugetlb_page(struct mm_struct *mm,
-				 struct vm_area_struct *vma,
-				 int write_access, pte_t *page_table)
-{
-	struct page *page;
-
-	page = alloc_hugetlb_page();
-	if (page == NULL)
-		return -1;
-	set_huge_pte(mm, vma, page, page_table, write_access);
-	return 1;
-}
-
-static int make_hugetlb_pages_present(unsigned long addr, unsigned long end,
-				      int flags)
-{
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
-	pte_t *pte;
-	int write;
-
-	vma = find_vma(mm, addr);
-	if (!vma)
-		goto out_error1;
-
-	write = (vma->vm_flags & VM_WRITE) != 0;
-	if ((vma->vm_end - vma->vm_start) & (HPAGE_SIZE - 1))
-		goto out_error1;
-	spin_lock(&mm->page_table_lock);
-	do {
-		int err;
-
-		pte = huge_pte_alloc_map(mm, addr);
-		err = (!pte ||
-		       !pte_none(*pte) ||
-		       (anon_get_hugetlb_page(mm, vma,
-					      write ? VM_WRITE : VM_READ,
-					      pte) == -1));
-		if (pte)
-			pte_unmap(pte);
-		if (err)
-			goto out_error;
-		addr += HPAGE_SIZE;
-	} while (addr < end);
-	spin_unlock(&mm->page_table_lock);
-	vma->vm_flags |= (VM_HUGETLB | VM_RESERVED);
-	if (flags & MAP_PRIVATE)
-		vma->vm_flags |= VM_DONTCOPY;
-	vma->vm_ops = &hugetlb_vm_ops;
-	return 0;
-
-out_error:
-	if (addr > vma->vm_start) {
-		vma->vm_end = addr;
-		flush_cache_range(vma, vma->vm_start, vma->vm_end);
-		zap_hugetlb_resources(vma);
-		flush_tlb_range(vma, vma->vm_start, vma->vm_end);
-		vma->vm_end = end;
-	}
-	spin_unlock(&mm->page_table_lock);
-out_error1:
-	return -1;
+
+	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+		set_pte(page_table, entry);
+		page_table++;
+
+		pte_val(entry) += PAGE_SIZE;
+	}
 }
 
 /*
@@ -253,18 +156,15 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 	struct page *ptepage;
 	unsigned long addr = vma->vm_start;
 	unsigned long end = vma->vm_end;
+	int i;
 
 	while (addr < end) {
-		unsigned long i;
-
-		dst_pte = huge_pte_alloc_map(dst, addr);
+		dst_pte = huge_pte_alloc(dst, addr);
 		if (!dst_pte)
 			goto nomem;
-		src_pte = huge_pte_offset_map_nested(src, addr);
+		src_pte = huge_pte_offset(src, addr);
+		BUG_ON(!src_pte || pte_none(*src_pte));
 		entry = *src_pte;
-		pte_unmap_nested(src_pte);
 		ptepage = pte_page(entry);
 		get_page(ptepage);
 		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
@@ -272,8 +172,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			pte_val(entry) += PAGE_SIZE;
 			dst_pte++;
 		}
-		pte_unmap(dst_pte - (1 << HUGETLB_PAGE_ORDER));
 		dst->rss += (HPAGE_SIZE / PAGE_SIZE);
 		addr += HPAGE_SIZE;
 	}
@@ -285,161 +183,333 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
-			unsigned long *st, int *length, int i)
+			unsigned long *position, int *length, int i)
 {
-	pte_t *ptep, pte;
-	unsigned long start = *st;
-	unsigned long pstart;
-	int len = *length;
-	struct page *page;
-
-	do {
-		pstart = start;
-		ptep = huge_pte_offset_map(mm, start);
-		pte = *ptep;
-
-back1:
-		page = pte_page(pte);
-		if (pages) {
-			page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
+	unsigned long vaddr = *position;
+	int remainder = *length;
+
+	WARN_ON(!is_vm_hugetlb_page(vma));
+
+	while (vaddr < vma->vm_end && remainder) {
+		if (pages) {
+			pte_t *pte;
+			struct page *page;
+
+			pte = huge_pte_offset(mm, vaddr);
+
+			/* hugetlb should be locked, and hence, prefaulted */
+			BUG_ON(!pte || pte_none(*pte));
+
+			page = pte_page(*pte);
+
+			WARN_ON(!PageCompound(page));
+
 			get_page(page);
 			pages[i] = page;
 		}
+
 		if (vmas)
 			vmas[i] = vma;
-		i++;
-		len--;
-		start += PAGE_SIZE;
-		if (((start & HPAGE_MASK) == pstart) && len &&
-		    (start < vma->vm_end))
-			goto back1;
-		pte_unmap(ptep);
-	} while (len && start < vma->vm_end);
-	*length = len;
-	*st = start;
+
+		vaddr += PAGE_SIZE;
+		--remainder;
+		++i;
+	}
+
+	*length = remainder;
+	*position = vaddr;
+
 	return i;
 }
 
-static void zap_hugetlb_resources(struct vm_area_struct *mpnt)
+struct page *follow_huge_addr(struct mm_struct *mm,
+			      struct vm_area_struct *vma,
+			      unsigned long address, int write)
+{
+	return NULL;
+}
+
+struct vm_area_struct *hugepage_vma(struct mm_struct *mm, unsigned long addr)
+{
+	return NULL;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+	return 0;
+}
+
+struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+			     pmd_t *pmd, int write)
+{
+	return NULL;
+}
+
+static void free_huge_page(struct page *page)
+{
+	BUG_ON(page_count(page));
+	BUG_ON(page->mapping);
+
+	INIT_LIST_HEAD(&page->list);
+
+	spin_lock(&htlbpage_lock);
+	enqueue_huge_page(page);
+	htlbpagemem++;
+	spin_unlock(&htlbpage_lock);
+}
+
+void huge_page_release(struct page *page)
+{
+	if (!put_page_testzero(page))
+		return;
+
+	free_huge_page(page);
+}
+
+void unmap_hugepage_range(struct vm_area_struct *vma,
+			  unsigned long start, unsigned long end)
 {
-	struct mm_struct *mm = mpnt->vm_mm;
-	unsigned long len, addr, end;
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long address;
+	pte_t *pte;
 	struct page *page;
-	pte_t *ptep;
+	int i;
 
-	addr = mpnt->vm_start;
-	end = mpnt->vm_end;
-	len = end - addr;
-	do {
-		unsigned long i;
-
-		ptep = huge_pte_offset_map(mm, addr);
-		page = pte_page(*ptep);
+	BUG_ON(start & (HPAGE_SIZE - 1));
+	BUG_ON(end & (HPAGE_SIZE - 1));
+
+	for (address = start; address < end; address += HPAGE_SIZE) {
+		pte = huge_pte_offset(mm, address);
+		BUG_ON(!pte);
+		if (pte_none(*pte))
+			continue;
+		page = pte_page(*pte);
+		huge_page_release(page);
 		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-			pte_clear(ptep);
-			ptep++;
+			pte_clear(pte);
+			pte++;
 		}
-		pte_unmap(ptep - (1 << HUGETLB_PAGE_ORDER));
-		free_hugetlb_page(page);
-		addr += HPAGE_SIZE;
-	} while (addr < end);
-	mm->rss -= (len >> PAGE_SHIFT);
-	mpnt->vm_ops = NULL;
+	}
+	mm->rss -= (end - start) >> PAGE_SHIFT;
+	flush_tlb_range(vma, start, end);
 }
 
-static void unlink_vma(struct vm_area_struct *mpnt)
+void zap_hugepage_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long length)
 {
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct mm_struct *mm = vma->vm_mm;
+
+	spin_lock(&mm->page_table_lock);
+	unmap_hugepage_range(vma, start, start + length);
+	spin_unlock(&mm->page_table_lock);
+}
 
-	vma = mm->mmap;
-	if (vma == mpnt) {
-		mm->mmap = vma->vm_next;
-	} else {
-		while (vma->vm_next != mpnt) {
-			vma = vma->vm_next;
-		}
-		vma->vm_next = mpnt->vm_next;
-	}
-	rb_erase(&mpnt->vm_rb, &mm->mm_rb);
-	mm->mmap_cache = NULL;
-	mm->map_count--;
+int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr;
+	int ret = 0;
+
+	BUG_ON(vma->vm_start & ~HPAGE_MASK);
+	BUG_ON(vma->vm_end & ~HPAGE_MASK);
+
+	spin_lock(&mm->page_table_lock);
+	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
+		unsigned long idx;
+		pte_t *pte = huge_pte_alloc(mm, addr);
+		struct page *page;
+
+		if (!pte) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		if (!pte_none(*pte))
+			continue;
+
+		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
+			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
+		page = find_get_page(mapping, idx);
+		if (!page) {
+			/* charge the fs quota first */
+			if (hugetlb_get_quota(mapping)) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			page = alloc_hugetlb_page();
+			if (!page) {
+				hugetlb_put_quota(mapping);
+				ret = -ENOMEM;
+				goto out;
+			}
+			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
+			unlock_page(page);
+			if (ret) {
+				hugetlb_put_quota(mapping);
+				free_huge_page(page);
+				goto out;
+			}
+		}
+		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
+	}
+out:
+	spin_unlock(&mm->page_table_lock);
+	return ret;
 }
 
-int free_hugepages(struct vm_area_struct *mpnt)
+static void update_and_free_page(struct page *page)
 {
-	unlink_vma(mpnt);
+	int j;
+	struct page *map;
 
-	flush_cache_range(mpnt, mpnt->vm_start, mpnt->vm_end);
-	zap_hugetlb_resources(mpnt);
-	flush_tlb_range(mpnt, mpnt->vm_start, mpnt->vm_end);
-
-	kmem_cache_free(vm_area_cachep, mpnt);
-	return 1;
+	map = page;
+	htlbzone_pages--;
+	for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
+		map->flags &= ~(1 << PG_locked | 1 << PG_error |
+				1 << PG_referenced | 1 << PG_dirty |
+				1 << PG_active | 1 << PG_reserved |
+				1 << PG_private | 1 << PG_writeback);
+		set_page_count(map, 0);
+		map++;
+	}
+	set_page_count(page, 1);
+	__free_pages(page, HUGETLB_PAGE_ORDER);
 }
 
-extern long htlbzone_pages;
-extern struct list_head htlbpage_freelist;
-
-int set_hugetlb_mem_size(int count)
+static int try_to_free_low(int count)
 {
-	int j, lcount;
-	struct page *page, *map;
+	struct list_head *p;
+	struct page *page, *map;
+
+	map = NULL;
+	spin_lock(&htlbpage_lock);
+	/* all lowmem is on node 0 */
+	list_for_each(p, &hugepage_freelists[0]) {
+		if (map) {
+			list_del(&map->list);
+			update_and_free_page(map);
+			htlbpagemem--;
+			map = NULL;
+			if (++count == 0)
+				break;
+		}
+		page = list_entry(p, struct page, list);
+		if (!PageHighMem(page))
+			map = page;
+	}
+	if (map) {
+		list_del(&map->list);
+		update_and_free_page(map);
+		htlbpagemem--;
+		count++;
+	}
+	spin_unlock(&htlbpage_lock);
+	return count;
+}
+
+static int set_hugetlb_mem_size(int count)
+{
+	int lcount;
+	struct page *page;
 
 	if (count < 0)
 		lcount = count;
 	else
 		lcount = count - htlbzone_pages;
 
+	if (lcount == 0)
+		return (int)htlbzone_pages;
 	if (lcount > 0) {	/* Increase the mem size. */
 		while (lcount--) {
-			page = alloc_pages(GFP_ATOMIC, HUGETLB_PAGE_ORDER);
+			page = alloc_fresh_huge_page();
 			if (page == NULL)
 				break;
 			spin_lock(&htlbpage_lock);
-			list_add(&page->list, &htlbpage_freelist);
+			enqueue_huge_page(page);
 			htlbpagemem++;
 			htlbzone_pages++;
 			spin_unlock(&htlbpage_lock);
 		}
 		return (int) htlbzone_pages;
 	}
 	/* Shrink the memory size. */
+	lcount = try_to_free_low(lcount);
 	while (lcount++) {
 		page = alloc_hugetlb_page();
 		if (page == NULL)
 			break;
 		spin_lock(&htlbpage_lock);
-		htlbzone_pages--;
+		update_and_free_page(page);
 		spin_unlock(&htlbpage_lock);
-		map = page;
-		for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
-			map->flags &= ~(1UL << PG_locked | 1UL << PG_error |
-					1UL << PG_referenced | 1UL << PG_dirty |
-					1UL << PG_active | 1UL << PG_private |
-					1UL << PG_writeback);
-			set_page_count(map, 0);
-			map++;
-		}
-		set_page_count(page, 1);
-		__free_pages(page, HUGETLB_PAGE_ORDER);
 	}
 	return (int) htlbzone_pages;
 }
 
-static struct page *
-hugetlb_nopage(struct vm_area_struct *vma, unsigned long address, int unused)
+int hugetlb_sysctl_handler(struct ctl_table *table, int write,
+			   struct file *file, void *buffer, size_t *length)
+{
+	proc_dointvec(table, write, file, buffer, length);
+	htlbpage_max = set_hugetlb_mem_size(htlbpage_max);
+	return 0;
+}
+
+static int __init hugetlb_setup(char *s)
+{
+	if (sscanf(s, "%d", &htlbpage_max) <= 0)
+		htlbpage_max = 0;
+	return 1;
+}
+__setup("hugepages=", hugetlb_setup);
+
+static int __init hugetlb_init(void)
+{
+	int i;
+	struct page *page;
+
+	for (i = 0; i < MAX_NUMNODES; ++i)
+		INIT_LIST_HEAD(&hugepage_freelists[i]);
+
+	for (i = 0; i < htlbpage_max; ++i) {
+		page = alloc_fresh_huge_page();
+		if (!page)
+			break;
+		spin_lock(&htlbpage_lock);
+		enqueue_huge_page(page);
+		spin_unlock(&htlbpage_lock);
+	}
+	htlbpage_max = htlbpagemem = htlbzone_pages = i;
+	printk("Total HugeTLB memory allocated, %ld\n", htlbpagemem);
+	return 0;
+}
+module_init(hugetlb_init);
+
+int hugetlb_report_meminfo(char *buf)
+{
+	return sprintf(buf,
+		       "HugePages_Total: %5lu\n"
+		       "HugePages_Free:  %5lu\n"
+		       "Hugepagesize:    %5lu kB\n",
+		       htlbzone_pages,
+		       htlbpagemem,
+		       HPAGE_SIZE / 1024);
+}
+
+int is_hugepage_mem_enough(size_t size)
+{
+	return (size + ~HPAGE_MASK) / HPAGE_SIZE <= htlbpagemem;
+}
+
+/*
+ * We cannot handle pagefaults against hugetlb pages at all.  They cause
+ * handle_mm_fault() to try to instantiate regular-sized pages in the
+ * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we
+ * get this far.
+ */
+static struct page *hugetlb_nopage(struct vm_area_struct *vma,
+				   unsigned long address, int unused)
 {
 	BUG();
 	return NULL;
 }
 
-static struct vm_operations_struct hugetlb_vm_ops = {
+struct vm_operations_struct hugetlb_vm_ops = {
 	.nopage = hugetlb_nopage,
-	.close = zap_hugetlb_resources,
 };
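
With the prefault model above (hugetlb_prefault() populates every huge PTE at mmap() time, and hugetlb_nopage() BUG()s because no hugetlb fault should ever get that far), a mapping is either fully backed or the mmap() fails. A minimal userspace sketch under the assumptions of the defconfig (4MB huge pages, hugetlbfs mounted at the hypothetical /mnt/huge):

	/* Sketch: map one 4MB huge page from hugetlbfs. */
	#include <fcntl.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 4UL * 1024 * 1024;		/* one 4MB huge page */
		int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
		char *p;

		if (fd < 0)
			return 1;
		/* hugetlb_prefault() runs here; fails if the pool is empty. */
		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;
		memset(p, 0, len);	/* already backed; no further hugetlb faults */
		munmap(p, len);
		close(fd);
		return 0;
	}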
arch/sparc64/mm/init.c
@@ -1166,7 +1166,11 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 	pte_t *pte;
 
 #if (L1DCACHE_SIZE > PAGE_SIZE)			/* is there D$ aliasing problem */
 	set_page_count(page, 1);
+	ClearPageCompound(page);
+
 	set_page_count((page + 1), 1);
+	ClearPageCompound(page + 1);
+
 #endif
 	paddr = (unsigned long) page_address(page);
 	memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));
@@ -1680,13 +1684,6 @@ static void __init taint_real_pages(void)
 	}
 }
 
-#ifdef CONFIG_HUGETLB_PAGE
-long htlbpagemem = 0;
-int htlbpage_max;
-long htlbzone_pages;
-extern struct list_head htlbpage_freelist;
-#endif
-
 void __init mem_init(void)
 {
 	unsigned long codepages, datapages, initpages;
@@ -1763,32 +1760,6 @@ void __init mem_init(void)
 	if (tlb_type == cheetah || tlb_type == cheetah_plus)
 		cheetah_ecache_flush_init();
-
-#ifdef CONFIG_HUGETLB_PAGE
-	{
-		long i, j;
-		struct page *page, *map;
-
-		/* For now reserve quarter for hugetlb_pages. */
-		htlbzone_pages = (num_physpages >> ((HPAGE_SHIFT - PAGE_SHIFT) + 2));
-		/* Will make this kernel command line. */
-		INIT_LIST_HEAD(&htlbpage_freelist);
-		for (i = 0; i < htlbzone_pages; i++) {
-			page = alloc_pages(GFP_ATOMIC, HUGETLB_PAGE_ORDER);
-			if (page == NULL)
-				break;
-			map = page;
-			for (j = 0; j < (HPAGE_SIZE / PAGE_SIZE); j++) {
-				SetPageReserved(map);
-				map++;
-			}
-			list_add(&page->list, &htlbpage_freelist);
-		}
-		printk("Total Huge_TLB_Page memory pages allocated %ld\n", i);
-		htlbzone_pages = htlbpagemem = i;
-		htlbpage_max = i;
-	}
-#endif
 }
 
 void free_initmem(void)
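
The removed mem_init() block used to grab a quarter of physical memory for huge pages at boot with no way to tune it. After this patch the pool is sized by htlbpage_max, set either by the new hugepages= boot parameter or at runtime through hugetlb_sysctl_handler(). A sketch (assuming the generic vm.nr_hugepages sysctl entry is wired to that handler, which this diff does not show):

	/* Sketch: grow the huge page pool to 64 pages from userspace. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/vm/nr_hugepages", "w");

		if (!f)
			return 1;
		fprintf(f, "64\n");
		fclose(f);

		/* /proc/meminfo then reports the pool via hugetlb_report_meminfo(). */
		return 0;
	}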
include/asm-sparc64/page.h
@@ -90,7 +90,13 @@ typedef unsigned long iopgprot_t;
 
 #endif /* (STRICT_MM_TYPECHECKS) */
 
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
 #define HPAGE_SHIFT		22
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define HPAGE_SHIFT		19
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define HPAGE_SHIFT		16
+#endif
 
 #ifdef CONFIG_HUGETLB_PAGE
 #define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
include/asm-sparc64/pgtable.h
@@ -12,6 +12,7 @@
  * the SpitFire page tables.
  */
 
+#include <linux/config.h>
 #include <asm/spitfire.h>
 #include <asm/asi.h>
 #include <asm/system.h>
@@ -136,11 +137,19 @@
 #elif PAGE_SHIFT == 19
 #define _PAGE_SZBITS	_PAGE_SZ512K
 #elif PAGE_SHIFT == 22
 #define _PAGE_SZBITS	_PAGE_SZ4MB
 #else
 #error Wrong PAGE_SHIFT specified
 #endif
 
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define _PAGE_SZHUGE	_PAGE_SZ4MB
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define _PAGE_SZHUGE	_PAGE_512K
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define _PAGE_SZHUGE	_PAGE_64K
+#endif
+
 #define _PAGE_CACHE	(_PAGE_CP | _PAGE_CV)
 #define __DIRTY_BITS	(_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W)
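
_PAGE_SZHUGE is what mk_pte_huge() in hugetlbpage.c above ORs into each PTE. On sparc64 the page size lives in the top bits of the TTE, so every one of the 512 base PTEs backing a 4MB mapping carries the 4MB size field and the MMU installs a single 4MB TLB entry for the region. A sketch of the default 4MB case (the bit value is quoted from this header's size-bit definitions, shown here only for illustration):

	/* Sketch: the TTE size field for the default 4MB configuration. */
	#define _PAGE_SZ4MB	0x6000000000000000UL	/* size field = 3 (4MB) */
	#define _PAGE_SZHUGE	_PAGE_SZ4MB

	#define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)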