Commit 68b5a30f authored by Andrew Morton, committed by Linus Torvalds

[PATCH] use __GFP_REPEAT in pte_alloc_one()

Remove all the open-coded retry loops in various architectures and use
__GFP_REPEAT instead.
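
For reference, the shape of the change, condensed from the hunks below (each
architecture's retry loop differs slightly, but all follow this pattern):

        /* Before: open-coded bounded retry around the page allocator. */
        pte_t *pte;
        int count = 0;
        do {
                pte = (pte_t *)__get_free_page(GFP_KERNEL);
                if (!pte) {
                        current->state = TASK_UNINTERRUPTIBLE;
                        schedule_timeout(HZ);   /* sleep ~1 second, then retry */
                }
        } while (!pte && (count++ < 10));

        /* After: let the page allocator itself do the retrying. */
        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);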

__GFP_REPEAT may at some point in the future be changed to give up after ten
seconds or so, which is why all the checks for failed allocations are
retained.
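
A minimal sketch of why the NULL checks stay (map_new_pte() is a hypothetical
caller invented for illustration, not code from this patch):

        /*
         * Hypothetical caller: __GFP_REPEAT is not a guarantee of success,
         * so a NULL return must still be handled as out-of-memory.
         */
        static int map_new_pte(struct mm_struct *mm, unsigned long addr)
        {
                pte_t *pte = pte_alloc_one_kernel(mm, addr);

                if (!pte)
                        return -ENOMEM; /* propagate the failure; no retry loop */
                /* ... install the new page table into the pmd ... */
                return 0;
        }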
parent 8db50e8b
@@ -66,19 +66,9 @@ pgd_alloc(struct mm_struct *mm)
 pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        pte_t *pte;
-        long timeout = 10;
- retry:
-        pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
         if (pte)
                 clear_page(pte);
-        else if (--timeout >= 0) {
-                current->state = TASK_UNINTERRUPTIBLE;
-                schedule_timeout(HZ);
-                goto retry;
-        }
         return pte;
 }
@@ -131,39 +131,23 @@ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        int count = 0;
-        pte_t *pte;
-        do {
-                pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
         if (pte)
                 clear_page(pte);
-                else {
-                        current->state = TASK_UNINTERRUPTIBLE;
-                        schedule_timeout(HZ);
-                }
-        } while (!pte && (count++ < 10));
         return pte;
 }

 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        int count = 0;
         struct page *pte;
-        do {
 #if CONFIG_HIGHPTE
-                pte = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, 0);
+        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
 #else
-                pte = alloc_pages(GFP_KERNEL, 0);
+        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
 #endif
         if (pte)
                 clear_highpage(pte);
-                else {
-                        current->state = TASK_UNINTERRUPTIBLE;
-                        schedule_timeout(HZ);
-                }
-        } while (!pte && (count++ < 10));
         return pte;
 }
@@ -76,15 +76,11 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
         extern void *early_get_page(void);
-        int timeout = 0;

-        if (mem_init_done) {
-                while ((pte = (pte_t *) __get_free_page(GFP_KERNEL)) == NULL
-                                && ++timeout < 10) {
-                        set_current_state(TASK_UNINTERRUPTIBLE);
-                        schedule_timeout(HZ);
-                }
-        } else
-                pte = (pte_t *) early_get_page();
-        if (pte != NULL)
+        if (mem_init_done)
+                pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+        else
+                pte = (pte_t *)early_get_page();
+        if (pte)
                 clear_page(pte);
         return pte;
 }
@@ -92,19 +88,15 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
         struct page *pte;
-        int timeout = 0;
 #ifdef CONFIG_HIGHPTE
-        int flags = GFP_KERNEL | __GFP_HIGHMEM;
+        int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
 #else
-        int flags = GFP_KERNEL;
+        int flags = GFP_KERNEL | __GFP_REPEAT;
 #endif

-        while ((pte = alloc_pages(flags, 0)) == NULL) {
-                if (++timeout >= 10)
-                        return NULL;
-                set_current_state(TASK_UNINTERRUPTIBLE);
-                schedule_timeout(HZ);
-        }
+        pte = alloc_pages(flags, 0);
+        if (pte)
                 clear_highpage(pte);
         return pte;
 }
@@ -1901,7 +1901,7 @@ static pte_t *sun4c_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
         if ((pte = sun4c_pte_alloc_one_fast(mm, address)) != NULL)
                 return pte;

-        pte = (pte_t *)__get_free_page(GFP_KERNEL);
+        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
         if (pte)
                 memset(pte, 0, PAGE_SIZE);
         return pte;
@@ -810,35 +810,21 @@ void pgd_free(pgd_t *pgd)
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        int count = 0;
-        pte_t *pte;
-        do {
-                pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
         if (pte)
                 clear_page(pte);
-                else {
-                        current->state = TASK_UNINTERRUPTIBLE;
-                        schedule_timeout(HZ);
-                }
-        } while (!pte && (count++ < 10));
         return pte;
 }

 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        int count = 0;
         struct page *pte;
-        do {
-                pte = alloc_pages(GFP_KERNEL, 0);
+        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
         if (pte)
                 clear_highpage(pte);
-                else {
-                        current->state = TASK_UNINTERRUPTIBLE;
-                        schedule_timeout(HZ);
-                }
-        } while (!pte && (count++ < 10));
         return pte;
 }
@@ -27,17 +27,9 @@
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
-        int count = 0;
         pte_t *pte;

-        do {
-                pte = (pte_t *)__get_free_page(GFP_KERNEL);
-                if (!pte) {
-                        current->state = TASK_UNINTERRUPTIBLE;
-                        schedule_timeout(HZ);
-                }
-        } while (!pte && (count++ < 10));
+        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
         if (pte) {
                 clear_page(pte);
                 clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
@@ -51,16 +43,8 @@ static inline struct page *
 pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
         struct page *pte;
-        int count = 0;

-        do {
-                pte = alloc_pages(GFP_KERNEL, 0);
-                if (!pte) {
-                        current->state = TASK_UNINTERRUPTIBLE;
-                        schedule_timeout(HZ);
-                }
-        } while (!pte && (count++ < 10));
+        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
         if (pte) {
                 void *page = page_address(pte);
                 clear_page(page);
@@ -62,7 +62,7 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
         pte_t *pte;

-        pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
         if (pte)
                 clear_page(pte);
         return pte;
@@ -125,7 +125,7 @@ pmd_populate_kernel (struct mm_struct *mm, pmd_t *pmd_entry, pte_t *pte)
 static inline struct page *
 pte_alloc_one (struct mm_struct *mm, unsigned long addr)
 {
-        struct page *pte = alloc_pages(GFP_KERNEL, 0);
+        struct page *pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);

         if (likely(pte != NULL))
                 clear_page(page_address(pte));
@@ -135,7 +135,7 @@ pte_alloc_one (struct mm_struct *mm, unsigned long addr)
 static inline pte_t *
 pte_alloc_one_kernel (struct mm_struct *mm, unsigned long addr)
 {
-        pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

         if (likely(pte != NULL))
                 clear_page(pte);
@@ -11,7 +11,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
         pte_t *pte;

-        pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
         if (pte) {
                 clear_page(pte);
                 __flush_page_to_ram(pte);
@@ -30,7 +30,7 @@ static inline void pte_free_kernel(pte_t *pte)
 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        struct page *page = alloc_pages(GFP_KERNEL, 0);
+        struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
         pte_t *pte;

         if(!page)
@@ -39,7 +39,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
         unsigned long address)
 {
-        unsigned long page = __get_free_page(GFP_KERNEL);
+        unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);

         if (!page)
                 return NULL;
@@ -51,7 +51,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
         unsigned long address)
 {
-        struct page *page = alloc_pages(GFP_KERNEL, 0);
+        struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);

         if (page == NULL)
                 return NULL;
@@ -132,7 +132,7 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
         pte_t *pte;

-        pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
         if (pte)
                 clear_page(pte);
         return pte;
@@ -93,7 +93,7 @@ static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
         pte_t *pte;

-        pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
         if (pte)
                 clear_page(pte);
         return pte;
@@ -73,7 +73,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 static inline struct page *
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        struct page *page = alloc_page(GFP_KERNEL);
+        struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT);

         if (likely(page != NULL))
                 clear_page(page_address(page));
         return page;
@@ -82,7 +82,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
-        pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

         if (likely(pte != NULL))
                 clear_page(pte);
         return pte;
@@ -62,19 +62,11 @@ pmd_free(pmd_t *pmd)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
-        int count = 0;
-        pte_t *pte;
-        do {
-                pte = (pte_t *)__get_free_page(GFP_KERNEL);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
         if (pte)
                 clear_page(pte);
-                else {
-                        current->state = TASK_UNINTERRUPTIBLE;
-                        schedule_timeout(HZ);
-                }
-        } while (!pte && (count++ < 10));
         return pte;
 }
@@ -120,20 +120,13 @@ static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
 {
         pte_t *pte;
-        int count;
         int i;

-        count = 0;
-        do {
-                pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
         if (pte != NULL) {
                 for (i=0; i < PTRS_PER_PTE; i++)
                         pte_clear(pte+i);
-        } else {
-                current->state = TASK_UNINTERRUPTIBLE;
-                schedule_timeout(HZ);
         }
-        } while (!pte && (count++ < 10));
         return pte;
 }
@@ -35,7 +35,7 @@ static inline void pgd_free(pgd_t *pgd)
 static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);
+        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

         if (pte)
                 clear_page(pte);
         return pte;
@@ -48,12 +48,12 @@ static inline void pgd_free (pgd_t *pgd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-        return (pte_t *) get_zeroed_page(GFP_KERNEL);
+        return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }

 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-        void *p = (void *)get_zeroed_page(GFP_KERNEL);
+        void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);

         if (!p)
                 return NULL;
         return virt_to_page(p);