Commit a31a00ba authored by Heiko Carstens's avatar Heiko Carstens Committed by Martin Schwidefsky

s390/mm: get rid of __ASSEMBLY__ guards within pgtable.h

We also have C code outside of the #ifndef __ASSEMBLY__ guards, so these
guards seem quite pointless and can be removed.
Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 8457d775
@@ -11,7 +11,6 @@
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
@@ -65,7 +64,6 @@ extern unsigned long zero_page_mask;
#define __HAVE_COLOR_ZERO_PAGE
/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */
#define PMD_SHIFT	20
#define PUD_SHIFT	31
@@ -100,7 +98,6 @@ extern unsigned long zero_page_mask;
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
@@ -1571,8 +1568,6 @@ static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#endif /* !__ASSEMBLY__ */
#define kern_addr_valid(addr)   (1)
extern int vmem_add_mapping(unsigned long start, unsigned long size);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment