Commit 6e6e4187 authored by Sam Ravnborg, committed by David S. Miller

sparc32: fix build with STRICT_MM_TYPECHECKS

Based on a recent thread on linux-arch (some weeks ago), I decided
to check how much work was required to build sparc32 with
STRICT_MM_TYPECHECKS enabled.

To my surprise, the resulting binary (I checked srmmu.o) was smaller
with STRICT_MM_TYPECHECKS defined than without.

As I have no working gear to test sparc32 bits on at the moment,
I did not enable STRICT_MM_TYPECHECKS - but I was tempted to do so.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3c46e2d6
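
For readers unfamiliar with the option: STRICT_MM_TYPECHECKS selects the
struct-wrapped variants of the sparc32 page-table types (first two hunks
below), so a pte_t or pgprot_t no longer converts implicitly to or from
unsigned long and raw arithmetic on it becomes a compile error. A minimal
standalone sketch of the idea, using simplified stand-ins rather than the
real kernel headers:

#include <stdio.h>

/* Simplified stand-ins for the two pte_t variants in the hunks below. */
#ifdef STRICT_MM_TYPECHECKS
typedef struct { unsigned long pte; } pte_t;	/* no implicit conversion */
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
#else
typedef unsigned long pte_t;			/* plain integer */
#define pte_val(x)	(x)
#define __pte(x)	(x)
#endif

int main(void)
{
	pte_t pte = __pte(0x100);

	/* pte |= 0x1;	<- only compiles when pte_t is a plain integer */
	pte = __pte(pte_val(pte) | 0x1);	/* compiles with either variant */

	printf("pte: %lx\n", pte_val(pte));
	return 0;
}
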
@@ -69,7 +69,6 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
 #define __pte(x)	((pte_t) { (x) } )
 #define __iopte(x)	((iopte_t) { (x) } )
-/* #define __pmd(x)	((pmd_t) { (x) } ) */ /* XXX procedure with loop */
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __ctxd(x)	((ctxd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
@@ -97,7 +96,6 @@ typedef unsigned long iopgprot_t;
 #define __pte(x)	(x)
 #define __iopte(x)	(x)
-/* #define __pmd(x)	(x) */ /* XXX later */
 #define __pgd(x)	(x)
 #define __ctxd(x)	(x)
 #define __pgprot(x)	(x)
...
@@ -31,7 +31,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
 {
 	unsigned long pa = __nocache_pa((unsigned long)pmdp);

-	set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (pa >> 4)));
+	set_pte((pte_t *)pgdp, __pte((SRMMU_ET_PTD | (pa >> 4))));
 }

 #define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)
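
The pgd_set() fix above is the pattern repeated through the rest of the
diff: set_pte() takes a pte_t, so once pte_t is a struct, a bare
descriptor word such as SRMMU_ET_PTD | (pa >> 4) no longer converts to
the parameter type and has to be wrapped with __pte() first. In the
non-strict configuration __pte(x) expands to (x), so the wrapper costs
nothing. A hedged sketch with a simplified set_pte() stand-in (not the
kernel's) and an assumed value for SRMMU_ET_PTD:

/* Strict variant: pte_t is a one-member struct. */
typedef struct { unsigned long pte; } pte_t;
#define __pte(x)	((pte_t) { (x) })

#define SRMMU_ET_PTD	0x1	/* value assumed for this sketch */

/* Simplified stand-in for set_pte(): stores a typed entry. */
static void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void sketch_pgd_set(pte_t *slot, unsigned long pa)
{
	/* set_pte(slot, SRMMU_ET_PTD | (pa >> 4));  <- type mismatch now */
	set_pte(slot, __pte(SRMMU_ET_PTD | (pa >> 4)));
}
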
...
@@ -298,7 +298,7 @@ static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
 #define pgprot_noncached pgprot_noncached
 static inline pgprot_t pgprot_noncached(pgprot_t prot)
 {
-	prot &= ~__pgprot(SRMMU_CACHE);
+	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
 	return prot;
 }
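
Same idea for pgprot_t, but from the operator side: C has no bitwise
operators on structs, so prot &= ~__pgprot(SRMMU_CACHE) only builds while
pgprot_t is a plain integer. Going through pgprot_val() on both sides
works with either representation, because it is an lvalue member access
in the strict case and just (x) otherwise. A small sketch (SRMMU_CACHE
value assumed):

#ifdef STRICT_MM_TYPECHECKS
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })
#else
typedef unsigned long pgprot_t;
#define pgprot_val(x)	(x)
#define __pgprot(x)	(x)
#endif

#define SRMMU_CACHE	0x80	/* bit value assumed for this sketch */

pgprot_t sketch_noncached(pgprot_t prot)
{
	/* prot &= ~__pgprot(SRMMU_CACHE);  <- no '&=' on a struct */
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}
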
...
@@ -133,7 +133,7 @@ nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
 	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
 	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
 		set_bit(scan, iounit->bmap);
-		sbus_writel(iopte, &iounit->page_table[scan]);
+		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
 	}
 	IOD(("%08lx\n", vaddr));
 	return vaddr;
@@ -228,7 +228,7 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon
 			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
 			iopte = iounit->page_table + i;
-			sbus_writel(MKIOPTE(__pa(page)), iopte);
+			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
 		}
 		addr += PAGE_SIZE;
 		va += PAGE_SIZE;
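
The io-unit changes go the other direction: sbus_writel() wants a raw
32-bit word to store into the I/O MMU's page table, so a struct-wrapped
iopte_t has to be unwrapped with iopte_val() before the call. A sketch
with a simplified writel-style helper standing in for sbus_writel():

#include <stdint.h>

/* Strict variant: an IOMMU page-table entry is a one-member struct. */
typedef struct { unsigned long iopte; } iopte_t;
#define iopte_val(x)	((x).iopte)
#define __iopte(x)	((iopte_t) { (x) })

/* Simplified stand-in for a writel-style MMIO accessor. */
static void sketch_writel(uint32_t val, volatile uint32_t *addr)
{
	*addr = val;
}

void sketch_fill_slot(volatile uint32_t *slot, iopte_t iopte)
{
	/* sketch_writel(iopte, slot);  <- a struct is not a u32 */
	sketch_writel(iopte_val(iopte), slot);
}
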
...
@@ -107,7 +107,12 @@ static inline int srmmu_pmd_none(pmd_t pmd)
 /* XXX should we hyper_flush_whole_icache here - Anton */
 static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
-{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
+{
+	pte_t pte;
+
+	pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
+	set_pte((pte_t *)ctxp, pte);
+}

 void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
@@ -116,8 +121,8 @@ void pmd_set(pmd_t *pmdp, pte_t *ptep)
 	ptp = __nocache_pa((unsigned long) ptep) >> 4;
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
-		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
+		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
 	}
 }
@@ -128,8 +133,8 @@ void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
 	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
 	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
-		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
-		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
+		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
 	}
 }
...