Commit 2fe25f67 authored by Thiemo Seufer, committed by Ralf Baechle

More .set push/pop encapsulation, more eye-friendly code formatting.

Signed-off-by: Thiemo Seufer <ths@networkno.de>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent f8670e66
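The pattern rolled out across this header: instead of forcing the assembler into fixed modes and then guessing what to restore (".set mips0" / ".set reorder"), each block now brackets its option changes with ".set push" / ".set pop", which saves and restores the assembler's option state. A minimal sketch of the idiom, illustrative rather than part of the patch:

	/* Sketch only: push saves the assembler's current option set,
	 * pop restores it, so the block need not know which ISA or
	 * reorder mode was in effect around it.
	 */
	__asm__ __volatile__(
	"	.set	push				\n"	/* save options */
	"	.set	noreorder			\n"	/* no delay-slot scheduling */
	"	.set	mips3				\n"	/* allow the cache insn */
	"	cache	%0, %1				\n"
	"	.set	pop				\n"	/* restore saved options */
	:
	: "i" (op), "m" (*(unsigned char *)(addr)));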
@@ -21,7 +21,7 @@
  *
  * - The MIPS32 and MIPS64 specs permit an implementation to directly derive
  *   the index bits from the virtual address.  This breaks with tradition
- *   set by the R4000.  To keep unpleassant surprises from happening we pick
+ *   set by the R4000.  To keep unpleasant surprises from happening we pick
  *   an address in KSEG0 / CKSEG0.
  * - We need a properly sign extended address for 64-bit code.  To get away
  *   without ifdefs we let the compiler do it by a type cast.
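An aside on the type-cast remark above: narrowing a KSEG0 constant to a signed 32-bit type and widening it again makes the compiler sign extend it, so one expression serves both 32-bit kernels (0x80000000) and 64-bit kernels (0xffffffff80000000, i.e. CKSEG0). A hedged sketch; the constant and variable names are illustrative, not the header's actual definitions, and GCC's wrap-around conversion to int is assumed:

	/* (int)0x80000000 is negative under GCC's modulo conversion;
	 * widening it to unsigned long sign extends, yielding CKSEG0
	 * on 64-bit MIPS and plain KSEG0 on 32-bit, with no #ifdef.
	 */
	unsigned long index_base = (unsigned long)(int)0x80000000;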
@@ -30,11 +30,11 @@
 #define cache_op(op,addr)						\
 	__asm__ __volatile__(						\
+	"	.set	push					\n"	\
 	"	.set	noreorder				\n"	\
 	"	.set	mips3\n\t				\n"	\
 	"	cache	%0, %1					\n"	\
-	"	.set	mips0					\n"	\
-	"	.set	reorder"					\
+	"	.set	pop					\n"	\
 	:								\
 	: "i" (op), "m" (*(unsigned char *)(addr)))
@@ -84,14 +84,14 @@ static inline void flush_scache_line(unsigned long addr)
 static inline void protected_flush_icache_line(unsigned long addr)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n"
-		"1:\tcache %0,(%1)\n"
-		"2:\t.set mips0\n\t"
-		".set reorder\n\t"
-		".section\t__ex_table,\"a\"\n\t"
-		STR(PTR)"\t1b,2b\n\t"
-		".previous"
+		"	.set push				\n"
+		"	.set noreorder				\n"
+		"	.set mips3				\n"
+		"1:	cache %0, (%1)				\n"
+		"2:	.set pop				\n"
+		"	.section __ex_table,\"a\"		\n"
+		"	"STR(PTR)" 1b, 2b			\n"
+		"	.previous"
 	:
 	: "i" (Hit_Invalidate_I), "r" (addr));
 }
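What makes this variant "protected": the cache op at local label 1 can fault on an unmapped address, and the (1b, 2b) pair emitted into the __ex_table section lets the kernel's exception fixup resume execution at label 2, so the fault degrades into a skipped cache op instead of an oops. Assuming a 64-bit build where STR(PTR) expands to the .dword directive, the generated assembly is roughly:

	1:	cache	Hit_Invalidate_I, (addr)	# may take a TLB fault
	2:						# fixup resumes here
		.section __ex_table,"a"
		.dword	1b, 2b				# faulting insn, fixup target
		.previous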
@@ -100,19 +100,19 @@ static inline void protected_flush_icache_line(unsigned long addr)
  * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
  * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
  * caches.  We're talking about one cacheline unnecessarily getting invalidated
- * here so the penaltiy isn't overly hard.
+ * here so the penalty isn't overly hard.
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n"
-		"1:\tcache %0,(%1)\n"
-		"2:\t.set mips0\n\t"
-		".set reorder\n\t"
-		".section\t__ex_table,\"a\"\n\t"
-		STR(PTR)"\t1b,2b\n\t"
-		".previous"
+		"	.set push				\n"
+		"	.set noreorder				\n"
+		"	.set mips3				\n"
+		"1:	cache %0, (%1)				\n"
+		"2:	.set pop				\n"
+		"	.section __ex_table,\"a\"		\n"
+		"	"STR(PTR)" 1b, 2b			\n"
+		"	.previous"
 	:
 	: "i" (Hit_Writeback_Inv_D), "r" (addr));
 }
@@ -120,14 +120,14 @@ static inline void protected_writeback_dcache_line(unsigned long addr)
 static inline void protected_writeback_scache_line(unsigned long addr)
 {
 	__asm__ __volatile__(
-		".set noreorder\n\t"
-		".set mips3\n"
-		"1:\tcache %0,(%1)\n"
-		"2:\t.set mips0\n\t"
-		".set reorder\n\t"
-		".section\t__ex_table,\"a\"\n\t"
-		STR(PTR)"\t1b,2b\n\t"
-		".previous"
+		"	.set push				\n"
+		"	.set noreorder				\n"
+		"	.set mips3				\n"
+		"1:	cache %0, (%1)				\n"
+		"2:	.set pop				\n"
+		"	.section __ex_table,\"a\"		\n"
+		"	"STR(PTR)" 1b, 2b			\n"
+		"	.previous"
 	:
 	: "i" (Hit_Writeback_Inv_SD), "r" (addr));
 }
@@ -142,6 +142,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
 #define cache16_unroll32(base,op)					\
 	__asm__ __volatile__(						\
+	"	.set push					\n"	\
 	"	.set noreorder					\n"	\
 	"	.set mips3					\n"	\
 	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
@@ -160,8 +161,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
 	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
 	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
 	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
-	"	.set mips0					\n"	\
-	"	.set reorder					\n"	\
+	"	.set pop					\n"	\
 	:								\
 	: "r" (base),							\
 	  "i" (op));
@@ -285,6 +285,7 @@ static inline void blast_scache16_page_indexed(unsigned long page)
 #define cache32_unroll32(base,op)					\
 	__asm__ __volatile__(						\
+	"	.set push					\n"	\
 	"	.set noreorder					\n"	\
 	"	.set mips3					\n"	\
 	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
@@ -303,8 +304,7 @@ static inline void blast_scache16_page_indexed(unsigned long page)
 	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
 	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
 	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
-	"	.set mips0					\n"	\
-	"	.set reorder					\n"	\
+	"	.set pop					\n"	\
 	:								\
 	: "r" (base),							\
 	  "i" (op));
@@ -428,6 +428,7 @@ static inline void blast_scache32_page_indexed(unsigned long page)
 #define cache64_unroll32(base,op)					\
 	__asm__ __volatile__(						\
+	"	.set push					\n"	\
 	"	.set noreorder					\n"	\
 	"	.set mips3					\n"	\
 	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
@@ -446,8 +447,7 @@ static inline void blast_scache32_page_indexed(unsigned long page)
 	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
 	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
 	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
-	"	.set mips0					\n"	\
-	"	.set reorder					\n"	\
+	"	.set pop					\n"	\
 	:								\
 	: "r" (base),							\
 	  "i" (op));
@@ -532,6 +532,7 @@ static inline void blast_scache64_page_indexed(unsigned long page)
 #define cache128_unroll32(base,op)					\
 	__asm__ __volatile__(						\
+	"	.set push					\n"	\
 	"	.set noreorder					\n"	\
 	"	.set mips3					\n"	\
 	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
@@ -550,8 +551,7 @@ static inline void blast_scache64_page_indexed(unsigned long page)
 	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
 	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
 	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
-	"	.set mips0					\n"	\
-	"	.set reorder					\n"	\
+	"	.set pop					\n"	\
 	:								\
 	: "r" (base),							\
 	  "i" (op));
...