Commit e9144754 authored by Linus Torvalds

Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  [MIPS] Fix 32bit kernels on R4k with 128 byte cache line size
  [MIPS] Atlas, decstation: Fix section mismatches triggered by defconfigs
parents eb35c218 14defd90
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
static unsigned long icache_size, dcache_size; /* Size in bytes */ static unsigned long icache_size, dcache_size; /* Size in bytes */
static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */ static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */
unsigned long __init r3k_cache_size(unsigned long ca_flags) unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
{ {
unsigned long flags, status, dummy, size; unsigned long flags, status, dummy, size;
volatile unsigned long *p; volatile unsigned long *p;
...@@ -61,7 +61,7 @@ unsigned long __init r3k_cache_size(unsigned long ca_flags) ...@@ -61,7 +61,7 @@ unsigned long __init r3k_cache_size(unsigned long ca_flags)
return size * sizeof(*p); return size * sizeof(*p);
} }
unsigned long __init r3k_cache_lsize(unsigned long ca_flags) unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
{ {
unsigned long flags, status, lsize, i; unsigned long flags, status, lsize, i;
volatile unsigned long *p; volatile unsigned long *p;
...@@ -90,7 +90,7 @@ unsigned long __init r3k_cache_lsize(unsigned long ca_flags) ...@@ -90,7 +90,7 @@ unsigned long __init r3k_cache_lsize(unsigned long ca_flags)
return lsize * sizeof(*p); return lsize * sizeof(*p);
} }
static void __init r3k_probe_cache(void) static void __cpuinit r3k_probe_cache(void)
{ {
dcache_size = r3k_cache_size(ST0_ISC); dcache_size = r3k_cache_size(ST0_ISC);
if (dcache_size) if (dcache_size)
......
...@@ -235,13 +235,12 @@ static void __cpuinit set_prefetch_parameters(void) ...@@ -235,13 +235,12 @@ static void __cpuinit set_prefetch_parameters(void)
} }
/* /*
* Too much unrolling will overflow the available space in * Too much unrolling will overflow the available space in
* clear_space_array / copy_page_array. 8 words sounds generous, * clear_space_array / copy_page_array.
* but a R4000 with 128 byte L2 line length can exceed even that.
*/ */
half_clear_loop_size = min(8 * clear_word_size, half_clear_loop_size = min(16 * clear_word_size,
max(cache_line_size >> 1, max(cache_line_size >> 1,
4 * clear_word_size)); 4 * clear_word_size));
half_copy_loop_size = min(8 * copy_word_size, half_copy_loop_size = min(16 * copy_word_size,
max(cache_line_size >> 1, max(cache_line_size >> 1,
4 * copy_word_size)); 4 * copy_word_size));
} }
...@@ -263,21 +262,23 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off) ...@@ -263,21 +262,23 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
if (pref_bias_clear_store) { if (pref_bias_clear_store) {
uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
A0); A0);
} else if (cpu_has_cache_cdex_s) { } else if (cache_line_size == (half_clear_loop_size << 1)) {
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0); if (cpu_has_cache_cdex_s) {
} else if (cpu_has_cache_cdex_p) { uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) { } else if (cpu_has_cache_cdex_p) {
uasm_i_nop(buf); if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
uasm_i_nop(buf); uasm_i_nop(buf);
uasm_i_nop(buf); uasm_i_nop(buf);
uasm_i_nop(buf); uasm_i_nop(buf);
} uasm_i_nop(buf);
}
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
uasm_i_lw(buf, ZERO, ZERO, AT); uasm_i_lw(buf, ZERO, ZERO, AT);
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0); uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
} }
}
} }
void __cpuinit build_clear_page(void) void __cpuinit build_clear_page(void)
...@@ -403,20 +404,22 @@ static inline void build_copy_store_pref(u32 **buf, int off) ...@@ -403,20 +404,22 @@ static inline void build_copy_store_pref(u32 **buf, int off)
if (pref_bias_copy_store) { if (pref_bias_copy_store) {
uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
A0); A0);
} else if (cpu_has_cache_cdex_s) { } else if (cache_line_size == (half_copy_loop_size << 1)) {
uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0); if (cpu_has_cache_cdex_s) {
} else if (cpu_has_cache_cdex_p) { uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) { } else if (cpu_has_cache_cdex_p) {
uasm_i_nop(buf); if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
uasm_i_nop(buf); uasm_i_nop(buf);
uasm_i_nop(buf); uasm_i_nop(buf);
uasm_i_nop(buf); uasm_i_nop(buf);
} uasm_i_nop(buf);
}
if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
uasm_i_lw(buf, ZERO, ZERO, AT); uasm_i_lw(buf, ZERO, ZERO, AT);
uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0); uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
}
} }
} }
......
...@@ -86,7 +86,7 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size) ...@@ -86,7 +86,7 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
/* /*
* This function is executed in uncached address space. * This function is executed in uncached address space.
*/ */
static __init void __rm7k_sc_enable(void) static __cpuinit void __rm7k_sc_enable(void)
{ {
int i; int i;
...@@ -107,7 +107,7 @@ static __init void __rm7k_sc_enable(void) ...@@ -107,7 +107,7 @@ static __init void __rm7k_sc_enable(void)
} }
} }
static __init void rm7k_sc_enable(void) static __cpuinit void rm7k_sc_enable(void)
{ {
if (read_c0_config() & RM7K_CONF_SE) if (read_c0_config() & RM7K_CONF_SE)
return; return;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment