Commit 11c19656 authored by Paul Mundt

sh: Fixup cpu_data references for the non-boot CPUs.

There are a lot of bogus cpu_data-> references that only end up working
for the boot CPU; convert these to current_cpu_data to fix up SMP.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent aec5e0e1
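For context, the distinction this conversion relies on can be shown with a small stand-alone sketch. It mimics the conventional arch/sh layout (a per-CPU cpu_data[] array with boot_cpu_data and current_cpu_data accessors, visible indirectly in the setup.c hunk below via "cpu = c - cpu_data"); NR_CPUS, smp_processor_id() and the struct contents here are mock-ups for illustration, not the kernel's own declarations:

/* Stand-alone sketch: why cpu_data-> only ever sees the boot CPU.
 * The real declarations live in include/asm-sh/processor.h; the
 * definitions below are assumptions made for this illustration.
 */
#include <stdio.h>

#define NR_CPUS 2

struct sh_cpuinfo {
	int type;
};

static struct sh_cpuinfo cpu_data[NR_CPUS];

static int smp_processor_id(void)
{
	return 1;	/* pretend we are running on a secondary CPU */
}

#define boot_cpu_data		cpu_data[0]
#define current_cpu_data	cpu_data[smp_processor_id()]

int main(void)
{
	cpu_data[0].type = 100;		/* boot CPU's probe result      */
	cpu_data[1].type = 200;		/* secondary CPU's probe result */

	/* cpu_data decays to &cpu_data[0], so cpu_data->type is always CPU 0 */
	printf("cpu_data->type        = %d\n", cpu_data->type);
	printf("current_cpu_data.type = %d\n", current_cpu_data.type);
	return 0;
}

On the boot CPU the two spellings agree, which is why the old references appeared to work; on any other CPU only the current_cpu_data form reads that CPU's own probe data.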
@@ -48,7 +48,7 @@ static void __init cache_init(void)
{
unsigned long ccr, flags;
if (cpu_data->type == CPU_SH_NONE)
if (current_cpu_data.type == CPU_SH_NONE)
panic("Unknown CPU");
jump_to_P2();
@@ -68,7 +68,7 @@ static void __init cache_init(void)
if (ccr & CCR_CACHE_ENABLE) {
unsigned long ways, waysize, addrstart;
waysize = cpu_data->dcache.sets;
waysize = current_cpu_data.dcache.sets;
#ifdef CCR_CACHE_ORA
/*
@@ -79,7 +79,7 @@ static void __init cache_init(void)
waysize >>= 1;
#endif
waysize <<= cpu_data->dcache.entry_shift;
waysize <<= current_cpu_data.dcache.entry_shift;
#ifdef CCR_CACHE_EMODE
/* If EMODE is not set, we only have 1 way to flush. */
@@ -87,7 +87,7 @@ static void __init cache_init(void)
ways = 1;
else
#endif
ways = cpu_data->dcache.ways;
ways = current_cpu_data.dcache.ways;
addrstart = CACHE_OC_ADDRESS_ARRAY;
do {
@@ -95,10 +95,10 @@ static void __init cache_init(void)
for (addr = addrstart;
addr < addrstart + waysize;
addr += cpu_data->dcache.linesz)
addr += current_cpu_data.dcache.linesz)
ctrl_outl(0, addr);
addrstart += cpu_data->dcache.way_incr;
addrstart += current_cpu_data.dcache.way_incr;
} while (--ways);
}
@@ -110,7 +110,7 @@ static void __init cache_init(void)
#ifdef CCR_CACHE_EMODE
/* Force EMODE if possible */
if (cpu_data->dcache.ways > 1)
if (current_cpu_data.dcache.ways > 1)
flags |= CCR_CACHE_EMODE;
else
flags &= ~CCR_CACHE_EMODE;
@@ -127,10 +127,10 @@ static void __init cache_init(void)
#ifdef CONFIG_SH_OCRAM
/* Turn on OCRAM -- halve the OC */
flags |= CCR_CACHE_ORA;
cpu_data->dcache.sets >>= 1;
current_cpu_data.dcache.sets >>= 1;
cpu_data->dcache.way_size = cpu_data->dcache.sets *
cpu_data->dcache.linesz;
current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
current_cpu_data.dcache.linesz;
#endif
ctrl_outl(flags, CCR);
@@ -172,7 +172,7 @@ static void __init dsp_init(void)
/* If the DSP bit is still set, this CPU has a DSP */
if (sr & SR_DSP)
cpu_data->flags |= CPU_HAS_DSP;
current_cpu_data.flags |= CPU_HAS_DSP;
/* Now that we've determined the DSP status, clear the DSP bit. */
release_dsp();
@@ -204,18 +204,18 @@ asmlinkage void __init sh_cpu_init(void)
cache_init();
shm_align_mask = max_t(unsigned long,
cpu_data->dcache.way_size - 1,
current_cpu_data.dcache.way_size - 1,
PAGE_SIZE - 1);
/* Disable the FPU */
if (fpu_disabled) {
printk("FPU Disabled\n");
cpu_data->flags &= ~CPU_HAS_FPU;
current_cpu_data.flags &= ~CPU_HAS_FPU;
disable_fpu();
}
/* FPU initialization */
if ((cpu_data->flags & CPU_HAS_FPU)) {
if ((current_cpu_data.flags & CPU_HAS_FPU)) {
clear_thread_flag(TIF_USEDFPU);
clear_used_math();
}
@@ -233,7 +233,7 @@ asmlinkage void __init sh_cpu_init(void)
/* Disable the DSP */
if (dsp_disabled) {
printk("DSP Disabled\n");
cpu_data->flags &= ~CPU_HAS_DSP;
current_cpu_data.flags &= ~CPU_HAS_DSP;
release_dsp();
}
#endif
@@ -18,27 +18,27 @@
int __init detect_cpu_and_cache_system(void)
{
#if defined(CONFIG_CPU_SUBTYPE_SH7604)
cpu_data->type = CPU_SH7604;
cpu_data->dcache.ways = 4;
cpu_data->dcache.way_incr = (1<<10);
cpu_data->dcache.sets = 64;
cpu_data->dcache.entry_shift = 4;
cpu_data->dcache.linesz = L1_CACHE_BYTES;
cpu_data->dcache.flags = 0;
current_cpu_data.type = CPU_SH7604;
current_cpu_data.dcache.ways = 4;
current_cpu_data.dcache.way_incr = (1<<10);
current_cpu_data.dcache.sets = 64;
current_cpu_data.dcache.entry_shift = 4;
current_cpu_data.dcache.linesz = L1_CACHE_BYTES;
current_cpu_data.dcache.flags = 0;
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
cpu_data->type = CPU_SH7619;
cpu_data->dcache.ways = 4;
cpu_data->dcache.way_incr = (1<<12);
cpu_data->dcache.sets = 256;
cpu_data->dcache.entry_shift = 4;
cpu_data->dcache.linesz = L1_CACHE_BYTES;
cpu_data->dcache.flags = 0;
current_cpu_data.type = CPU_SH7619;
current_cpu_data.dcache.ways = 4;
current_cpu_data.dcache.way_incr = (1<<12);
current_cpu_data.dcache.sets = 256;
current_cpu_data.dcache.entry_shift = 4;
current_cpu_data.dcache.linesz = L1_CACHE_BYTES;
current_cpu_data.dcache.flags = 0;
#endif
/*
* SH-2 doesn't have separate caches
*/
cpu_data->dcache.flags |= SH_CACHE_COMBINED;
cpu_data->icache = cpu_data->dcache;
current_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
current_cpu_data.icache = current_cpu_data.dcache;
return 0;
}
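The probes above fill in the raw geometry (ways, sets, entry_shift, way_incr, linesz) that cache_init() and the flush loops later turn into waysize, way_size and entry_mask. A quick stand-alone check of that arithmetic, using the SH7619 numbers from this hunk; the 16-byte line size is inferred from entry_shift = 4 and is an assumption here, since L1_CACHE_BYTES is defined elsewhere:

#include <stdio.h>

int main(void)
{
	/* SH7619 values as set in detect_cpu_and_cache_system() above */
	unsigned int ways = 4, way_incr = 1 << 12, sets = 256, entry_shift = 4;
	unsigned int linesz = 1u << entry_shift;	/* assumed 16-byte lines */

	unsigned int waysize = sets << entry_shift;	/* span walked per way in cache_init()   */
	unsigned int way_size = sets * linesz;		/* as computed for dcache.way_size       */
	unsigned int entry_mask = way_incr - linesz;	/* formula used by the SH-4 probe        */

	printf("waysize=%u way_size=%u entry_mask=%#x total=%u bytes\n",
	       waysize, way_size, entry_mask, ways * way_size);
	/* prints: waysize=4096 way_size=4096 entry_mask=0xff0 total=16384 bytes */
	return 0;
}

The 16 KB total matches the "7709A/7729 has 16K cache (256-entry)" comment in the SH-3 probe further down, and 0xff0 matches the entry_mask hard-coded there.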
@@ -17,14 +17,14 @@
int __init detect_cpu_and_cache_system(void)
{
/* Just SH7206 for now .. */
cpu_data->type = CPU_SH7206;
current_cpu_data.type = CPU_SH7206;
cpu_data->dcache.ways = 4;
cpu_data->dcache.way_incr = (1 << 11);
cpu_data->dcache.sets = 128;
cpu_data->dcache.entry_shift = 4;
cpu_data->dcache.linesz = L1_CACHE_BYTES;
cpu_data->dcache.flags = 0;
current_cpu_data.dcache.ways = 4;
current_cpu_data.dcache.way_incr = (1 << 11);
current_cpu_data.dcache.sets = 128;
current_cpu_data.dcache.entry_shift = 4;
current_cpu_data.dcache.linesz = L1_CACHE_BYTES;
current_cpu_data.dcache.flags = 0;
/*
* The icache is the same as the dcache as far as this setup is
@@ -32,7 +32,7 @@ int __init detect_cpu_and_cache_system(void)
* lacks the U bit that the dcache has, none of this has any bearing
* on the cache info.
*/
cpu_data->icache = cpu_data->dcache;
current_cpu_data.icache = current_cpu_data.dcache;
return 0;
}
@@ -50,41 +50,41 @@ int __init detect_cpu_and_cache_system(void)
back_to_P1();
cpu_data->dcache.ways = 4;
cpu_data->dcache.entry_shift = 4;
cpu_data->dcache.linesz = L1_CACHE_BYTES;
cpu_data->dcache.flags = 0;
current_cpu_data.dcache.ways = 4;
current_cpu_data.dcache.entry_shift = 4;
current_cpu_data.dcache.linesz = L1_CACHE_BYTES;
current_cpu_data.dcache.flags = 0;
/*
* 7709A/7729 has 16K cache (256-entry), while 7702 has only
* 2K(direct) 7702 is not supported (yet)
*/
if (data0 == data1 && data2 == data3) { /* Shadow */
cpu_data->dcache.way_incr = (1 << 11);
cpu_data->dcache.entry_mask = 0x7f0;
cpu_data->dcache.sets = 128;
cpu_data->type = CPU_SH7708;
current_cpu_data.dcache.way_incr = (1 << 11);
current_cpu_data.dcache.entry_mask = 0x7f0;
current_cpu_data.dcache.sets = 128;
current_cpu_data.type = CPU_SH7708;
cpu_data->flags |= CPU_HAS_MMU_PAGE_ASSOC;
current_cpu_data.flags |= CPU_HAS_MMU_PAGE_ASSOC;
} else { /* 7709A or 7729 */
cpu_data->dcache.way_incr = (1 << 12);
cpu_data->dcache.entry_mask = 0xff0;
cpu_data->dcache.sets = 256;
cpu_data->type = CPU_SH7729;
current_cpu_data.dcache.way_incr = (1 << 12);
current_cpu_data.dcache.entry_mask = 0xff0;
current_cpu_data.dcache.sets = 256;
current_cpu_data.type = CPU_SH7729;
#if defined(CONFIG_CPU_SUBTYPE_SH7706)
cpu_data->type = CPU_SH7706;
current_cpu_data.type = CPU_SH7706;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7710)
cpu_data->type = CPU_SH7710;
current_cpu_data.type = CPU_SH7710;
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7705)
cpu_data->type = CPU_SH7705;
current_cpu_data.type = CPU_SH7705;
#if defined(CONFIG_SH7705_CACHE_32KB)
cpu_data->dcache.way_incr = (1 << 13);
cpu_data->dcache.entry_mask = 0x1ff0;
cpu_data->dcache.sets = 512;
current_cpu_data.dcache.way_incr = (1 << 13);
current_cpu_data.dcache.entry_mask = 0x1ff0;
current_cpu_data.dcache.sets = 512;
ctrl_outl(CCR_CACHE_32KB, CCR3);
#else
ctrl_outl(CCR_CACHE_16KB, CCR3);
@@ -95,8 +95,8 @@ int __init detect_cpu_and_cache_system(void)
/*
* SH-3 doesn't have separate caches
*/
cpu_data->dcache.flags |= SH_CACHE_COMBINED;
cpu_data->icache = cpu_data->dcache;
current_cpu_data.dcache.flags |= SH_CACHE_COMBINED;
current_cpu_data.icache = current_cpu_data.dcache;
return 0;
}
@@ -10,11 +10,10 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/io.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
int __init detect_cpu_and_cache_system(void)
{
@@ -36,20 +35,20 @@ int __init detect_cpu_and_cache_system(void)
/*
* Setup some sane SH-4 defaults for the icache
*/
cpu_data->icache.way_incr = (1 << 13);
cpu_data->icache.entry_shift = 5;
cpu_data->icache.sets = 256;
cpu_data->icache.ways = 1;
cpu_data->icache.linesz = L1_CACHE_BYTES;
current_cpu_data.icache.way_incr = (1 << 13);
current_cpu_data.icache.entry_shift = 5;
current_cpu_data.icache.sets = 256;
current_cpu_data.icache.ways = 1;
current_cpu_data.icache.linesz = L1_CACHE_BYTES;
/*
* And again for the dcache ..
*/
cpu_data->dcache.way_incr = (1 << 14);
cpu_data->dcache.entry_shift = 5;
cpu_data->dcache.sets = 512;
cpu_data->dcache.ways = 1;
cpu_data->dcache.linesz = L1_CACHE_BYTES;
current_cpu_data.dcache.way_incr = (1 << 14);
current_cpu_data.dcache.entry_shift = 5;
current_cpu_data.dcache.sets = 512;
current_cpu_data.dcache.ways = 1;
current_cpu_data.dcache.linesz = L1_CACHE_BYTES;
/*
* Setup some generic flags we can probe
@@ -57,16 +56,16 @@
*/
if (((pvr >> 16) & 0xff) == 0x10) {
if ((cvr & 0x02000000) == 0)
cpu_data->flags |= CPU_HAS_L2_CACHE;
current_cpu_data.flags |= CPU_HAS_L2_CACHE;
if ((cvr & 0x10000000) == 0)
cpu_data->flags |= CPU_HAS_DSP;
current_cpu_data.flags |= CPU_HAS_DSP;
cpu_data->flags |= CPU_HAS_LLSC;
current_cpu_data.flags |= CPU_HAS_LLSC;
}
/* FPU detection works for everyone */
if ((cvr & 0x20000000) == 1)
cpu_data->flags |= CPU_HAS_FPU;
current_cpu_data.flags |= CPU_HAS_FPU;
/* Mask off the upper chip ID */
pvr &= 0xffff;
@@ -77,147 +76,151 @@ int __init detect_cpu_and_cache_system(void)
*/
switch (pvr) {
case 0x205:
cpu_data->type = CPU_SH7750;
cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
current_cpu_data.type = CPU_SH7750;
current_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
CPU_HAS_PERF_COUNTER;
break;
case 0x206:
cpu_data->type = CPU_SH7750S;
cpu_data->flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
current_cpu_data.type = CPU_SH7750S;
current_cpu_data.flags |= CPU_HAS_P2_FLUSH_BUG | CPU_HAS_FPU |
CPU_HAS_PERF_COUNTER;
break;
case 0x1100:
cpu_data->type = CPU_SH7751;
cpu_data->flags |= CPU_HAS_FPU;
current_cpu_data.type = CPU_SH7751;
current_cpu_data.flags |= CPU_HAS_FPU;
break;
case 0x2000:
cpu_data->type = CPU_SH73180;
cpu_data->icache.ways = 4;
cpu_data->dcache.ways = 4;
cpu_data->flags |= CPU_HAS_LLSC;
current_cpu_data.type = CPU_SH73180;
current_cpu_data.icache.ways = 4;
current_cpu_data.dcache.ways = 4;
current_cpu_data.flags |= CPU_HAS_LLSC;
break;
case 0x2001:
case 0x2004:
cpu_data->type = CPU_SH7770;
cpu_data->icache.ways = 4;
cpu_data->dcache.ways = 4;
current_cpu_data.type = CPU_SH7770;
current_cpu_data.icache.ways = 4;
current_cpu_data.dcache.ways = 4;
cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_LLSC;
current_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_LLSC;
break;
case 0x2006:
case 0x200A:
if (prr == 0x61)
cpu_data->type = CPU_SH7781;
current_cpu_data.type = CPU_SH7781;
else
cpu_data->type = CPU_SH7780;
current_cpu_data.type = CPU_SH7780;
cpu_data->icache.ways = 4;
cpu_data->dcache.ways = 4;
current_cpu_data.icache.ways = 4;
current_cpu_data.dcache.ways = 4;
cpu_data->flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER |
current_cpu_data.flags |= CPU_HAS_FPU | CPU_HAS_PERF_COUNTER |
CPU_HAS_LLSC;
break;
case 0x3000:
case 0x3003:
case 0x3009:
cpu_data->type = CPU_SH7343;
cpu_data->icache.ways = 4;
cpu_data->dcache.ways = 4;
cpu_data->flags |= CPU_HAS_LLSC;
current_cpu_data.type = CPU_SH7343;
current_cpu_data.icache.ways = 4;
current_cpu_data.dcache.ways = 4;
current_cpu_data.flags |= CPU_HAS_LLSC;
break;
case 0x3008:
if (prr == 0xa0) {
cpu_data->type = CPU_SH7722;
cpu_data->icache.ways = 4;
cpu_data->dcache.ways = 4;
cpu_data->flags |= CPU_HAS_LLSC;
current_cpu_data.type = CPU_SH7722;
current_cpu_data.icache.ways = 4;
current_cpu_data.dcache.ways = 4;
current_cpu_data.flags |= CPU_HAS_LLSC;
}
break;
case 0x8000:
cpu_data->type = CPU_ST40RA;
cpu_data->flags |= CPU_HAS_FPU;
current_cpu_data.type = CPU_ST40RA;
current_cpu_data.flags |= CPU_HAS_FPU;
break;
case 0x8100:
cpu_data->type = CPU_ST40GX1;
cpu_data->flags |= CPU_HAS_FPU;
current_cpu_data.type = CPU_ST40GX1;
current_cpu_data.flags |= CPU_HAS_FPU;
break;
case 0x700:
cpu_data->type = CPU_SH4_501;
cpu_data->icache.ways = 2;
cpu_data->dcache.ways = 2;
current_cpu_data.type = CPU_SH4_501;
current_cpu_data.icache.ways = 2;
current_cpu_data.dcache.ways = 2;
break;
case 0x600:
cpu_data->type = CPU_SH4_202;
cpu_data->icache.ways = 2;
cpu_data->dcache.ways = 2;
cpu_data->flags |= CPU_HAS_FPU;
current_cpu_data.type = CPU_SH4_202;
current_cpu_data.icache.ways = 2;
current_cpu_data.dcache.ways = 2;
current_cpu_data.flags |= CPU_HAS_FPU;
break;
case 0x500 ... 0x501:
switch (prr) {
case 0x10:
cpu_data->type = CPU_SH7750R;
current_cpu_data.type = CPU_SH7750R;
break;
case 0x11:
cpu_data->type = CPU_SH7751R;
current_cpu_data.type = CPU_SH7751R;
break;
case 0x50 ... 0x5f:
cpu_data->type = CPU_SH7760;
current_cpu_data.type = CPU_SH7760;
break;
}
cpu_data->icache.ways = 2;
cpu_data->dcache.ways = 2;
current_cpu_data.icache.ways = 2;
current_cpu_data.dcache.ways = 2;
cpu_data->flags |= CPU_HAS_FPU;
current_cpu_data.flags |= CPU_HAS_FPU;
break;
default:
cpu_data->type = CPU_SH_NONE;
current_cpu_data.type = CPU_SH_NONE;
break;
}
#ifdef CONFIG_SH_DIRECT_MAPPED
cpu_data->icache.ways = 1;
cpu_data->dcache.ways = 1;
current_cpu_data.icache.ways = 1;
current_cpu_data.dcache.ways = 1;
#endif
#ifdef CONFIG_CPU_HAS_PTEA
current_cpu_data.flags |= CPU_HAS_PTEA;
#endif
/*
* On anything that's not a direct-mapped cache, look to the CVR
* for I/D-cache specifics.
*/
if (cpu_data->icache.ways > 1) {
if (current_cpu_data.icache.ways > 1) {
size = sizes[(cvr >> 20) & 0xf];
cpu_data->icache.way_incr = (size >> 1);
cpu_data->icache.sets = (size >> 6);
current_cpu_data.icache.way_incr = (size >> 1);
current_cpu_data.icache.sets = (size >> 6);
}
/* Setup the rest of the I-cache info */
cpu_data->icache.entry_mask = cpu_data->icache.way_incr -
cpu_data->icache.linesz;
current_cpu_data.icache.entry_mask = current_cpu_data.icache.way_incr -
current_cpu_data.icache.linesz;
cpu_data->icache.way_size = cpu_data->icache.sets *
cpu_data->icache.linesz;
current_cpu_data.icache.way_size = current_cpu_data.icache.sets *
current_cpu_data.icache.linesz;
/* And the rest of the D-cache */
if (cpu_data->dcache.ways > 1) {
if (current_cpu_data.dcache.ways > 1) {
size = sizes[(cvr >> 16) & 0xf];
cpu_data->dcache.way_incr = (size >> 1);
cpu_data->dcache.sets = (size >> 6);
current_cpu_data.dcache.way_incr = (size >> 1);
current_cpu_data.dcache.sets = (size >> 6);
}
cpu_data->dcache.entry_mask = cpu_data->dcache.way_incr -
cpu_data->dcache.linesz;
current_cpu_data.dcache.entry_mask = current_cpu_data.dcache.way_incr -
current_cpu_data.dcache.linesz;
cpu_data->dcache.way_size = cpu_data->dcache.sets *
cpu_data->dcache.linesz;
current_cpu_data.dcache.way_size = current_cpu_data.dcache.sets *
current_cpu_data.dcache.linesz;
/*
* Setup the L2 cache desc
*
* SH-4A's have an optional PIPT L2.
*/
if (cpu_data->flags & CPU_HAS_L2_CACHE) {
if (current_cpu_data.flags & CPU_HAS_L2_CACHE) {
/*
* Size calculation is much more sensible
* than it is for the L1.
@@ -228,16 +231,22 @@ int __init detect_cpu_and_cache_system(void)
BUG_ON(!size);
cpu_data->scache.way_incr = (1 << 16);
cpu_data->scache.entry_shift = 5;
cpu_data->scache.ways = 4;
cpu_data->scache.linesz = L1_CACHE_BYTES;
cpu_data->scache.entry_mask =
(cpu_data->scache.way_incr - cpu_data->scache.linesz);
cpu_data->scache.sets = size /
(cpu_data->scache.linesz * cpu_data->scache.ways);
cpu_data->scache.way_size =
(cpu_data->scache.sets * cpu_data->scache.linesz);
current_cpu_data.scache.way_incr = (1 << 16);
current_cpu_data.scache.entry_shift = 5;
current_cpu_data.scache.ways = 4;
current_cpu_data.scache.linesz = L1_CACHE_BYTES;
current_cpu_data.scache.entry_mask =
(current_cpu_data.scache.way_incr -
current_cpu_data.scache.linesz);
current_cpu_data.scache.sets = size /
(current_cpu_data.scache.linesz *
current_cpu_data.scache.ways);
current_cpu_data.scache.way_size =
(current_cpu_data.scache.sets *
current_cpu_data.scache.linesz);
}
return 0;
@@ -293,13 +293,14 @@ static void ubc_set_tracing(int asid, unsigned long pc)
#ifdef CONFIG_MMU
/* We don't have any ASID settings for the SH-2! */
if (cpu_data->type != CPU_SH7604)
if (current_cpu_data.type != CPU_SH7604)
ctrl_outb(asid, UBC_BASRA);
#endif
ctrl_outl(0, UBC_BAMRA);
if (cpu_data->type == CPU_SH7729 || cpu_data->type == CPU_SH7710) {
if (current_cpu_data.type == CPU_SH7729 ||
current_cpu_data.type == CPU_SH7710) {
ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
} else {
/*
* linux/arch/sh/kernel/setup.c
* arch/sh/kernel/setup.c
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2002, 2003 Paul Mundt
*/
/*
* This file handles the architecture-dependent parts of initialization
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2002 - 2006 Paul Mundt
*/
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -395,9 +392,9 @@ static const char *cpu_name[] = {
[CPU_SH_NONE] = "Unknown"
};
const char *get_cpu_subtype(void)
const char *get_cpu_subtype(struct sh_cpuinfo *c)
{
return cpu_name[boot_cpu_data.type];
return cpu_name[c->type];
}
#ifdef CONFIG_PROC_FS
@@ -407,19 +404,19 @@ static const char *cpu_flags[] = {
"ptea", "llsc", "l2", NULL
};
static void show_cpuflags(struct seq_file *m)
static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
{
unsigned long i;
seq_printf(m, "cpu flags\t:");
if (!cpu_data->flags) {
if (!c->flags) {
seq_printf(m, " %s\n", cpu_flags[0]);
return;
}
for (i = 0; cpu_flags[i]; i++)
if ((cpu_data->flags & (1 << i)))
if ((c->flags & (1 << i)))
seq_printf(m, " %s", cpu_flags[i+1]);
seq_printf(m, "\n");
@@ -441,16 +438,20 @@ static void show_cacheinfo(struct seq_file *m, const char *type,
*/
static int show_cpuinfo(struct seq_file *m, void *v)
{
unsigned int cpu = smp_processor_id();
struct sh_cpuinfo *c = v;
unsigned int cpu = c - cpu_data;
if (!cpu_online(cpu))
return 0;
if (!cpu && cpu_online(cpu))
if (cpu == 0)
seq_printf(m, "machine\t\t: %s\n", get_system_type());
seq_printf(m, "processor\t: %d\n", cpu);
seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype());
seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
show_cpuflags(m);
show_cpuflags(m, c);
seq_printf(m, "cache type\t: ");
@@ -459,22 +460,22 @@ static int show_cpuinfo(struct seq_file *m, void *v)
* unified cache on the SH-2 and SH-3, as well as the harvard
* style cache on the SH-4.
*/
if (boot_cpu_data.icache.flags & SH_CACHE_COMBINED) {
if (c->icache.flags & SH_CACHE_COMBINED) {
seq_printf(m, "unified\n");
show_cacheinfo(m, "cache", boot_cpu_data.icache);
show_cacheinfo(m, "cache", c->icache);
} else {
seq_printf(m, "split (harvard)\n");
show_cacheinfo(m, "icache", boot_cpu_data.icache);
show_cacheinfo(m, "dcache", boot_cpu_data.dcache);
show_cacheinfo(m, "icache", c->icache);
show_cacheinfo(m, "dcache", c->dcache);
}
/* Optional secondary cache */
if (boot_cpu_data.flags & CPU_HAS_L2_CACHE)
show_cacheinfo(m, "scache", boot_cpu_data.scache);
if (c->flags & CPU_HAS_L2_CACHE)
show_cacheinfo(m, "scache", c->scache);
seq_printf(m, "bogomips\t: %lu.%02lu\n",
boot_cpu_data.loops_per_jiffy/(500000/HZ),
(boot_cpu_data.loops_per_jiffy/(5000/HZ)) % 100);
c->loops_per_jiffy/(500000/HZ),
(c->loops_per_jiffy/(5000/HZ)) % 100);
return show_clocks(m);
}
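The show_cpuinfo() change above stops hard-coding boot_cpu_data and instead reports whichever struct sh_cpuinfo the seq_file iterator hands in, recovering the CPU number by pointer arithmetic ("cpu = c - cpu_data"). A minimal stand-alone sketch of that indexing trick, with a mocked-up cpu_data[] array (the kernel's iterator callbacks are not part of this diff):

#include <stdio.h>

#define NR_CPUS 2

struct sh_cpuinfo {
	int type;
};

static struct sh_cpuinfo cpu_data[NR_CPUS];

/* Mirrors "unsigned int cpu = c - cpu_data;" in show_cpuinfo():
 * element pointer minus array base yields the element's index.
 */
static unsigned int cpu_index(struct sh_cpuinfo *c)
{
	return (unsigned int)(c - cpu_data);
}

int main(void)
{
	printf("%u %u\n", cpu_index(&cpu_data[0]), cpu_index(&cpu_data[1]));	/* 0 1 */
	return 0;
}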
@@ -127,7 +127,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
{
struct task_struct *tsk = current;
if (!(cpu_data->flags & CPU_HAS_FPU))
if (!(current_cpu_data.flags & CPU_HAS_FPU))
return 0;
set_used_math();
@@ -140,7 +140,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
{
struct task_struct *tsk = current;
if (!(cpu_data->flags & CPU_HAS_FPU))
if (!(current_cpu_data.flags & CPU_HAS_FPU))
return 0;
if (!used_math()) {
@@ -181,7 +181,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
#undef COPY
#ifdef CONFIG_SH_FPU
if (cpu_data->flags & CPU_HAS_FPU) {
if (current_cpu_data.flags & CPU_HAS_FPU) {
int owned_fp;
struct task_struct *tsk = current;
@@ -641,7 +641,7 @@ int is_dsp_inst(struct pt_regs *regs)
* Safe guard if DSP mode is already enabled or we're lacking
* the DSP altogether.
*/
if (!(cpu_data->flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
return 0;
get_user(inst, ((unsigned short *) regs->pc));
@@ -46,10 +46,10 @@ static int cache_seq_show(struct seq_file *file, void *iter)
if (cache_type == CACHE_TYPE_DCACHE) {
base = CACHE_OC_ADDRESS_ARRAY;
cache = &cpu_data->dcache;
cache = &current_cpu_data.dcache;
} else {
base = CACHE_IC_ADDRESS_ARRAY;
cache = &cpu_data->icache;
cache = &current_cpu_data.icache;
}
/*
@@ -44,11 +44,11 @@ void __flush_wback_region(void *start, int size)
for (v = begin; v < end; v+=L1_CACHE_BYTES) {
unsigned long addrstart = CACHE_OC_ADDRESS_ARRAY;
for (j = 0; j < cpu_data->dcache.ways; j++) {
for (j = 0; j < current_cpu_data.dcache.ways; j++) {
unsigned long data, addr, p;
p = __pa(v);
addr = addrstart | (v & cpu_data->dcache.entry_mask);
addr = addrstart | (v & current_cpu_data.dcache.entry_mask);
local_irq_save(flags);
data = ctrl_inl(addr);
@@ -60,7 +60,7 @@ void __flush_wback_region(void *start, int size)
break;
}
local_irq_restore(flags);
addrstart += cpu_data->dcache.way_incr;
addrstart += current_cpu_data.dcache.way_incr;
}
}
}
@@ -85,7 +85,7 @@ void __flush_purge_region(void *start, int size)
data = (v & 0xfffffc00); /* _Virtual_ address, ~U, ~V */
addr = CACHE_OC_ADDRESS_ARRAY |
(v & cpu_data->dcache.entry_mask) | SH_CACHE_ASSOC;
(v & current_cpu_data.dcache.entry_mask) | SH_CACHE_ASSOC;
ctrl_outl(data, addr);
}
}
@@ -54,21 +54,21 @@ static void __init emit_cache_params(void)
ctrl_inl(CCN_CVR),
ctrl_inl(CCN_PRR));
printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
cpu_data->icache.ways,
cpu_data->icache.sets,
cpu_data->icache.way_incr);
current_cpu_data.icache.ways,
current_cpu_data.icache.sets,
current_cpu_data.icache.way_incr);
printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
cpu_data->icache.entry_mask,
cpu_data->icache.alias_mask,
cpu_data->icache.n_aliases);
current_cpu_data.icache.entry_mask,
current_cpu_data.icache.alias_mask,
current_cpu_data.icache.n_aliases);
printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
cpu_data->dcache.ways,
cpu_data->dcache.sets,
cpu_data->dcache.way_incr);
current_cpu_data.dcache.ways,
current_cpu_data.dcache.sets,
current_cpu_data.dcache.way_incr);
printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
cpu_data->dcache.entry_mask,
cpu_data->dcache.alias_mask,
cpu_data->dcache.n_aliases);
current_cpu_data.dcache.entry_mask,
current_cpu_data.dcache.alias_mask,
current_cpu_data.dcache.n_aliases);
if (!__flush_dcache_segment_fn)
panic("unknown number of cache ways\n");
@@ -87,10 +87,10 @@ void __init p3_cache_init(void)
{
int i;
compute_alias(&cpu_data->icache);
compute_alias(&cpu_data->dcache);
compute_alias(&current_cpu_data.icache);
compute_alias(&current_cpu_data.dcache);
switch (cpu_data->dcache.ways) {
switch (current_cpu_data.dcache.ways) {
case 1:
__flush_dcache_segment_fn = __flush_dcache_segment_1way;
break;
@@ -110,7 +110,7 @@ void __init p3_cache_init(void)
if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
panic("%s failed.", __FUNCTION__);
for (i = 0; i < cpu_data->dcache.n_aliases; i++)
for (i = 0; i < current_cpu_data.dcache.n_aliases; i++)
mutex_init(&p3map_mutex[i]);
}
@@ -200,13 +200,14 @@ void flush_cache_sigtramp(unsigned long addr)
: /* no output */
: "m" (__m(v)));
index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);
index = CACHE_IC_ADDRESS_ARRAY |
(v & current_cpu_data.icache.entry_mask);
local_irq_save(flags);
jump_to_P2();
for (i = 0; i < cpu_data->icache.ways;
i++, index += cpu_data->icache.way_incr)
for (i = 0; i < current_cpu_data.icache.ways;
i++, index += current_cpu_data.icache.way_incr)
ctrl_outl(0, index); /* Clear out Valid-bit */
back_to_P1();
@@ -223,7 +224,7 @@ static inline void flush_cache_4096(unsigned long start,
* All types of SH-4 require PC to be in P2 to operate on the I-cache.
* Some types of SH-4 require PC to be in P2 to operate on the D-cache.
*/
if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG) ||
if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
(start < CACHE_OC_ADDRESS_ARRAY))
exec_offset = 0x20000000;
@@ -255,7 +256,7 @@ void flush_dcache_page(struct page *page)
int i, n;
/* Loop all the D-cache */
n = cpu_data->dcache.n_aliases;
n = current_cpu_data.dcache.n_aliases;
for (i = 0; i < n; i++, addr += 4096)
flush_cache_4096(addr, phys);
}
@@ -287,7 +288,7 @@ static inline void flush_icache_all(void)
void flush_dcache_all(void)
{
(*__flush_dcache_segment_fn)(0UL, cpu_data->dcache.way_size);
(*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
wmb();
}
@@ -301,8 +302,8 @@ static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
unsigned long end)
{
unsigned long d = 0, p = start & PAGE_MASK;
unsigned long alias_mask = cpu_data->dcache.alias_mask;
unsigned long n_aliases = cpu_data->dcache.n_aliases;
unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
unsigned long select_bit;
unsigned long all_aliases_mask;
unsigned long addr_offset;
@@ -389,7 +390,7 @@ void flush_cache_mm(struct mm_struct *mm)
* If cache is only 4k-per-way, there are never any 'aliases'. Since
* the cache is physically tagged, the data can just be left in there.
*/
if (cpu_data->dcache.n_aliases == 0)
if (current_cpu_data.dcache.n_aliases == 0)
return;
/*
@@ -426,7 +427,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
unsigned long phys = pfn << PAGE_SHIFT;
unsigned int alias_mask;
alias_mask = cpu_data->dcache.alias_mask;
alias_mask = current_cpu_data.dcache.alias_mask;
/* We only need to flush D-cache when we have alias */
if ((address^phys) & alias_mask) {
@@ -440,7 +441,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
phys);
}
alias_mask = cpu_data->icache.alias_mask;
alias_mask = current_cpu_data.icache.alias_mask;
if (vma->vm_flags & VM_EXEC) {
/*
* Evict entries from the portion of the cache from which code
@@ -472,7 +473,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
* If cache is only 4k-per-way, there are never any 'aliases'. Since
* the cache is physically tagged, the data can just be left in there.
*/
if (cpu_data->dcache.n_aliases == 0)
if (current_cpu_data.dcache.n_aliases == 0)
return;
/*
@@ -533,7 +534,7 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
unsigned long a, ea, p;
unsigned long temp_pc;
dcache = &cpu_data->dcache;
dcache = &current_cpu_data.dcache;
/* Write this way for better assembly. */
way_count = dcache->ways;
way_incr = dcache->way_incr;
@@ -608,7 +609,7 @@ static void __flush_dcache_segment_1way(unsigned long start,
base_addr = ((base_addr >> 16) << 16);
base_addr |= start;
dcache = &cpu_data->dcache;
dcache = &current_cpu_data.dcache;
linesz = dcache->linesz;
way_incr = dcache->way_incr;
way_size = dcache->way_size;
@@ -650,7 +651,7 @@ static void __flush_dcache_segment_2way(unsigned long start,
base_addr = ((base_addr >> 16) << 16);
base_addr |= start;
dcache = &cpu_data->dcache;
dcache = &current_cpu_data.dcache;
linesz = dcache->linesz;
way_incr = dcache->way_incr;
way_size = dcache->way_size;
@@ -709,7 +710,7 @@ static void __flush_dcache_segment_4way(unsigned long start,
base_addr = ((base_addr >> 16) << 16);
base_addr |= start;
dcache = &cpu_data->dcache;
dcache = &current_cpu_data.dcache;
linesz = dcache->linesz;
way_incr = dcache->way_incr;
way_size = dcache->way_size;
@@ -32,9 +32,9 @@ static inline void cache_wback_all(void)
{
unsigned long ways, waysize, addrstart;
ways = cpu_data->dcache.ways;
waysize = cpu_data->dcache.sets;
waysize <<= cpu_data->dcache.entry_shift;
ways = current_cpu_data.dcache.ways;
waysize = current_cpu_data.dcache.sets;
waysize <<= current_cpu_data.dcache.entry_shift;
addrstart = CACHE_OC_ADDRESS_ARRAY;
@@ -43,7 +43,7 @@ static inline void cache_wback_all(void)
for (addr = addrstart;
addr < addrstart + waysize;
addr += cpu_data->dcache.linesz) {
addr += current_cpu_data.dcache.linesz) {
unsigned long data;
int v = SH_CACHE_UPDATED | SH_CACHE_VALID;
@@ -53,7 +53,7 @@ static inline void cache_wback_all(void)
ctrl_outl(data & ~v, addr);
}
addrstart += cpu_data->dcache.way_incr;
addrstart += current_cpu_data.dcache.way_incr;
} while (--ways);
}
@@ -93,9 +93,9 @@ static void __flush_dcache_page(unsigned long phys)
local_irq_save(flags);
jump_to_P2();
ways = cpu_data->dcache.ways;
waysize = cpu_data->dcache.sets;
waysize <<= cpu_data->dcache.entry_shift;
ways = current_cpu_data.dcache.ways;
waysize = current_cpu_data.dcache.sets;
waysize <<= current_cpu_data.dcache.entry_shift;
addrstart = CACHE_OC_ADDRESS_ARRAY;
@@ -104,7 +104,7 @@ static void __flush_dcache_page(unsigned long phys)
for (addr = addrstart;
addr < addrstart + waysize;
addr += cpu_data->dcache.linesz) {
addr += current_cpu_data.dcache.linesz) {
unsigned long data;
data = ctrl_inl(addr) & (0x1ffffC00 | SH_CACHE_VALID);
@@ -114,7 +114,7 @@ static void __flush_dcache_page(unsigned long phys)
}
}
addrstart += cpu_data->dcache.way_incr;
addrstart += current_cpu_data.dcache.way_incr;
} while (--ways);
back_to_P1();
@@ -13,7 +13,7 @@
extern struct mutex p3map_mutex[];
#define CACHE_ALIAS (cpu_data->dcache.alias_mask)
#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
/*
* clear_user_page
@@ -43,13 +43,13 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
p = __pa(p1_begin);
ways = cpu_data->dcache.ways;
ways = current_cpu_data.dcache.ways;
addr = CACHE_OC_ADDRESS_ARRAY;
do {
unsigned long data;
addr |= (v & cpu_data->dcache.entry_mask);
addr |= (v & current_cpu_data.dcache.entry_mask);
data = ctrl_inl(addr);
if ((data & CACHE_PHYSADDR_MASK) ==
@@ -58,7 +58,7 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
ctrl_outl(data, addr);
}
addr += cpu_data->dcache.way_incr;
addr += current_cpu_data.dcache.way_incr;
} while (--ways);
p1_begin += L1_CACHE_BYTES;
@@ -26,7 +26,7 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
data = (page & 0xfffe0000) | asid; /* VALID bit is off */
if ((cpu_data->flags & CPU_HAS_MMU_PAGE_ASSOC)) {
if ((current_cpu_data.flags & CPU_HAS_MMU_PAGE_ASSOC)) {
addr |= MMU_PAGE_ASSOC_BIT;
ways = 1; /* we already know the way .. */
}
@@ -259,7 +259,7 @@ static struct oprofile_operations sh7750_perf_counter_ops = {
int __init oprofile_arch_init(struct oprofile_operations **ops)
{
if (!(cpu_data->flags & CPU_HAS_PERF_COUNTER))
if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER))
return -ENODEV;
sh7750_perf_counter_ops.cpu_type = (char *)get_cpu_subtype();
@@ -19,9 +19,9 @@ static void __init check_bugs(void)
extern unsigned long loops_per_jiffy;
char *p = &init_utsname()->machine[2]; /* "sh" */
cpu_data->loops_per_jiffy = loops_per_jiffy;
current_cpu_data.loops_per_jiffy = loops_per_jiffy;
switch (cpu_data->type) {
switch (current_cpu_data.type) {
case CPU_SH7604 ... CPU_SH7619:
*p++ = '2';
break;
@@ -54,7 +54,7 @@ static void __init check_bugs(void)
break;
}
printk("CPU: %s\n", get_cpu_subtype());
printk("CPU: %s\n", get_cpu_subtype(&current_cpu_data));
#ifndef __LITTLE_ENDIAN__
/* 'eb' means 'Endian Big' */
@@ -27,8 +27,6 @@
#define CCN_CVR 0xff000040
#define CCN_PRR 0xff000044
const char *get_cpu_subtype(void);
/*
* CPU type and hardware bug flags. Kept separately for each CPU.
*
@@ -289,5 +287,8 @@ extern int vsyscall_init(void);
#define vsyscall_init() do { } while (0)
#endif
/* arch/sh/kernel/setup.c */
const char *get_cpu_subtype(struct sh_cpuinfo *c);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_PROCESSOR_H */
@@ -17,7 +17,7 @@
/* User Break Controller */
#if defined(CONFIG_CPU_SUBTYPE_SH7707) || defined(CONFIG_CPU_SUBTYPE_SH7709) || \
defined(CONFIG_CPU_SUBTYPE_SH7300)
#define UBC_TYPE_SH7729 (cpu_data->type == CPU_SH7729)
#define UBC_TYPE_SH7729 (current_cpu_data.type == CPU_SH7729)
#else
#define UBC_TYPE_SH7729 0
#endif