Commit 6e98ee75 authored by Linus Torvalds

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6:
  [SPARC64]: Fill holes in hypervisor APIs and fix KTSB registry.
  [SPARC64]: Fix two bugs wrt. kernel 4MB TSB.
  [SPARC]: Mark as emulating cmpxchg, add appropriate depends for DRM.
  [SPARC]: Emulate cmpxchg like parisc
  [SPARC64]: Fix _PAGE_EXEC_4U check in sun4u I-TLB miss handler.
  [SPARC]: Linux always started with 9600 8N1
  [SPARC64]: arch/sparc64/time.c doesn't compile on Ultra 1 (no PCI)
  [SPARC64]: Eliminate NR_CPUS limitations.
  [SPARC64]: Use machine description and OBP properly for cpu probing.
  [SPARC64]: Negotiate hypervisor API for PCI services.
  [SPARC64]: Report proper system soft state to the hypervisor.
  [SPARC64]: Fix typo in sun4v_hvapi_register error handling.
  [SCSI] ESP: Kill SCSI_ESP_CORE and link directly just like jazz_esp
  [SCSI] jazz_esp: Converted to use esp_core.
  [SPARC64]: PCI device scan is way too verbose by default.
  [SERIAL] sunzilog: section mismatch fix
  [SPARC32]: Removes mismatch section warnings in sparc time.c file
  [SPARC64]: Don't be picky about virtual-dma values on sun4v.
  [SPARC64]: Kill unused DIE_PAGE_FAULT enum value.
  [SCSI] pluto: Use wait_for_completion_timeout.
parents 486b4ce1 7db35f31
...@@ -178,6 +178,13 @@ config ARCH_HAS_ILOG2_U64 ...@@ -178,6 +178,13 @@ config ARCH_HAS_ILOG2_U64
bool bool
default n default n
config EMULATED_CMPXCHG
bool
default y
help
Sparc32 does not have a CAS instruction like sparc64. cmpxchg()
is emulated, and therefore it is not completely atomic.
config SUN_PM config SUN_PM
bool bool
default y default y
......
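The new symbol has no prompt and defaults to y, so it simply advertises the limitation to the rest of the tree; per the "Mark as emulating cmpxchg, add appropriate depends for DRM" entry in the changelog above, a config entry that genuinely needs an atomic cmpxchg can then opt out. A hypothetical consumer, illustrative only and not the actual DRM Kconfig text:

    config EXAMPLE_LOCKFREE_DRIVER
            tristate "Example driver relying on a truly atomic cmpxchg"
            depends on !EMULATED_CMPXCHG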
...@@ -148,7 +148,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) ...@@ -148,7 +148,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
} }
/* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */ /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
static void __init kick_start_clock(void) static void __devinit kick_start_clock(void)
{ {
struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs; struct mostek48t02 *regs = (struct mostek48t02 *)mstk48t02_regs;
unsigned char sec; unsigned char sec;
...@@ -223,7 +223,7 @@ static __inline__ int has_low_battery(void) ...@@ -223,7 +223,7 @@ static __inline__ int has_low_battery(void)
return (data1 == data2); /* Was the write blocked? */ return (data1 == data2); /* Was the write blocked? */
} }
static void __init mostek_set_system_time(void) static void __devinit mostek_set_system_time(void)
{ {
unsigned int year, mon, day, hour, min, sec; unsigned int year, mon, day, hour, min, sec;
struct mostek48t02 *mregs; struct mostek48t02 *mregs;
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
* atomic32.c: 32-bit atomic_t implementation * atomic32.c: 32-bit atomic_t implementation
* *
* Copyright (C) 2004 Keith M Wesolowski * Copyright (C) 2004 Keith M Wesolowski
* Copyright (C) 2007 Kyle McMartin
* *
* Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
*/ */
...@@ -117,3 +118,17 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask) ...@@ -117,3 +118,17 @@ unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
return old & mask; return old & mask;
} }
EXPORT_SYMBOL(___change_bit); EXPORT_SYMBOL(___change_bit);
unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
{
unsigned long flags;
u32 prev;
spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
if ((prev = *ptr) == old)
*ptr = new;
spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
return (unsigned long)prev;
}
EXPORT_SYMBOL(__cmpxchg_u32);
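The helper follows the parisc model named in the changelog: hash the target address into a small array of spinlocks, take the chosen lock with interrupts disabled, and do the compare-and-store under it. A minimal user-space sketch of the same idea, with pthread mutexes standing in for the kernel's ATOMIC_HASH() spinlocks (the hash and all names here are illustrative, not the kernel's):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for ATOMIC_HASH(): hash the address into a
     * small array of locks so unrelated words rarely contend. */
    #define HASH_SIZE 4
    static pthread_mutex_t atomic_locks[HASH_SIZE] = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };
    #define ATOMIC_HASH(p) (&atomic_locks[((uintptr_t)(p) >> 2) % HASH_SIZE])

    /* Emulated cmpxchg: if *ptr still holds 'old', store 'newval';
     * either way return the value that was observed. */
    static uint32_t emu_cmpxchg_u32(volatile uint32_t *ptr, uint32_t old,
                                    uint32_t newval)
    {
            uint32_t prev;

            pthread_mutex_lock(ATOMIC_HASH(ptr));
            if ((prev = *ptr) == old)
                    *ptr = newval;
            pthread_mutex_unlock(ATOMIC_HASH(ptr));

            return prev;
    }

    int main(void)
    {
            volatile uint32_t v = 1;

            /* First exchange succeeds (v was 1), second fails (v is now 2). */
            printf("prev=%u v=%u\n", (unsigned)emu_cmpxchg_u32(&v, 1, 2), (unsigned)v);
            printf("prev=%u v=%u\n", (unsigned)emu_cmpxchg_u32(&v, 1, 3), (unsigned)v);
            return 0;
    }

The "not completely atomic" caveat in the Kconfig help is visible here: the guarantee only holds among callers that go through the same lock hash, which is why the emulation is flagged by CONFIG_EMULATED_CMPXCHG rather than treated as a drop-in CAS.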
...@@ -147,10 +147,10 @@ config SMP ...@@ -147,10 +147,10 @@ config SMP
If you don't know what to do here, say N. If you don't know what to do here, say N.
config NR_CPUS config NR_CPUS
int "Maximum number of CPUs (2-64)" int "Maximum number of CPUs (2-1024)"
range 2 64 range 2 1024
depends on SMP depends on SMP
default "32" default "64"
source "drivers/cpufreq/Kconfig" source "drivers/cpufreq/Kconfig"
......
...@@ -8,11 +8,11 @@ EXTRA_CFLAGS := -Werror ...@@ -8,11 +8,11 @@ EXTRA_CFLAGS := -Werror
extra-y := head.o init_task.o vmlinux.lds extra-y := head.o init_task.o vmlinux.lds
obj-y := process.o setup.o cpu.o idprom.o \ obj-y := process.o setup.o cpu.o idprom.o \
traps.o devices.o auxio.o una_asm.o \ traps.o auxio.o una_asm.o \
irq.o ptrace.o time.o sys_sparc.o signal.o \ irq.o ptrace.o time.o sys_sparc.o signal.o \
unaligned.o central.o pci.o starfire.o semaphore.o \ unaligned.o central.o pci.o starfire.o semaphore.o \
power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \ power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \
visemul.o prom.o of_device.o hvapi.o visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \ obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
......
/* devices.c: Initial scan of the prom device tree for important
* Sparc device nodes which we need to find.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/system.h>
#include <asm/smp.h>
#include <asm/spitfire.h>
#include <asm/timer.h>
#include <asm/cpudata.h>
/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
* operations in asm/ns87303.h
*/
DEFINE_SPINLOCK(ns87303_lock);
extern void cpu_probe(void);
extern void central_probe(void);
static const char *cpu_mid_prop(void)
{
if (tlb_type == spitfire)
return "upa-portid";
return "portid";
}
static int get_cpu_mid(struct device_node *dp)
{
struct property *prop;
if (tlb_type == hypervisor) {
struct linux_prom64_registers *reg;
int len;
prop = of_find_property(dp, "cpuid", &len);
if (prop && len == 4)
return *(int *) prop->value;
prop = of_find_property(dp, "reg", NULL);
reg = prop->value;
return (reg[0].phys_addr >> 32) & 0x0fffffffUL;
} else {
const char *prop_name = cpu_mid_prop();
prop = of_find_property(dp, prop_name, NULL);
if (prop)
return *(int *) prop->value;
return 0;
}
}
static int check_cpu_node(struct device_node *dp, int *cur_inst,
int (*compare)(struct device_node *, int, void *),
void *compare_arg,
struct device_node **dev_node, int *mid)
{
if (!compare(dp, *cur_inst, compare_arg)) {
if (dev_node)
*dev_node = dp;
if (mid)
*mid = get_cpu_mid(dp);
return 0;
}
(*cur_inst)++;
return -ENODEV;
}
static int __cpu_find_by(int (*compare)(struct device_node *, int, void *),
void *compare_arg,
struct device_node **dev_node, int *mid)
{
struct device_node *dp;
int cur_inst;
cur_inst = 0;
for_each_node_by_type(dp, "cpu") {
int err = check_cpu_node(dp, &cur_inst,
compare, compare_arg,
dev_node, mid);
if (err == 0)
return 0;
}
return -ENODEV;
}
static int cpu_instance_compare(struct device_node *dp, int instance, void *_arg)
{
int desired_instance = (int) (long) _arg;
if (instance == desired_instance)
return 0;
return -ENODEV;
}
int cpu_find_by_instance(int instance, struct device_node **dev_node, int *mid)
{
return __cpu_find_by(cpu_instance_compare, (void *)(long)instance,
dev_node, mid);
}
static int cpu_mid_compare(struct device_node *dp, int instance, void *_arg)
{
int desired_mid = (int) (long) _arg;
int this_mid;
this_mid = get_cpu_mid(dp);
if (this_mid == desired_mid)
return 0;
return -ENODEV;
}
int cpu_find_by_mid(int mid, struct device_node **dev_node)
{
return __cpu_find_by(cpu_mid_compare, (void *)(long)mid,
dev_node, NULL);
}
void __init device_scan(void)
{
/* FIX ME FAST... -DaveM */
ioport_resource.end = 0xffffffffffffffffUL;
prom_printf("Booting Linux...\n");
#ifndef CONFIG_SMP
{
struct device_node *dp;
int err, def;
err = cpu_find_by_instance(0, &dp, NULL);
if (err) {
prom_printf("No cpu nodes, cannot continue\n");
prom_halt();
}
cpu_data(0).clock_tick =
of_getintprop_default(dp, "clock-frequency", 0);
def = ((tlb_type == hypervisor) ?
(8 * 1024) :
(16 * 1024));
cpu_data(0).dcache_size = of_getintprop_default(dp,
"dcache-size",
def);
def = 32;
cpu_data(0).dcache_line_size =
of_getintprop_default(dp, "dcache-line-size", def);
def = 16 * 1024;
cpu_data(0).icache_size = of_getintprop_default(dp,
"icache-size",
def);
def = 32;
cpu_data(0).icache_line_size =
of_getintprop_default(dp, "icache-line-size", def);
def = ((tlb_type == hypervisor) ?
(3 * 1024 * 1024) :
(4 * 1024 * 1024));
cpu_data(0).ecache_size = of_getintprop_default(dp,
"ecache-size",
def);
def = 64;
cpu_data(0).ecache_line_size =
of_getintprop_default(dp, "ecache-line-size", def);
printk("CPU[0]: Caches "
"D[sz(%d):line_sz(%d)] "
"I[sz(%d):line_sz(%d)] "
"E[sz(%d):line_sz(%d)]\n",
cpu_data(0).dcache_size, cpu_data(0).dcache_line_size,
cpu_data(0).icache_size, cpu_data(0).icache_line_size,
cpu_data(0).ecache_size, cpu_data(0).ecache_line_size);
}
#endif
central_probe();
cpu_probe();
}
...@@ -523,7 +523,7 @@ tlb_fixup_done: ...@@ -523,7 +523,7 @@ tlb_fixup_done:
#else #else
mov 0, %o0 mov 0, %o0
#endif #endif
stb %o0, [%g6 + TI_CPU] sth %o0, [%g6 + TI_CPU]
/* Off we go.... */ /* Off we go.... */
call start_kernel call start_kernel
...@@ -653,33 +653,54 @@ setup_tba: ...@@ -653,33 +653,54 @@ setup_tba:
restore restore
sparc64_boot_end: sparc64_boot_end:
#include "ktlb.S"
#include "tsb.S"
#include "etrap.S" #include "etrap.S"
#include "rtrap.S" #include "rtrap.S"
#include "winfixup.S" #include "winfixup.S"
#include "entry.S" #include "entry.S"
#include "sun4v_tlb_miss.S" #include "sun4v_tlb_miss.S"
#include "sun4v_ivec.S" #include "sun4v_ivec.S"
#include "ktlb.S"
#include "tsb.S"
/* /*
* The following skip makes sure the trap table in ttable.S is aligned * The following skip makes sure the trap table in ttable.S is aligned
* on a 32K boundary as required by the v9 specs for TBA register. * on a 32K boundary as required by the v9 specs for TBA register.
* *
* We align to a 32K boundary, then we have the 32K kernel TSB, * We align to a 32K boundary, then we have the 32K kernel TSB,
* then the 32K aligned trap table. * the 64K kernel 4MB TSB, and then the 32K aligned trap table.
*/ */
1: 1:
.skip 0x4000 + _start - 1b .skip 0x4000 + _start - 1b
! 0x0000000000408000
.globl swapper_tsb .globl swapper_tsb
swapper_tsb: swapper_tsb:
.skip (32 * 1024) .skip (32 * 1024)
! 0x0000000000408000 .globl swapper_4m_tsb
swapper_4m_tsb:
.skip (64 * 1024)
! 0x0000000000420000
/* Some care needs to be exercised if you try to move the
* location of the trap table relative to other things. For
* one thing there are br* instructions in some of the
* trap table entries which branch back to code in ktlb.S
* Those instructions can only handle a signed 16-bit
* displacement.
*
* There is a binutils bug (bugzilla #4558) which causes
* the relocation overflow checks for such instructions to
* not be done correctly. So binutils will not notice the
* error and will instead write junk into the relocation and
* you'll have an unbootable kernel.
*/
#include "ttable.S" #include "ttable.S"
! 0x0000000000428000
#include "systbls.S" #include "systbls.S"
.data .data
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <asm/hypervisor.h> #include <asm/hypervisor.h>
#include <asm/oplib.h> #include <asm/oplib.h>
#include <asm/sstate.h>
/* If the hypervisor indicates that the API setting /* If the hypervisor indicates that the API setting
* calls are unsupported, by returning HV_EBADTRAP or * calls are unsupported, by returning HV_EBADTRAP or
...@@ -107,7 +108,7 @@ int sun4v_hvapi_register(unsigned long group, unsigned long major, ...@@ -107,7 +108,7 @@ int sun4v_hvapi_register(unsigned long group, unsigned long major,
p->minor = actual_minor; p->minor = actual_minor;
ret = 0; ret = 0;
} else if (hv_ret == HV_EBADTRAP || } else if (hv_ret == HV_EBADTRAP ||
HV_ENOTSUPPORTED) { hv_ret == HV_ENOTSUPPORTED) {
if (p->flags & FLAG_PRE_API) { if (p->flags & FLAG_PRE_API) {
if (major == 1) { if (major == 1) {
p->major = 1; p->major = 1;
...@@ -179,6 +180,8 @@ void __init sun4v_hvapi_init(void) ...@@ -179,6 +180,8 @@ void __init sun4v_hvapi_init(void)
if (sun4v_hvapi_register(group, major, &minor)) if (sun4v_hvapi_register(group, major, &minor))
goto bad; goto bad;
sun4v_sstate_init();
return; return;
bad: bad:
......
...@@ -171,8 +171,6 @@ int show_interrupts(struct seq_file *p, void *v) ...@@ -171,8 +171,6 @@ int show_interrupts(struct seq_file *p, void *v)
return 0; return 0;
} }
extern unsigned long real_hard_smp_processor_id(void);
static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{ {
unsigned int tid; unsigned int tid;
...@@ -694,9 +692,20 @@ void init_irqwork_curcpu(void) ...@@ -694,9 +692,20 @@ void init_irqwork_curcpu(void)
trap_block[cpu].irq_worklist = 0; trap_block[cpu].irq_worklist = 0;
} }
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type) /* Please be very careful with register_one_mondo() and
* sun4v_register_mondo_queues().
*
* On SMP this gets invoked from the CPU trampoline before
* the cpu has fully taken over the trap table from OBP,
* and its kernel stack + %g6 thread register state is
* not fully cooked yet.
*
* Therefore you cannot make any OBP calls, not even prom_printf,
* from these two routines.
*/
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{ {
unsigned long num_entries = 128; unsigned long num_entries = (qmask + 1) / 64;
unsigned long status; unsigned long status;
status = sun4v_cpu_qconf(type, paddr, num_entries); status = sun4v_cpu_qconf(type, paddr, num_entries);
...@@ -711,44 +720,58 @@ static void __cpuinit sun4v_register_mondo_queues(int this_cpu) ...@@ -711,44 +720,58 @@ static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{ {
struct trap_per_cpu *tb = &trap_block[this_cpu]; struct trap_per_cpu *tb = &trap_block[this_cpu];
register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO); register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO); tb->cpu_mondo_qmask);
register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR); register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR); tb->dev_mondo_qmask);
register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
tb->resum_qmask);
register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
tb->nonresum_qmask);
} }
static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem) static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
{ {
void *page; unsigned long size = PAGE_ALIGN(qmask + 1);
unsigned long order = get_order(size);
void *p = NULL;
if (use_bootmem) if (use_bootmem) {
page = alloc_bootmem_low_pages(PAGE_SIZE); p = __alloc_bootmem_low(size, size, 0);
else } else {
page = (void *) get_zeroed_page(GFP_ATOMIC); struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
if (page)
p = page_address(page);
}
if (!page) { if (!p) {
prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
prom_halt(); prom_halt();
} }
*pa_ptr = __pa(page); *pa_ptr = __pa(p);
} }
static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem) static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
{ {
void *page; unsigned long size = PAGE_ALIGN(qmask + 1);
unsigned long order = get_order(size);
void *p = NULL;
if (use_bootmem) if (use_bootmem) {
page = alloc_bootmem_low_pages(PAGE_SIZE); p = __alloc_bootmem_low(size, size, 0);
else } else {
page = (void *) get_zeroed_page(GFP_ATOMIC); struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
if (page)
p = page_address(page);
}
if (!page) { if (!p) {
prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
prom_halt(); prom_halt();
} }
*pa_ptr = __pa(page); *pa_ptr = __pa(p);
} }
static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem) static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
...@@ -779,12 +802,12 @@ void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int ...@@ -779,12 +802,12 @@ void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int
struct trap_per_cpu *tb = &trap_block[cpu]; struct trap_per_cpu *tb = &trap_block[cpu];
if (alloc) { if (alloc) {
alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem); alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem);
alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem); alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem);
alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem); alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem);
alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem); alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem);
alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem); alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem);
alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem); alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem);
init_cpu_send_mondo_info(tb, use_bootmem); init_cpu_send_mondo_info(tb, use_bootmem);
} }
......
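The queue masks read per trap_per_cpu are the queue byte size minus one, and the code treats each queue entry as 64 bytes, hence the (qmask + 1) / 64 passed to sun4v_cpu_qconf(). A quick arithmetic sketch of what the reworked helpers derive from a few example masks, assuming the 8K sparc64 base page size; the qmask values below are made up, not read from a real machine description:

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size = 8192;                 /* sparc64 base page */
            unsigned long qmasks[] = { 8192 - 1, 16384 - 1, 512 - 1 };

            for (int i = 0; i < 3; i++) {
                    unsigned long qmask   = qmasks[i];
                    unsigned long bytes   = qmask + 1;
                    unsigned long entries = bytes / 64;     /* 64-byte mondo entries */
                    unsigned long alloc   = (bytes + page_size - 1) & ~(page_size - 1);

                    printf("qmask=0x%05lx bytes=%6lu entries=%4lu alloc=%6lu\n",
                           qmask, bytes, entries, alloc);
            }
            return 0;
    }

That is also why the allocators now take a qmask and size the buffer with PAGE_ALIGN()/get_order() instead of always grabbing a single zeroed page.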
...@@ -11,12 +11,12 @@ ...@@ -11,12 +11,12 @@
/* ITLB ** ICACHE line 2: TSB compare and TLB load */ /* ITLB ** ICACHE line 2: TSB compare and TLB load */
bne,pn %xcc, tsb_miss_itlb ! Miss bne,pn %xcc, tsb_miss_itlb ! Miss
mov FAULT_CODE_ITLB, %g3 mov FAULT_CODE_ITLB, %g3
andcc %g5, _PAGE_EXEC_4U, %g0 ! Executable? sethi %hi(_PAGE_EXEC_4U), %g4
andcc %g5, %g4, %g0 ! Executable?
be,pn %xcc, tsb_do_fault be,pn %xcc, tsb_do_fault
nop ! Delay slot, fill me nop ! Delay slot, fill me
stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB
retry ! Trap done retry ! Trap done
nop
/* ITLB ** ICACHE line 3: */ /* ITLB ** ICACHE line 3: */
nop nop
......
...@@ -306,6 +306,20 @@ static void __init pci_controller_probe(void) ...@@ -306,6 +306,20 @@ static void __init pci_controller_probe(void)
pci_controller_scan(pci_controller_init); pci_controller_scan(pci_controller_init);
} }
static int ofpci_verbose;
static int __init ofpci_debug(char *str)
{
int val = 0;
get_option(&str, &val);
if (val)
ofpci_verbose = 1;
return 1;
}
__setup("ofpci_debug=", ofpci_debug);
static unsigned long pci_parse_of_flags(u32 addr0) static unsigned long pci_parse_of_flags(u32 addr0)
{ {
unsigned long flags = 0; unsigned long flags = 0;
...@@ -337,7 +351,9 @@ static void pci_parse_of_addrs(struct of_device *op, ...@@ -337,7 +351,9 @@ static void pci_parse_of_addrs(struct of_device *op,
addrs = of_get_property(node, "assigned-addresses", &proplen); addrs = of_get_property(node, "assigned-addresses", &proplen);
if (!addrs) if (!addrs)
return; return;
printk(" parse addresses (%d bytes) @ %p\n", proplen, addrs); if (ofpci_verbose)
printk(" parse addresses (%d bytes) @ %p\n",
proplen, addrs);
op_res = &op->resource[0]; op_res = &op->resource[0];
for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) { for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
struct resource *res; struct resource *res;
...@@ -348,6 +364,7 @@ static void pci_parse_of_addrs(struct of_device *op, ...@@ -348,6 +364,7 @@ static void pci_parse_of_addrs(struct of_device *op,
if (!flags) if (!flags)
continue; continue;
i = addrs[0] & 0xff; i = addrs[0] & 0xff;
if (ofpci_verbose)
printk(" start: %lx, end: %lx, i: %x\n", printk(" start: %lx, end: %lx, i: %x\n",
op_res->start, op_res->end, i); op_res->start, op_res->end, i);
...@@ -393,8 +410,9 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, ...@@ -393,8 +410,9 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
if (type == NULL) if (type == NULL)
type = ""; type = "";
printk(" create device, devfn: %x, type: %s hostcontroller(%d)\n", if (ofpci_verbose)
devfn, type, host_controller); printk(" create device, devfn: %x, type: %s\n",
devfn, type);
dev->bus = bus; dev->bus = bus;
dev->sysdata = node; dev->sysdata = node;
...@@ -434,6 +452,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, ...@@ -434,6 +452,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus), sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
} }
if (ofpci_verbose)
printk(" class: 0x%x device name: %s\n", printk(" class: 0x%x device name: %s\n",
dev->class, pci_name(dev)); dev->class, pci_name(dev));
...@@ -469,6 +488,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, ...@@ -469,6 +488,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
} }
pci_parse_of_addrs(sd->op, node, dev); pci_parse_of_addrs(sd->op, node, dev);
if (ofpci_verbose)
printk(" adding to system ...\n"); printk(" adding to system ...\n");
pci_device_add(dev, bus); pci_device_add(dev, bus);
...@@ -547,6 +567,7 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm, ...@@ -547,6 +567,7 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
unsigned int flags; unsigned int flags;
u64 size; u64 size;
if (ofpci_verbose)
printk("of_scan_pci_bridge(%s)\n", node->full_name); printk("of_scan_pci_bridge(%s)\n", node->full_name);
/* parse bus-range property */ /* parse bus-range property */
...@@ -632,6 +653,7 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm, ...@@ -632,6 +653,7 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
simba_cont: simba_cont:
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number); bus->number);
if (ofpci_verbose)
printk(" bus name: %s\n", bus->name); printk(" bus name: %s\n", bus->name);
pci_of_scan_bus(pbm, node, bus); pci_of_scan_bus(pbm, node, bus);
...@@ -646,11 +668,13 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm, ...@@ -646,11 +668,13 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
int reglen, devfn; int reglen, devfn;
struct pci_dev *dev; struct pci_dev *dev;
if (ofpci_verbose)
printk("PCI: scan_bus[%s] bus no %d\n", printk("PCI: scan_bus[%s] bus no %d\n",
node->full_name, bus->number); node->full_name, bus->number);
child = NULL; child = NULL;
while ((child = of_get_next_child(node, child)) != NULL) { while ((child = of_get_next_child(node, child)) != NULL) {
if (ofpci_verbose)
printk(" * %s\n", child->full_name); printk(" * %s\n", child->full_name);
reg = of_get_property(child, "reg", &reglen); reg = of_get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20) if (reg == NULL || reglen < 20)
...@@ -661,7 +685,9 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm, ...@@ -661,7 +685,9 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
dev = of_create_pci_dev(pbm, child, bus, devfn, 0); dev = of_create_pci_dev(pbm, child, bus, devfn, 0);
if (!dev) if (!dev)
continue; continue;
printk("PCI: dev header type: %x\n", dev->hdr_type); if (ofpci_verbose)
printk("PCI: dev header type: %x\n",
dev->hdr_type);
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
......
...@@ -762,11 +762,12 @@ void sabre_init(struct device_node *dp, char *model_name) ...@@ -762,11 +762,12 @@ void sabre_init(struct device_node *dp, char *model_name)
/* Of course, Sun has to encode things a thousand /* Of course, Sun has to encode things a thousand
* different ways, inconsistently. * different ways, inconsistently.
*/ */
cpu_find_by_instance(0, &dp, NULL); for_each_node_by_type(dp, "cpu") {
if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe")) if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe"))
hummingbird_p = 1; hummingbird_p = 1;
} }
} }
}
p = kzalloc(sizeof(*p), GFP_ATOMIC); p = kzalloc(sizeof(*p), GFP_ATOMIC);
if (!p) { if (!p) {
......
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/msi.h> #include <linux/msi.h>
#include <linux/log2.h>
#include <asm/iommu.h> #include <asm/iommu.h>
#include <asm/irq.h> #include <asm/irq.h>
...@@ -26,6 +27,9 @@ ...@@ -26,6 +27,9 @@
#include "pci_sun4v.h" #include "pci_sun4v.h"
static unsigned long vpci_major = 1;
static unsigned long vpci_minor = 1;
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
struct iommu_batch { struct iommu_batch {
...@@ -638,9 +642,8 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) ...@@ -638,9 +642,8 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{ {
struct iommu *iommu = pbm->iommu; struct iommu *iommu = pbm->iommu;
struct property *prop; struct property *prop;
unsigned long num_tsb_entries, sz; unsigned long num_tsb_entries, sz, tsbsize;
u32 vdma[2], dma_mask, dma_offset; u32 vdma[2], dma_mask, dma_offset;
int tsbsize;
prop = of_find_property(pbm->prom_node, "virtual-dma", NULL); prop = of_find_property(pbm->prom_node, "virtual-dma", NULL);
if (prop) { if (prop) {
...@@ -654,31 +657,15 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) ...@@ -654,31 +657,15 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
vdma[1] = 0x80000000; vdma[1] = 0x80000000;
} }
dma_mask = vdma[0]; if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
switch (vdma[1]) { prom_printf("PCI-SUN4V: strange virtual-dma[%08x:%08x].\n",
case 0x20000000: vdma[0], vdma[1]);
dma_mask |= 0x1fffffff;
tsbsize = 64;
break;
case 0x40000000:
dma_mask |= 0x3fffffff;
tsbsize = 128;
break;
case 0x80000000:
dma_mask |= 0x7fffffff;
tsbsize = 256;
break;
default:
prom_printf("PCI-SUN4V: strange virtual-dma size.\n");
prom_halt(); prom_halt();
}; };
tsbsize *= (8 * 1024); dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
num_tsb_entries = tsbsize / sizeof(iopte_t); tsbsize = num_tsb_entries * sizeof(iopte_t);
dma_offset = vdma[0]; dma_offset = vdma[0];
...@@ -689,7 +676,7 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm) ...@@ -689,7 +676,7 @@ static void pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
iommu->dma_addr_mask = dma_mask; iommu->dma_addr_mask = dma_mask;
/* Allocate and initialize the free area map. */ /* Allocate and initialize the free area map. */
sz = num_tsb_entries / 8; sz = (num_tsb_entries + 7) / 8;
sz = (sz + 7UL) & ~7UL; sz = (sz + 7UL) & ~7UL;
iommu->arena.map = kzalloc(sz, GFP_KERNEL); iommu->arena.map = kzalloc(sz, GFP_KERNEL);
if (!iommu->arena.map) { if (!iommu->arena.map) {
...@@ -1178,6 +1165,7 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node ...@@ -1178,6 +1165,7 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, struct device_node
void sun4v_pci_init(struct device_node *dp, char *model_name) void sun4v_pci_init(struct device_node *dp, char *model_name)
{ {
static int hvapi_negotiated = 0;
struct pci_controller_info *p; struct pci_controller_info *p;
struct pci_pbm_info *pbm; struct pci_pbm_info *pbm;
struct iommu *iommu; struct iommu *iommu;
...@@ -1186,6 +1174,20 @@ void sun4v_pci_init(struct device_node *dp, char *model_name) ...@@ -1186,6 +1174,20 @@ void sun4v_pci_init(struct device_node *dp, char *model_name)
u32 devhandle; u32 devhandle;
int i; int i;
if (!hvapi_negotiated++) {
int err = sun4v_hvapi_register(HV_GRP_PCI,
vpci_major,
&vpci_minor);
if (err) {
prom_printf("SUN4V_PCI: Could not register hvapi, "
"err=%d\n", err);
prom_halt();
}
printk("SUN4V_PCI: Registered hvapi major[%lu] minor[%lu]\n",
vpci_major, vpci_minor);
}
prop = of_find_property(dp, "reg", NULL); prop = of_find_property(dp, "reg", NULL);
regs = prop->value; regs = prop->value;
......
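Instead of recognizing only three fixed virtual-dma window sizes (512MB, 1GB, 2GB), the new code just checks IO-page alignment and derives the DMA mask and TSB size from the property itself. A small sketch of that arithmetic for a 2GB window; the 8K IO page size and 8-byte iopte_t are the conventional sparc64 values and, like the base address, are assumptions of this sketch rather than something stated in the hunk:

    #include <stdio.h>

    static unsigned long roundup_pow_of_two(unsigned long x)
    {
            unsigned long r = 1;

            while (r < x)
                    r <<= 1;
            return r;
    }

    int main(void)
    {
            /* {base, size}; the 0x80000000 size matches the fallback above,
             * the base is illustrative. */
            unsigned int vdma[2] = { 0x80000000u, 0x80000000u };
            unsigned long io_page_size = 8192;
            unsigned long iopte_size   = 8;

            unsigned long dma_mask        = roundup_pow_of_two(vdma[1]) - 1UL;
            unsigned long num_tsb_entries = vdma[1] / io_page_size;
            unsigned long tsbsize         = num_tsb_entries * iopte_size;

            printf("dma_mask=0x%lx entries=%lu tsbsize=%lu bytes\n",
                   dma_mask, num_tsb_entries, tsbsize);
            return 0;
    }

This prints dma_mask=0x7fffffff, entries=262144 and tsbsize=2097152 bytes; the entry count and TSB size match what the old hard-coded 0x80000000 case produced, while any other page-aligned size now works too.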
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/of_device.h> #include <asm/of_device.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/sstate.h>
#include <linux/unistd.h> #include <linux/unistd.h>
...@@ -53,6 +54,7 @@ static void (*poweroff_method)(void) = machine_alt_power_off; ...@@ -53,6 +54,7 @@ static void (*poweroff_method)(void) = machine_alt_power_off;
void machine_power_off(void) void machine_power_off(void)
{ {
sstate_poweroff();
if (!serial_console || scons_pwroff) { if (!serial_console || scons_pwroff) {
#ifdef CONFIG_PCI #ifdef CONFIG_PCI
if (power_reg) { if (power_reg) {
......
...@@ -45,6 +45,7 @@ ...@@ -45,6 +45,7 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/unistd.h> #include <asm/unistd.h>
#include <asm/hypervisor.h> #include <asm/hypervisor.h>
#include <asm/sstate.h>
/* #define VERBOSE_SHOWREGS */ /* #define VERBOSE_SHOWREGS */
...@@ -106,6 +107,7 @@ extern void (*prom_keyboard)(void); ...@@ -106,6 +107,7 @@ extern void (*prom_keyboard)(void);
void machine_halt(void) void machine_halt(void)
{ {
sstate_halt();
if (!serial_console && prom_palette) if (!serial_console && prom_palette)
prom_palette (1); prom_palette (1);
if (prom_keyboard) if (prom_keyboard)
...@@ -116,6 +118,7 @@ void machine_halt(void) ...@@ -116,6 +118,7 @@ void machine_halt(void)
void machine_alt_power_off(void) void machine_alt_power_off(void)
{ {
sstate_poweroff();
if (!serial_console && prom_palette) if (!serial_console && prom_palette)
prom_palette(1); prom_palette(1);
if (prom_keyboard) if (prom_keyboard)
...@@ -128,6 +131,7 @@ void machine_restart(char * cmd) ...@@ -128,6 +131,7 @@ void machine_restart(char * cmd)
{ {
char *p; char *p;
sstate_reboot();
p = strchr (reboot_command, '\n'); p = strchr (reboot_command, '\n');
if (p) *p = 0; if (p) *p = 0;
if (!serial_console && prom_palette) if (!serial_console && prom_palette)
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/asi.h> #include <asm/asi.h>
#include <asm/upa.h> #include <asm/upa.h>
#include <asm/smp.h>
static struct device_node *allnodes; static struct device_node *allnodes;
...@@ -1665,6 +1666,150 @@ static struct device_node * __init build_tree(struct device_node *parent, phandl ...@@ -1665,6 +1666,150 @@ static struct device_node * __init build_tree(struct device_node *parent, phandl
return ret; return ret;
} }
static const char *get_mid_prop(void)
{
return (tlb_type == spitfire ? "upa-portid" : "portid");
}
struct device_node *of_find_node_by_cpuid(int cpuid)
{
struct device_node *dp;
const char *mid_prop = get_mid_prop();
for_each_node_by_type(dp, "cpu") {
int id = of_getintprop_default(dp, mid_prop, -1);
const char *this_mid_prop = mid_prop;
if (id < 0) {
this_mid_prop = "cpuid";
id = of_getintprop_default(dp, this_mid_prop, -1);
}
if (id < 0) {
prom_printf("OF: Serious problem, cpu lacks "
"%s property", this_mid_prop);
prom_halt();
}
if (cpuid == id)
return dp;
}
return NULL;
}
static void __init of_fill_in_cpu_data(void)
{
struct device_node *dp;
const char *mid_prop = get_mid_prop();
ncpus_probed = 0;
for_each_node_by_type(dp, "cpu") {
int cpuid = of_getintprop_default(dp, mid_prop, -1);
const char *this_mid_prop = mid_prop;
struct device_node *portid_parent;
int portid = -1;
portid_parent = NULL;
if (cpuid < 0) {
this_mid_prop = "cpuid";
cpuid = of_getintprop_default(dp, this_mid_prop, -1);
if (cpuid >= 0) {
int limit = 2;
portid_parent = dp;
while (limit--) {
portid_parent = portid_parent->parent;
if (!portid_parent)
break;
portid = of_getintprop_default(portid_parent,
"portid", -1);
if (portid >= 0)
break;
}
}
}
if (cpuid < 0) {
prom_printf("OF: Serious problem, cpu lacks "
"%s property", this_mid_prop);
prom_halt();
}
ncpus_probed++;
#ifdef CONFIG_SMP
if (cpuid >= NR_CPUS)
continue;
#else
/* On uniprocessor we only want the values for the
* real physical cpu the kernel booted onto, however
* cpu_data() only has one entry at index 0.
*/
if (cpuid != real_hard_smp_processor_id())
continue;
cpuid = 0;
#endif
cpu_data(cpuid).clock_tick =
of_getintprop_default(dp, "clock-frequency", 0);
if (portid_parent) {
cpu_data(cpuid).dcache_size =
of_getintprop_default(dp, "l1-dcache-size",
16 * 1024);
cpu_data(cpuid).dcache_line_size =
of_getintprop_default(dp, "l1-dcache-line-size",
32);
cpu_data(cpuid).icache_size =
of_getintprop_default(dp, "l1-icache-size",
8 * 1024);
cpu_data(cpuid).icache_line_size =
of_getintprop_default(dp, "l1-icache-line-size",
32);
cpu_data(cpuid).ecache_size =
of_getintprop_default(dp, "l2-cache-size", 0);
cpu_data(cpuid).ecache_line_size =
of_getintprop_default(dp, "l2-cache-line-size", 0);
if (!cpu_data(cpuid).ecache_size ||
!cpu_data(cpuid).ecache_line_size) {
cpu_data(cpuid).ecache_size =
of_getintprop_default(portid_parent,
"l2-cache-size",
(4 * 1024 * 1024));
cpu_data(cpuid).ecache_line_size =
of_getintprop_default(portid_parent,
"l2-cache-line-size", 64);
}
cpu_data(cpuid).core_id = portid + 1;
} else {
cpu_data(cpuid).dcache_size =
of_getintprop_default(dp, "dcache-size", 16 * 1024);
cpu_data(cpuid).dcache_line_size =
of_getintprop_default(dp, "dcache-line-size", 32);
cpu_data(cpuid).icache_size =
of_getintprop_default(dp, "icache-size", 16 * 1024);
cpu_data(cpuid).icache_line_size =
of_getintprop_default(dp, "icache-line-size", 32);
cpu_data(cpuid).ecache_size =
of_getintprop_default(dp, "ecache-size",
(4 * 1024 * 1024));
cpu_data(cpuid).ecache_line_size =
of_getintprop_default(dp, "ecache-line-size", 64);
cpu_data(cpuid).core_id = 0;
}
#ifdef CONFIG_SMP
cpu_set(cpuid, cpu_present_map);
cpu_set(cpuid, phys_cpu_present_map);
#endif
}
smp_fill_in_sib_core_maps();
}
void __init prom_build_devicetree(void) void __init prom_build_devicetree(void)
{ {
struct device_node **nextp; struct device_node **nextp;
...@@ -1679,4 +1824,7 @@ void __init prom_build_devicetree(void) ...@@ -1679,4 +1824,7 @@ void __init prom_build_devicetree(void)
&nextp); &nextp);
printk("PROM: Built device tree with %u bytes of memory.\n", printk("PROM: Built device tree with %u bytes of memory.\n",
prom_early_allocated); prom_early_allocated);
if (tlb_type != hypervisor)
of_fill_in_cpu_data();
} }
...@@ -46,11 +46,17 @@ ...@@ -46,11 +46,17 @@
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/mmu.h> #include <asm/mmu.h>
#include <asm/ns87303.h>
#ifdef CONFIG_IP_PNP #ifdef CONFIG_IP_PNP
#include <net/ipconfig.h> #include <net/ipconfig.h>
#endif #endif
/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
* operations in asm/ns87303.h
*/
DEFINE_SPINLOCK(ns87303_lock);
struct screen_info screen_info = { struct screen_info screen_info = {
0, 0, /* orig-x, orig-y */ 0, 0, /* orig-x, orig-y */
0, /* unused */ 0, /* unused */
...@@ -370,8 +376,6 @@ void __init setup_arch(char **cmdline_p) ...@@ -370,8 +376,6 @@ void __init setup_arch(char **cmdline_p)
init_cur_cpu_trap(current_thread_info()); init_cur_cpu_trap(current_thread_info());
paging_init(); paging_init();
smp_setup_cpu_possible_map();
} }
static int __init set_preferred_console(void) static int __init set_preferred_console(void)
...@@ -424,7 +428,7 @@ extern void mmu_info(struct seq_file *); ...@@ -424,7 +428,7 @@ extern void mmu_info(struct seq_file *);
unsigned int dcache_parity_tl1_occurred; unsigned int dcache_parity_tl1_occurred;
unsigned int icache_parity_tl1_occurred; unsigned int icache_parity_tl1_occurred;
static int ncpus_probed; int ncpus_probed;
static int show_cpuinfo(struct seq_file *m, void *__unused) static int show_cpuinfo(struct seq_file *m, void *__unused)
{ {
...@@ -516,14 +520,6 @@ static int __init topology_init(void) ...@@ -516,14 +520,6 @@ static int __init topology_init(void)
err = -ENOMEM; err = -ENOMEM;
/* Count the number of physically present processors in
* the machine, even on uniprocessor, so that /proc/cpuinfo
* output is consistent with 2.4.x
*/
ncpus_probed = 0;
while (!cpu_find_by_instance(ncpus_probed, NULL, NULL))
ncpus_probed++;
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p) { if (p) {
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include <asm/tlb.h> #include <asm/tlb.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/mdesc.h>
extern void calibrate_delay(void); extern void calibrate_delay(void);
...@@ -75,53 +76,6 @@ void smp_bogo(struct seq_file *m) ...@@ -75,53 +76,6 @@ void smp_bogo(struct seq_file *m)
i, cpu_data(i).clock_tick); i, cpu_data(i).clock_tick);
} }
void __init smp_store_cpu_info(int id)
{
struct device_node *dp;
int def;
cpu_data(id).udelay_val = loops_per_jiffy;
cpu_find_by_mid(id, &dp);
cpu_data(id).clock_tick =
of_getintprop_default(dp, "clock-frequency", 0);
def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
cpu_data(id).dcache_size =
of_getintprop_default(dp, "dcache-size", def);
def = 32;
cpu_data(id).dcache_line_size =
of_getintprop_default(dp, "dcache-line-size", def);
def = 16 * 1024;
cpu_data(id).icache_size =
of_getintprop_default(dp, "icache-size", def);
def = 32;
cpu_data(id).icache_line_size =
of_getintprop_default(dp, "icache-line-size", def);
def = ((tlb_type == hypervisor) ?
(3 * 1024 * 1024) :
(4 * 1024 * 1024));
cpu_data(id).ecache_size =
of_getintprop_default(dp, "ecache-size", def);
def = 64;
cpu_data(id).ecache_line_size =
of_getintprop_default(dp, "ecache-line-size", def);
printk("CPU[%d]: Caches "
"D[sz(%d):line_sz(%d)] "
"I[sz(%d):line_sz(%d)] "
"E[sz(%d):line_sz(%d)]\n",
id,
cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
cpu_data(id).icache_size, cpu_data(id).icache_line_size,
cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}
extern void setup_sparc64_timer(void); extern void setup_sparc64_timer(void);
static volatile unsigned long callin_flag = 0; static volatile unsigned long callin_flag = 0;
...@@ -145,7 +99,7 @@ void __init smp_callin(void) ...@@ -145,7 +99,7 @@ void __init smp_callin(void)
local_irq_enable(); local_irq_enable();
calibrate_delay(); calibrate_delay();
smp_store_cpu_info(cpuid); cpu_data(cpuid).udelay_val = loops_per_jiffy;
callin_flag = 1; callin_flag = 1;
__asm__ __volatile__("membar #Sync\n\t" __asm__ __volatile__("membar #Sync\n\t"
"flush %%g6" : : : "memory"); "flush %%g6" : : : "memory");
...@@ -340,9 +294,8 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) ...@@ -340,9 +294,8 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
prom_startcpu_cpuid(cpu, entry, cookie); prom_startcpu_cpuid(cpu, entry, cookie);
} else { } else {
struct device_node *dp; struct device_node *dp = of_find_node_by_cpuid(cpu);
cpu_find_by_mid(cpu, &dp);
prom_startcpu(dp->node, entry, cookie); prom_startcpu(dp->node, entry, cookie);
} }
...@@ -447,7 +400,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c ...@@ -447,7 +400,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask) static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{ {
u64 pstate, ver; u64 pstate, ver;
int nack_busy_id, is_jbus; int nack_busy_id, is_jbus, need_more;
if (cpus_empty(mask)) if (cpus_empty(mask))
return; return;
...@@ -463,6 +416,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas ...@@ -463,6 +416,7 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate)); __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
retry: retry:
need_more = 0;
__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t" __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
: : "r" (pstate), "i" (PSTATE_IE)); : : "r" (pstate), "i" (PSTATE_IE));
...@@ -491,6 +445,10 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas ...@@ -491,6 +445,10 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
: /* no outputs */ : /* no outputs */
: "r" (target), "i" (ASI_INTR_W)); : "r" (target), "i" (ASI_INTR_W));
nack_busy_id++; nack_busy_id++;
if (nack_busy_id == 32) {
need_more = 1;
break;
}
} }
} }
...@@ -507,6 +465,16 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas ...@@ -507,6 +465,16 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
if (dispatch_stat == 0UL) { if (dispatch_stat == 0UL) {
__asm__ __volatile__("wrpr %0, 0x0, %%pstate" __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
: : "r" (pstate)); : : "r" (pstate));
if (unlikely(need_more)) {
int i, cnt = 0;
for_each_cpu_mask(i, mask) {
cpu_clear(i, mask);
cnt++;
if (cnt == 32)
break;
}
goto retry;
}
return; return;
} }
if (!--stuck) if (!--stuck)
...@@ -544,6 +512,8 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas ...@@ -544,6 +512,8 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
if ((dispatch_stat & check_mask) == 0) if ((dispatch_stat & check_mask) == 0)
cpu_clear(i, mask); cpu_clear(i, mask);
this_busy_nack += 2; this_busy_nack += 2;
if (this_busy_nack == 64)
break;
} }
goto retry; goto retry;
...@@ -1191,23 +1161,14 @@ int setup_profiling_timer(unsigned int multiplier) ...@@ -1191,23 +1161,14 @@ int setup_profiling_timer(unsigned int multiplier)
static void __init smp_tune_scheduling(void) static void __init smp_tune_scheduling(void)
{ {
struct device_node *dp; unsigned int smallest = ~0U;
int instance; int i;
unsigned int def, smallest = ~0U;
def = ((tlb_type == hypervisor) ?
(3 * 1024 * 1024) :
(4 * 1024 * 1024));
instance = 0; for (i = 0; i < NR_CPUS; i++) {
while (!cpu_find_by_instance(instance, &dp, NULL)) { unsigned int val = cpu_data(i).ecache_size;
unsigned int val;
val = of_getintprop_default(dp, "ecache-size", def); if (val && val < smallest)
if (val < smallest)
smallest = val; smallest = val;
instance++;
} }
/* Any value less than 256K is nonsense. */ /* Any value less than 256K is nonsense. */
...@@ -1230,58 +1191,42 @@ void __init smp_prepare_cpus(unsigned int max_cpus) ...@@ -1230,58 +1191,42 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
int i; int i;
if (num_possible_cpus() > max_cpus) { if (num_possible_cpus() > max_cpus) {
int instance, mid; for_each_possible_cpu(i) {
if (i != boot_cpu_id) {
instance = 0; cpu_clear(i, phys_cpu_present_map);
while (!cpu_find_by_instance(instance, NULL, &mid)) { cpu_clear(i, cpu_present_map);
if (mid != boot_cpu_id) {
cpu_clear(mid, phys_cpu_present_map);
cpu_clear(mid, cpu_present_map);
if (num_possible_cpus() <= max_cpus) if (num_possible_cpus() <= max_cpus)
break; break;
} }
instance++;
} }
} }
for_each_possible_cpu(i) { cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy;
if (tlb_type == hypervisor) {
int j;
/* XXX get this mapping from machine description */
for_each_possible_cpu(j) {
if ((j >> 2) == (i >> 2))
cpu_set(j, cpu_sibling_map[i]);
}
} else {
cpu_set(i, cpu_sibling_map[i]);
}
}
smp_store_cpu_info(boot_cpu_id);
smp_tune_scheduling(); smp_tune_scheduling();
} }
/* Set this up early so that things like the scheduler can init void __devinit smp_prepare_boot_cpu(void)
* properly. We use the same cpu mask for both the present and
* possible cpu map.
*/
void __init smp_setup_cpu_possible_map(void)
{ {
int instance, mid;
instance = 0;
while (!cpu_find_by_instance(instance, NULL, &mid)) {
if (mid < NR_CPUS) {
cpu_set(mid, phys_cpu_present_map);
cpu_set(mid, cpu_present_map);
}
instance++;
}
} }
void __devinit smp_prepare_boot_cpu(void) void __devinit smp_fill_in_sib_core_maps(void)
{ {
unsigned int i;
for_each_possible_cpu(i) {
unsigned int j;
if (cpu_data(i).core_id == 0) {
cpu_set(i, cpu_sibling_map[i]);
continue;
}
for_each_possible_cpu(j) {
if (cpu_data(i).core_id ==
cpu_data(j).core_id)
cpu_set(j, cpu_sibling_map[i]);
}
}
} }
int __cpuinit __cpu_up(unsigned int cpu) int __cpuinit __cpu_up(unsigned int cpu)
...@@ -1337,7 +1282,7 @@ unsigned long __per_cpu_shift __read_mostly; ...@@ -1337,7 +1282,7 @@ unsigned long __per_cpu_shift __read_mostly;
EXPORT_SYMBOL(__per_cpu_base); EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift); EXPORT_SYMBOL(__per_cpu_shift);
void __init setup_per_cpu_areas(void) void __init real_setup_per_cpu_areas(void)
{ {
unsigned long goal, size, i; unsigned long goal, size, i;
char *ptr; char *ptr;
......
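smp_fill_in_sib_core_maps() replaces the old "every four consecutive cpuids share a core" guess (the XXX-marked (j >> 2) == (i >> 2) test removed above) with the core_id values now filled in from the device tree or machine description: core_id == 0 means no known siblings, and equal non-zero core_ids are grouped together. A tiny sketch of that grouping over made-up core_id values (the array is illustrative, not data from a real machine):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical per-cpu core_id values: two 2-thread cores plus
             * one cpu with no core information. */
            int core_id[] = { 1, 1, 2, 2, 0 };
            int ncpus = 5;

            for (int i = 0; i < ncpus; i++) {
                    printf("cpu%d siblings:", i);
                    if (core_id[i] == 0) {
                            printf(" %d\n", i);     /* only itself */
                            continue;
                    }
                    for (int j = 0; j < ncpus; j++)
                            if (core_id[i] == core_id[j])
                                    printf(" %d", j);
                    printf("\n");
            }
            return 0;
    }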
/* sstate.c: System soft state support.
*
* Copyright (C) 2007 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <asm/hypervisor.h>
#include <asm/sstate.h>
#include <asm/oplib.h>
#include <asm/head.h>
#include <asm/io.h>
static int hv_supports_soft_state;
static unsigned long kimage_addr_to_ra(const char *p)
{
unsigned long val = (unsigned long) p;
return kern_base + (val - KERNBASE);
}
static void do_set_sstate(unsigned long state, const char *msg)
{
unsigned long err;
if (!hv_supports_soft_state)
return;
err = sun4v_mach_set_soft_state(state, kimage_addr_to_ra(msg));
if (err) {
printk(KERN_WARNING "SSTATE: Failed to set soft-state to "
"state[%lx] msg[%s], err=%lu\n",
state, msg, err);
}
}
static const char booting_msg[32] __attribute__((aligned(32))) =
"Linux booting";
static const char running_msg[32] __attribute__((aligned(32))) =
"Linux running";
static const char halting_msg[32] __attribute__((aligned(32))) =
"Linux halting";
static const char poweroff_msg[32] __attribute__((aligned(32))) =
"Linux powering off";
static const char rebooting_msg[32] __attribute__((aligned(32))) =
"Linux rebooting";
static const char panicing_msg[32] __attribute__((aligned(32))) =
"Linux panicing";
void sstate_booting(void)
{
do_set_sstate(HV_SOFT_STATE_TRANSITION, booting_msg);
}
void sstate_running(void)
{
do_set_sstate(HV_SOFT_STATE_NORMAL, running_msg);
}
void sstate_halt(void)
{
do_set_sstate(HV_SOFT_STATE_TRANSITION, halting_msg);
}
void sstate_poweroff(void)
{
do_set_sstate(HV_SOFT_STATE_TRANSITION, poweroff_msg);
}
void sstate_reboot(void)
{
do_set_sstate(HV_SOFT_STATE_TRANSITION, rebooting_msg);
}
static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
{
do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
return NOTIFY_DONE;
}
static struct notifier_block sstate_panic_block = {
.notifier_call = sstate_panic_event,
.priority = INT_MAX,
};
void __init sun4v_sstate_init(void)
{
unsigned long major, minor;
major = 1;
minor = 0;
if (sun4v_hvapi_register(HV_GRP_SOFT_STATE, major, &minor))
return;
hv_supports_soft_state = 1;
prom_sun4v_guest_soft_state();
atomic_notifier_chain_register(&panic_notifier_list,
&sstate_panic_block);
}
...@@ -22,12 +22,12 @@ sun4v_cpu_mondo: ...@@ -22,12 +22,12 @@ sun4v_cpu_mondo:
be,pn %xcc, sun4v_cpu_mondo_queue_empty be,pn %xcc, sun4v_cpu_mondo_queue_empty
nop nop
/* Get &trap_block[smp_processor_id()] into %g3. */ /* Get &trap_block[smp_processor_id()] into %g4. */
ldxa [%g0] ASI_SCRATCHPAD, %g3 ldxa [%g0] ASI_SCRATCHPAD, %g4
sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
/* Get CPU mondo queue base phys address into %g7. */ /* Get CPU mondo queue base phys address into %g7. */
ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
/* Now get the cross-call arguments and handler PC, same /* Now get the cross-call arguments and handler PC, same
* layout as sun4u: * layout as sun4u:
...@@ -47,8 +47,7 @@ sun4v_cpu_mondo: ...@@ -47,8 +47,7 @@ sun4v_cpu_mondo:
add %g2, 0x40 - 0x8 - 0x8, %g2 add %g2, 0x40 - 0x8 - 0x8, %g2
/* Update queue head pointer. */ /* Update queue head pointer. */
sethi %hi(8192 - 1), %g4 lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
or %g4, %lo(8192 - 1), %g4
and %g2, %g4, %g2 and %g2, %g4, %g2
mov INTRQ_CPU_MONDO_HEAD, %g4 mov INTRQ_CPU_MONDO_HEAD, %g4
...@@ -71,12 +70,12 @@ sun4v_dev_mondo: ...@@ -71,12 +70,12 @@ sun4v_dev_mondo:
be,pn %xcc, sun4v_dev_mondo_queue_empty be,pn %xcc, sun4v_dev_mondo_queue_empty
nop nop
/* Get &trap_block[smp_processor_id()] into %g3. */ /* Get &trap_block[smp_processor_id()] into %g4. */
ldxa [%g0] ASI_SCRATCHPAD, %g3 ldxa [%g0] ASI_SCRATCHPAD, %g4
sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
/* Get DEV mondo queue base phys address into %g5. */ /* Get DEV mondo queue base phys address into %g5. */
ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
/* Load IVEC into %g3. */ /* Load IVEC into %g3. */
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
...@@ -90,8 +89,7 @@ sun4v_dev_mondo: ...@@ -90,8 +89,7 @@ sun4v_dev_mondo:
*/ */
/* Update queue head pointer, this frees up some registers. */ /* Update queue head pointer, this frees up some registers. */
sethi %hi(8192 - 1), %g4 lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
or %g4, %lo(8192 - 1), %g4
and %g2, %g4, %g2 and %g2, %g4, %g2
mov INTRQ_DEVICE_MONDO_HEAD, %g4 mov INTRQ_DEVICE_MONDO_HEAD, %g4
...@@ -143,6 +141,8 @@ sun4v_res_mondo: ...@@ -143,6 +141,8 @@ sun4v_res_mondo:
brnz,pn %g1, sun4v_res_mondo_queue_full brnz,pn %g1, sun4v_res_mondo_queue_full
nop nop
lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4
/* Remember this entry's offset in %g1. */ /* Remember this entry's offset in %g1. */
mov %g2, %g1 mov %g2, %g1
...@@ -173,8 +173,6 @@ sun4v_res_mondo: ...@@ -173,8 +173,6 @@ sun4v_res_mondo:
add %g2, 0x08, %g2 add %g2, 0x08, %g2
/* Update queue head pointer. */ /* Update queue head pointer. */
sethi %hi(8192 - 1), %g4
or %g4, %lo(8192 - 1), %g4
and %g2, %g4, %g2 and %g2, %g4, %g2
mov INTRQ_RESUM_MONDO_HEAD, %g4 mov INTRQ_RESUM_MONDO_HEAD, %g4
...@@ -254,6 +252,8 @@ sun4v_nonres_mondo: ...@@ -254,6 +252,8 @@ sun4v_nonres_mondo:
brnz,pn %g1, sun4v_nonres_mondo_queue_full brnz,pn %g1, sun4v_nonres_mondo_queue_full
nop nop
lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4
/* Remember this entry's offset in %g1. */ /* Remember this entry's offset in %g1. */
mov %g2, %g1 mov %g2, %g1
...@@ -284,8 +284,6 @@ sun4v_nonres_mondo: ...@@ -284,8 +284,6 @@ sun4v_nonres_mondo:
add %g2, 0x08, %g2 add %g2, 0x08, %g2
/* Update queue head pointer. */ /* Update queue head pointer. */
sethi %hi(8192 - 1), %g4
or %g4, %lo(8192 - 1), %g4
and %g2, %g4, %g2 and %g2, %g4, %g2
mov INTRQ_NONRESUM_MONDO_HEAD, %g4 mov INTRQ_NONRESUM_MONDO_HEAD, %g4
......
...@@ -680,22 +680,14 @@ static int starfire_set_time(u32 val) ...@@ -680,22 +680,14 @@ static int starfire_set_time(u32 val)
static u32 hypervisor_get_time(void) static u32 hypervisor_get_time(void)
{ {
register unsigned long func asm("%o5"); unsigned long ret, time;
register unsigned long arg0 asm("%o0");
register unsigned long arg1 asm("%o1");
int retries = 10000; int retries = 10000;
retry: retry:
func = HV_FAST_TOD_GET; ret = sun4v_tod_get(&time);
arg0 = 0; if (ret == HV_EOK)
arg1 = 0; return time;
__asm__ __volatile__("ta %6" if (ret == HV_EWOULDBLOCK) {
: "=&r" (func), "=&r" (arg0), "=&r" (arg1)
: "0" (func), "1" (arg0), "2" (arg1),
"i" (HV_FAST_TRAP));
if (arg0 == HV_EOK)
return arg1;
if (arg0 == HV_EWOULDBLOCK) {
if (--retries > 0) { if (--retries > 0) {
udelay(100); udelay(100);
goto retry; goto retry;
...@@ -709,20 +701,14 @@ static u32 hypervisor_get_time(void) ...@@ -709,20 +701,14 @@ static u32 hypervisor_get_time(void)
static int hypervisor_set_time(u32 secs) static int hypervisor_set_time(u32 secs)
{ {
register unsigned long func asm("%o5"); unsigned long ret;
register unsigned long arg0 asm("%o0");
int retries = 10000; int retries = 10000;
retry: retry:
func = HV_FAST_TOD_SET; ret = sun4v_tod_set(secs);
arg0 = secs; if (ret == HV_EOK)
__asm__ __volatile__("ta %4"
: "=&r" (func), "=&r" (arg0)
: "0" (func), "1" (arg0),
"i" (HV_FAST_TRAP));
if (arg0 == HV_EOK)
return 0; return 0;
if (arg0 == HV_EWOULDBLOCK) { if (ret == HV_EWOULDBLOCK) {
if (--retries > 0) { if (--retries > 0) {
udelay(100); udelay(100);
goto retry; goto retry;
...@@ -862,7 +848,6 @@ fs_initcall(clock_init); ...@@ -862,7 +848,6 @@ fs_initcall(clock_init);
static unsigned long sparc64_init_timers(void) static unsigned long sparc64_init_timers(void)
{ {
struct device_node *dp; struct device_node *dp;
struct property *prop;
unsigned long clock; unsigned long clock;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
extern void smp_tick_init(void); extern void smp_tick_init(void);
...@@ -879,17 +864,15 @@ static unsigned long sparc64_init_timers(void) ...@@ -879,17 +864,15 @@ static unsigned long sparc64_init_timers(void)
if (manuf == 0x17 && impl == 0x13) { if (manuf == 0x17 && impl == 0x13) {
/* Hummingbird, aka Ultra-IIe */ /* Hummingbird, aka Ultra-IIe */
tick_ops = &hbtick_operations; tick_ops = &hbtick_operations;
prop = of_find_property(dp, "stick-frequency", NULL); clock = of_getintprop_default(dp, "stick-frequency", 0);
} else { } else {
tick_ops = &tick_operations; tick_ops = &tick_operations;
cpu_find_by_instance(0, &dp, NULL); clock = local_cpu_data().clock_tick;
prop = of_find_property(dp, "clock-frequency", NULL);
} }
} else { } else {
tick_ops = &stick_operations; tick_ops = &stick_operations;
prop = of_find_property(dp, "stick-frequency", NULL); clock = of_getintprop_default(dp, "stick-frequency", 0);
} }
clock = *(unsigned int *) prop->value;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
smp_tick_init(); smp_tick_init();
...@@ -1365,6 +1348,7 @@ static int hypervisor_set_rtc_time(struct rtc_time *time) ...@@ -1365,6 +1348,7 @@ static int hypervisor_set_rtc_time(struct rtc_time *time)
return hypervisor_set_time(seconds); return hypervisor_set_time(seconds);
} }
#ifdef CONFIG_PCI
static void bq4802_get_rtc_time(struct rtc_time *time) static void bq4802_get_rtc_time(struct rtc_time *time)
{ {
unsigned char val = readb(bq4802_regs + 0x0e); unsigned char val = readb(bq4802_regs + 0x0e);
...@@ -1436,6 +1420,7 @@ static int bq4802_set_rtc_time(struct rtc_time *time) ...@@ -1436,6 +1420,7 @@ static int bq4802_set_rtc_time(struct rtc_time *time)
return 0; return 0;
} }
#endif /* CONFIG_PCI */
struct mini_rtc_ops { struct mini_rtc_ops {
void (*get_rtc_time)(struct rtc_time *); void (*get_rtc_time)(struct rtc_time *);
...@@ -1452,10 +1437,12 @@ static struct mini_rtc_ops hypervisor_rtc_ops = { ...@@ -1452,10 +1437,12 @@ static struct mini_rtc_ops hypervisor_rtc_ops = {
.set_rtc_time = hypervisor_set_rtc_time, .set_rtc_time = hypervisor_set_rtc_time,
}; };
#ifdef CONFIG_PCI
static struct mini_rtc_ops bq4802_rtc_ops = { static struct mini_rtc_ops bq4802_rtc_ops = {
.get_rtc_time = bq4802_get_rtc_time, .get_rtc_time = bq4802_get_rtc_time,
.set_rtc_time = bq4802_set_rtc_time, .set_rtc_time = bq4802_set_rtc_time,
}; };
#endif /* CONFIG_PCI */
static struct mini_rtc_ops *mini_rtc_ops; static struct mini_rtc_ops *mini_rtc_ops;
...@@ -1579,8 +1566,10 @@ static int __init rtc_mini_init(void) ...@@ -1579,8 +1566,10 @@ static int __init rtc_mini_init(void)
mini_rtc_ops = &hypervisor_rtc_ops; mini_rtc_ops = &hypervisor_rtc_ops;
else if (this_is_starfire) else if (this_is_starfire)
mini_rtc_ops = &starfire_rtc_ops; mini_rtc_ops = &starfire_rtc_ops;
#ifdef CONFIG_PCI
else if (bq4802_regs) else if (bq4802_regs)
mini_rtc_ops = &bq4802_rtc_ops; mini_rtc_ops = &bq4802_rtc_ops;
#endif /* CONFIG_PCI */
else else
return -ENODEV; return -ENODEV;
......
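The TOD rework drops the hand-rolled %o5/%o0/%o1 trap sequences in favor of the sun4v_tod_get()/sun4v_tod_set() wrappers while keeping the bounded retry on HV_EWOULDBLOCK. The shape of that retry loop as a self-contained sketch; fake_tod_get() and the HV_EWOULDBLOCK value are stand-ins, not the real hypervisor interface:

    #include <stdio.h>

    #define HV_EOK          0UL
    #define HV_EWOULDBLOCK  9UL   /* illustrative value for this sketch */

    /* Stand-in for sun4v_tod_get(): report "busy" twice, then succeed. */
    static int calls;
    static unsigned long fake_tod_get(unsigned long *time)
    {
            if (++calls <= 2)
                    return HV_EWOULDBLOCK;
            *time = 1180000000UL;
            return HV_EOK;
    }

    int main(void)
    {
            int retries = 10000;
            unsigned long time, ret;

    retry:
            ret = fake_tod_get(&time);
            if (ret == HV_EOK) {
                    printf("tod=%lu after %d busy replies\n", time, calls - 1);
                    return 0;
            }
            if (ret == HV_EWOULDBLOCK && --retries > 0)
                    goto retry;        /* the kernel udelay()s 100us per retry */
            printf("TOD service stayed busy\n");
            return 1;
    }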
...@@ -795,8 +795,7 @@ extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector ...@@ -795,8 +795,7 @@ extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector
void __init cheetah_ecache_flush_init(void) void __init cheetah_ecache_flush_init(void)
{ {
unsigned long largest_size, smallest_linesize, order, ver; unsigned long largest_size, smallest_linesize, order, ver;
struct device_node *dp; int i, sz;
int i, instance, sz;
/* Scan all cpu device tree nodes, note two values: /* Scan all cpu device tree nodes, note two values:
* 1) largest E-cache size * 1) largest E-cache size
...@@ -805,18 +804,20 @@ void __init cheetah_ecache_flush_init(void) ...@@ -805,18 +804,20 @@ void __init cheetah_ecache_flush_init(void)
largest_size = 0UL; largest_size = 0UL;
smallest_linesize = ~0UL; smallest_linesize = ~0UL;
instance = 0; for (i = 0; i < NR_CPUS; i++) {
while (!cpu_find_by_instance(instance, &dp, NULL)) {
unsigned long val; unsigned long val;
val = of_getintprop_default(dp, "ecache-size", val = cpu_data(i).ecache_size;
(2 * 1024 * 1024)); if (!val)
continue;
if (val > largest_size) if (val > largest_size)
largest_size = val; largest_size = val;
val = of_getintprop_default(dp, "ecache-line-size", 64);
val = cpu_data(i).ecache_line_size;
if (val < smallest_linesize) if (val < smallest_linesize)
smallest_linesize = val; smallest_linesize = val;
instance++;
} }
if (largest_size == 0UL || smallest_linesize == ~0UL) { if (largest_size == 0UL || smallest_linesize == ~0UL) {
...@@ -2564,7 +2565,15 @@ void __init trap_init(void) ...@@ -2564,7 +2565,15 @@ void __init trap_init(void)
(TRAP_PER_CPU_TSB_HUGE_TEMP != (TRAP_PER_CPU_TSB_HUGE_TEMP !=
offsetof(struct trap_per_cpu, tsb_huge_temp)) || offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
(TRAP_PER_CPU_IRQ_WORKLIST != (TRAP_PER_CPU_IRQ_WORKLIST !=
offsetof(struct trap_per_cpu, irq_worklist))) offsetof(struct trap_per_cpu, irq_worklist)) ||
(TRAP_PER_CPU_CPU_MONDO_QMASK !=
offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
(TRAP_PER_CPU_DEV_MONDO_QMASK !=
offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
(TRAP_PER_CPU_RESUM_QMASK !=
offsetof(struct trap_per_cpu, resum_qmask)) ||
(TRAP_PER_CPU_NONRESUM_QMASK !=
offsetof(struct trap_per_cpu, nonresum_qmask)))
trap_per_cpu_offsets_are_bolixed_dave(); trap_per_cpu_offsets_are_bolixed_dave();
if ((TSB_CONFIG_TSB != if ((TSB_CONFIG_TSB !=
......
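The extra comparisons added here follow the existing sanity-check pattern in trap_init(): every operand is a compile-time constant, so when the assembler-side TRAP_PER_CPU_* offsets agree with the C layout of struct trap_per_cpu the whole if-statement folds away, and on a mismatch the call to the (apparently never-defined) trap_per_cpu_offsets_are_bolixed_dave() survives and the kernel fails to link. A stand-alone sketch of the same trick, with made-up names and assuming an optimizing build:

    /* Link-time offset assertion sketch, modeled on the check above.
     * example_offsets_are_wrong() is deliberately never defined, so the
     * program only links if the constant condition folds to false.
     */
    #include <stddef.h>

    struct example_block {
            unsigned int irq_worklist;
            unsigned int cpu_mondo_qmask;
    };

    #define EXAMPLE_CPU_MONDO_QMASK 0x4    /* offset an asm file would hard-code */

    extern void example_offsets_are_wrong(void);   /* no definition anywhere */

    int main(void)
    {
            if (EXAMPLE_CPU_MONDO_QMASK !=
                offsetof(struct example_block, cpu_mondo_qmask))
                    example_offsets_are_wrong();    /* unresolved symbol on mismatch */
            return 0;
    }

The payoff is that a drift between the trap-table assembly and the C struct shows up as a build failure instead of silent per-cpu corruption.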
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/cache.h> #include <linux/cache.h>
#include <linux/sort.h> #include <linux/sort.h>
#include <linux/percpu.h>
#include <asm/head.h> #include <asm/head.h>
#include <asm/system.h> #include <asm/system.h>
...@@ -43,8 +44,8 @@ ...@@ -43,8 +44,8 @@
#include <asm/tsb.h> #include <asm/tsb.h>
#include <asm/hypervisor.h> #include <asm/hypervisor.h>
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/sstate.h>
extern void device_scan(void); #include <asm/mdesc.h>
#define MAX_PHYS_ADDRESS (1UL << 42UL) #define MAX_PHYS_ADDRESS (1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL)
...@@ -60,8 +61,11 @@ unsigned long kern_linear_pte_xor[2] __read_mostly; ...@@ -60,8 +61,11 @@ unsigned long kern_linear_pte_xor[2] __read_mostly;
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)]; unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
#ifndef CONFIG_DEBUG_PAGEALLOC #ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings. */ /* A special kernel TSB for 4MB and 256MB linear mappings.
struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES]; * Space is allocated for this right after the trap table
* in arch/sparc64/kernel/head.S
*/
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif #endif
#define MAX_BANKS 32 #define MAX_BANKS 32
...@@ -190,12 +194,9 @@ inline void flush_dcache_page_impl(struct page *page) ...@@ -190,12 +194,9 @@ inline void flush_dcache_page_impl(struct page *page)
} }
#define PG_dcache_dirty PG_arch_1 #define PG_dcache_dirty PG_arch_1
#define PG_dcache_cpu_shift 24UL #define PG_dcache_cpu_shift 32UL
#define PG_dcache_cpu_mask (256UL - 1UL) #define PG_dcache_cpu_mask \
((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
#if NR_CPUS > 256
#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
#endif
#define dcache_dirty_cpu(page) \ #define dcache_dirty_cpu(page) \
(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
...@@ -557,26 +558,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr, ...@@ -557,26 +558,11 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
unsigned long pte, unsigned long pte,
unsigned long mmu) unsigned long mmu)
{ {
register unsigned long func asm("%o5"); unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
register unsigned long arg0 asm("%o0");
register unsigned long arg1 asm("%o1"); if (ret != 0) {
register unsigned long arg2 asm("%o2");
register unsigned long arg3 asm("%o3");
func = HV_FAST_MMU_MAP_PERM_ADDR;
arg0 = vaddr;
arg1 = 0;
arg2 = pte;
arg3 = mmu;
__asm__ __volatile__("ta 0x80"
: "=&r" (func), "=&r" (arg0),
"=&r" (arg1), "=&r" (arg2),
"=&r" (arg3)
: "0" (func), "1" (arg0), "2" (arg1),
"3" (arg2), "4" (arg3));
if (arg0 != 0) {
prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: " prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
"errors with %lx\n", vaddr, 0, pte, mmu, arg0); "errors with %lx\n", vaddr, 0, pte, mmu, ret);
prom_halt(); prom_halt();
} }
} }
...@@ -1313,20 +1299,16 @@ static void __init sun4v_ktsb_init(void) ...@@ -1313,20 +1299,16 @@ static void __init sun4v_ktsb_init(void)
void __cpuinit sun4v_ktsb_register(void) void __cpuinit sun4v_ktsb_register(void)
{ {
register unsigned long func asm("%o5"); unsigned long pa, ret;
register unsigned long arg0 asm("%o0");
register unsigned long arg1 asm("%o1");
unsigned long pa;
pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE); pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
func = HV_FAST_MMU_TSB_CTX0; ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
arg0 = NUM_KTSB_DESCR; if (ret != 0) {
arg1 = pa; prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
__asm__ __volatile__("ta %6" "errors with %lx\n", pa, ret);
: "=&r" (func), "=&r" (arg0), "=&r" (arg1) prom_halt();
: "0" (func), "1" (arg0), "2" (arg1), }
"i" (HV_FAST_TRAP));
} }
/* paging_init() sets up the page tables */ /* paging_init() sets up the page tables */
...@@ -1334,6 +1316,9 @@ void __cpuinit sun4v_ktsb_register(void) ...@@ -1334,6 +1316,9 @@ void __cpuinit sun4v_ktsb_register(void)
extern void cheetah_ecache_flush_init(void); extern void cheetah_ecache_flush_init(void);
extern void sun4v_patch_tlb_handlers(void); extern void sun4v_patch_tlb_handlers(void);
extern void cpu_probe(void);
extern void central_probe(void);
static unsigned long last_valid_pfn; static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048]; pgd_t swapper_pg_dir[2048];
...@@ -1345,9 +1330,24 @@ void __init paging_init(void) ...@@ -1345,9 +1330,24 @@ void __init paging_init(void)
unsigned long end_pfn, pages_avail, shift, phys_base; unsigned long end_pfn, pages_avail, shift, phys_base;
unsigned long real_end, i; unsigned long real_end, i;
/* These build time checks make sure that the dcache_dirty_cpu() /* These build time checks make sure that the dcache_dirty_cpu()
* page->flags usage will work.
*
* When a page gets marked as dcache-dirty, we store the
* cpu number starting at bit 32 in the page->flags. Also,
* functions like clear_dcache_dirty_cpu use the cpu mask
* in 13-bit signed-immediate instruction fields.
*/
BUILD_BUG_ON(FLAGS_RESERVED != 32);
BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
ilog2(roundup_pow_of_two(NR_CPUS)) > FLAGS_RESERVED);
BUILD_BUG_ON(NR_CPUS > 4096);
kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
sstate_booting();
/* Invalidate both kernel TSBs. */ /* Invalidate both kernel TSBs. */
memset(swapper_tsb, 0x40, sizeof(swapper_tsb)); memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC #ifndef CONFIG_DEBUG_PAGEALLOC
...@@ -1416,8 +1416,13 @@ void __init paging_init(void) ...@@ -1416,8 +1416,13 @@ void __init paging_init(void)
kernel_physical_mapping_init(); kernel_physical_mapping_init();
real_setup_per_cpu_areas();
prom_build_devicetree(); prom_build_devicetree();
if (tlb_type == hypervisor)
sun4v_mdesc_init();
{ {
unsigned long zones_size[MAX_NR_ZONES]; unsigned long zones_size[MAX_NR_ZONES];
unsigned long zholes_size[MAX_NR_ZONES]; unsigned long zholes_size[MAX_NR_ZONES];
...@@ -1434,7 +1439,10 @@ void __init paging_init(void) ...@@ -1434,7 +1439,10 @@ void __init paging_init(void)
zholes_size); zholes_size);
} }
device_scan(); prom_printf("Booting Linux...\n");
central_probe();
cpu_probe();
} }
static void __init taint_real_pages(void) static void __init taint_real_pages(void)
......
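The comment and BUILD_BUG_ON()s added to paging_init() pin down the page->flags layout that the new dcache-dirty tracking depends on: the owning cpu number is stored starting at bit 32, behind a mask derived from NR_CPUS rounded up to a power of two. A user-space sketch of that packing (the shift and width here are illustrative assumptions, not the kernel's generated values):

    #include <stdio.h>
    #include <stdint.h>

    #define CPU_SHIFT 32
    #define MAX_CPUS  4096ULL               /* assumed; must be a power of two */
    #define CPU_MASK  (MAX_CPUS - 1)

    static uint64_t set_dirty_cpu(uint64_t flags, uint64_t cpu)
    {
            flags &= ~(CPU_MASK << CPU_SHIFT);              /* clear old cpu field */
            return flags | ((cpu & CPU_MASK) << CPU_SHIFT);
    }

    static uint64_t dirty_cpu(uint64_t flags)
    {
            return (flags >> CPU_SHIFT) & CPU_MASK;
    }

    int main(void)
    {
            uint64_t flags = 0;

            flags = set_dirty_cpu(flags, 37);
            printf("dcache-dirty cpu = %llu\n",
                   (unsigned long long)dirty_cpu(flags));   /* prints 37 */
            return 0;
    }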
...@@ -15,6 +15,25 @@ ...@@ -15,6 +15,25 @@
#include <asm/oplib.h> #include <asm/oplib.h>
#include <asm/system.h> #include <asm/system.h>
int prom_service_exists(const char *service_name)
{
int err = p1275_cmd("test", P1275_ARG(0, P1275_ARG_IN_STRING) |
P1275_INOUT(1, 1), service_name);
if (err)
return 0;
return 1;
}
void prom_sun4v_guest_soft_state(void)
{
const char *svc = "SUNW,soft-state-supported";
if (!prom_service_exists(svc))
return;
p1275_cmd(svc, P1275_INOUT(0, 0));
}
/* Reset and reboot the machine with the command 'bcommand'. */ /* Reset and reboot the machine with the command 'bcommand'. */
void prom_reboot(const char *bcommand) void prom_reboot(const char *bcommand)
{ {
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
# #
config DRM config DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && PCI depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
help help
Kernel-level support for the Direct Rendering Infrastructure (DRI) Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select introduced in XFree86 4.0. If you say Y here, you need to select
......
...@@ -1753,23 +1753,9 @@ config SUN3X_ESP ...@@ -1753,23 +1753,9 @@ config SUN3X_ESP
The ESP was an on-board SCSI controller used on Sun 3/80 The ESP was an on-board SCSI controller used on Sun 3/80
machines. Say Y here to compile in support for it. machines. Say Y here to compile in support for it.
config SCSI_ESP_CORE
tristate "ESP Scsi Driver Core"
depends on SCSI
select SCSI_SPI_ATTRS
help
This is a core driver for NCR53c9x based scsi chipsets,
also known as "ESP" for Emulex Scsi Processor or
Enhanced Scsi Processor. This driver does not exist by
itself, there are front-end drivers which, when enabled,
select and enable this driver. One example is SCSI_SUNESP.
These front-end drivers provide probing, DMA, and register
access support for the core driver.
config SCSI_SUNESP config SCSI_SUNESP
tristate "Sparc ESP Scsi Driver" tristate "Sparc ESP Scsi Driver"
depends on SBUS && SCSI depends on SBUS && SCSI
select SCSI_ESP_CORE
help help
This is the driver for the Sun ESP SCSI host adapter. The ESP This is the driver for the Sun ESP SCSI host adapter. The ESP
chipset is present in most SPARC SBUS-based computers. chipset is present in most SPARC SBUS-based computers.
......
...@@ -106,8 +106,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o ...@@ -106,8 +106,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/ obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/ obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_ESP_CORE) += esp_scsi.o obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
obj-$(CONFIG_SCSI_SUNESP) += sun_esp.o
obj-$(CONFIG_SCSI_GDTH) += gdth.o obj-$(CONFIG_SCSI_GDTH) += gdth.o
obj-$(CONFIG_SCSI_INITIO) += initio.o obj-$(CONFIG_SCSI_INITIO) += initio.o
obj-$(CONFIG_SCSI_INIA100) += a100u2w.o obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
...@@ -121,7 +120,7 @@ obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o ...@@ -121,7 +120,7 @@ obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
obj-$(CONFIG_SCSI_PPA) += ppa.o obj-$(CONFIG_SCSI_PPA) += ppa.o
obj-$(CONFIG_SCSI_IMM) += imm.o obj-$(CONFIG_SCSI_IMM) += imm.o
obj-$(CONFIG_JAZZ_ESP) += NCR53C9x.o jazz_esp.o obj-$(CONFIG_JAZZ_ESP) += esp_scsi.o jazz_esp.o
obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o obj-$(CONFIG_SUN3X_ESP) += NCR53C9x.o sun3x_esp.o
obj-$(CONFIG_SCSI_FCAL) += fcal.o obj-$(CONFIG_SCSI_FCAL) += fcal.o
obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
......
This diff is collapsed.
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
* *
*/ */
#include <linux/completion.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/types.h> #include <linux/types.h>
...@@ -50,16 +51,10 @@ static struct ctrl_inquiry { ...@@ -50,16 +51,10 @@ static struct ctrl_inquiry {
} *fcs __initdata; } *fcs __initdata;
static int fcscount __initdata = 0; static int fcscount __initdata = 0;
static atomic_t fcss __initdata = ATOMIC_INIT(0); static atomic_t fcss __initdata = ATOMIC_INIT(0);
DECLARE_MUTEX_LOCKED(fc_sem); static DECLARE_COMPLETION(fc_detect_complete);
static int pluto_encode_addr(Scsi_Cmnd *SCpnt, u16 *addr, fc_channel *fc, fcp_cmnd *fcmd); static int pluto_encode_addr(Scsi_Cmnd *SCpnt, u16 *addr, fc_channel *fc, fcp_cmnd *fcmd);
static void __init pluto_detect_timeout(unsigned long data)
{
PLND(("Timeout\n"))
up(&fc_sem);
}
static void __init pluto_detect_done(Scsi_Cmnd *SCpnt) static void __init pluto_detect_done(Scsi_Cmnd *SCpnt)
{ {
/* Do nothing */ /* Do nothing */
...@@ -69,7 +64,7 @@ static void __init pluto_detect_scsi_done(Scsi_Cmnd *SCpnt) ...@@ -69,7 +64,7 @@ static void __init pluto_detect_scsi_done(Scsi_Cmnd *SCpnt)
{ {
PLND(("Detect done %08lx\n", (long)SCpnt)) PLND(("Detect done %08lx\n", (long)SCpnt))
if (atomic_dec_and_test (&fcss)) if (atomic_dec_and_test (&fcss))
up(&fc_sem); complete(&fc_detect_complete);
} }
int pluto_slave_configure(struct scsi_device *device) int pluto_slave_configure(struct scsi_device *device)
...@@ -96,7 +91,6 @@ int __init pluto_detect(struct scsi_host_template *tpnt) ...@@ -96,7 +91,6 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
int i, retry, nplutos; int i, retry, nplutos;
fc_channel *fc; fc_channel *fc;
struct scsi_device dev; struct scsi_device dev;
DEFINE_TIMER(fc_timer, pluto_detect_timeout, 0, 0);
tpnt->proc_name = "pluto"; tpnt->proc_name = "pluto";
fcscount = 0; fcscount = 0;
...@@ -187,15 +181,11 @@ int __init pluto_detect(struct scsi_host_template *tpnt) ...@@ -187,15 +181,11 @@ int __init pluto_detect(struct scsi_host_template *tpnt)
} }
} }
fc_timer.expires = jiffies + 10 * HZ; wait_for_completion_timeout(&fc_detect_complete, 10 * HZ);
add_timer(&fc_timer);
down(&fc_sem);
PLND(("Woken up\n")) PLND(("Woken up\n"))
if (!atomic_read(&fcss)) if (!atomic_read(&fcss))
break; /* All fc channels have answered us */ break; /* All fc channels have answered us */
} }
del_timer_sync(&fc_timer);
PLND(("Finished search\n")) PLND(("Finished search\n"))
for (i = 0, nplutos = 0; i < fcscount; i++) { for (i = 0, nplutos = 0; i < fcscount; i++) {
......
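For reference, the pluto.c conversion above boils down to the standard completion idiom: the waiter blocks in wait_for_completion_timeout() instead of pairing a locked semaphore with a private timer, and the responder simply calls complete(). A minimal sketch with hypothetical names (not the driver's actual probe path):

    #include <linux/completion.h>
    #include <linux/param.h>        /* HZ */

    static DECLARE_COMPLETION(probe_done);

    /* Called from the response path once the last answer arrives. */
    static void probe_response(void)
    {
            complete(&probe_done);
    }

    /* Called from the detect routine; returns nonzero if an answer arrived
     * before the 10 second timeout, zero if we gave up waiting.
     */
    static int wait_for_probe(void)
    {
            return wait_for_completion_timeout(&probe_done, 10 * HZ) != 0;
    }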
...@@ -30,9 +30,9 @@ void ...@@ -30,9 +30,9 @@ void
sunserial_console_termios(struct console *con) sunserial_console_termios(struct console *con)
{ {
char mode[16], buf[16], *s; char mode[16], buf[16], *s;
char *mode_prop = "ttyX-mode"; char mode_prop[] = "ttyX-mode";
char *cd_prop = "ttyX-ignore-cd"; char cd_prop[] = "ttyX-ignore-cd";
char *dtr_prop = "ttyX-rts-dtr-off"; char dtr_prop[] = "ttyX-rts-dtr-off";
char *ssp_console_modes_prop = "ssp-console-modes"; char *ssp_console_modes_prop = "ssp-console-modes";
int baud, bits, stop, cflag; int baud, bits, stop, cflag;
char parity; char parity;
......
...@@ -1239,7 +1239,7 @@ static inline struct console *SUNZILOG_CONSOLE(void) ...@@ -1239,7 +1239,7 @@ static inline struct console *SUNZILOG_CONSOLE(void)
#define SUNZILOG_CONSOLE() (NULL) #define SUNZILOG_CONSOLE() (NULL)
#endif #endif
static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channel) static void __devinit sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channel)
{ {
int baud, brg; int baud, brg;
...@@ -1259,7 +1259,7 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe ...@@ -1259,7 +1259,7 @@ static void __init sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channe
} }
#ifdef CONFIG_SERIO #ifdef CONFIG_SERIO
static void __init sunzilog_register_serio(struct uart_sunzilog_port *up) static void __devinit sunzilog_register_serio(struct uart_sunzilog_port *up)
{ {
struct serio *serio = &up->serio; struct serio *serio = &up->serio;
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
* *
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au) * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
* Copyright (C) 2007 Kyle McMartin (kyle@parisc-linux.org)
* *
* Additions by Keith M Wesolowski (wesolows@foobazco.org) based * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
* on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
...@@ -10,11 +11,48 @@ ...@@ -10,11 +11,48 @@
#ifndef __ARCH_SPARC_ATOMIC__ #ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__ #define __ARCH_SPARC_ATOMIC__
#include <linux/types.h>
typedef struct { volatile int counter; } atomic_t; typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__ #ifdef __KERNEL__
/* Emulate cmpxchg() the same way we emulate atomics,
* by hashing the object address and indexing into an array
* of spinlocks to get a bit of performance...
*
* See arch/sparc/lib/atomic32.c for implementation.
*
* Cribbed from <asm-parisc/atomic.h>
*/
#define __HAVE_ARCH_CMPXCHG 1
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
/* we only need to support cmpxchg of a u32 on sparc */
extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
switch(size) {
case 4:
return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
default:
__cmpxchg_called_with_bad_pointer();
break;
}
return old;
}
#define cmpxchg(ptr,o,n) ({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
#define ATOMIC_INIT(i) { (i) } #define ATOMIC_INIT(i) { (i) }
extern int __atomic_add_return(int, atomic_t *); extern int __atomic_add_return(int, atomic_t *);
......
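The comment block spells out the approach: since sparc32 has no compare-and-swap instruction, the target address is hashed into a small array of spinlocks and the compare-and-store is done under the selected lock. Roughly, such an emulation can be written as below (illustrative names and hash parameters; the kernel's real code lives in arch/sparc/lib/atomic32.c):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define EMU_HASH_SIZE 4
    static spinlock_t emu_hash[EMU_HASH_SIZE];

    static void emu_hash_init(void)         /* call once before first use */
    {
            int i;

            for (i = 0; i < EMU_HASH_SIZE; i++)
                    spin_lock_init(&emu_hash[i]);
    }

    /* Hash the object address down to one of a few locks. */
    #define EMU_HASH_LOCK(p) \
            (&emu_hash[(((unsigned long)(p)) >> 8) & (EMU_HASH_SIZE - 1)])

    static unsigned long emu_cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
    {
            unsigned long flags;
            u32 prev;

            spin_lock_irqsave(EMU_HASH_LOCK(ptr), flags);
            prev = *ptr;
            if (prev == old)
                    *ptr = new;             /* store only on a match */
            spin_unlock_irqrestore(EMU_HASH_LOCK(ptr), flags);

            return (unsigned long)prev;
    }

A typical caller loops, re-reading the value and retrying whenever the return differs from what it read; the scheme is only atomic with respect to other users of the same hashed locks, which is exactly why the new EMULATED_CMPXCHG symbol exists and why DRM now refuses to build against it.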
/* $Id: bugs.h,v 1.1 1996/12/26 13:25:20 davem Exp $ /* bugs.h: Sparc64 probes for various bugs.
* include/asm-sparc64/bugs.h: Sparc probes for various bugs.
* *
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
*/ */
#include <asm/sstate.h>
extern unsigned long loops_per_jiffy; extern unsigned long loops_per_jiffy;
...@@ -12,4 +11,5 @@ static void __init check_bugs(void) ...@@ -12,4 +11,5 @@ static void __init check_bugs(void)
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
cpu_data(0).udelay_val = loops_per_jiffy; cpu_data(0).udelay_val = loops_per_jiffy;
#endif #endif
sstate_running();
} }
...@@ -17,11 +17,11 @@ ...@@ -17,11 +17,11 @@
typedef struct { typedef struct {
/* Dcache line 1 */ /* Dcache line 1 */
unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ unsigned int __softirq_pending; /* must be 1st, see rtrap.S */
unsigned int __pad0_1; unsigned int __pad0;
unsigned int __pad0_2;
unsigned int __pad1;
unsigned long clock_tick; /* %tick's per second */ unsigned long clock_tick; /* %tick's per second */
unsigned long udelay_val; unsigned long udelay_val;
unsigned int __pad1;
unsigned int __pad2;
/* Dcache line 2, rarely used */ /* Dcache line 2, rarely used */
unsigned int dcache_size; unsigned int dcache_size;
...@@ -30,8 +30,8 @@ typedef struct { ...@@ -30,8 +30,8 @@ typedef struct {
unsigned int icache_line_size; unsigned int icache_line_size;
unsigned int ecache_size; unsigned int ecache_size;
unsigned int ecache_line_size; unsigned int ecache_line_size;
int core_id;
unsigned int __pad3; unsigned int __pad3;
unsigned int __pad4;
} cpuinfo_sparc; } cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
...@@ -76,12 +76,18 @@ struct trap_per_cpu { ...@@ -76,12 +76,18 @@ struct trap_per_cpu {
/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */ /* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
unsigned int irq_worklist; unsigned int irq_worklist;
unsigned int __pad1; unsigned int cpu_mondo_qmask;
unsigned long __pad2[3]; unsigned int dev_mondo_qmask;
unsigned int resum_qmask;
unsigned int nonresum_qmask;
unsigned int __pad2[3];
} __attribute__((aligned(64))); } __attribute__((aligned(64)));
extern struct trap_per_cpu trap_block[NR_CPUS]; extern struct trap_per_cpu trap_block[NR_CPUS];
extern void init_cur_cpu_trap(struct thread_info *); extern void init_cur_cpu_trap(struct thread_info *);
extern void setup_tba(void); extern void setup_tba(void);
extern int ncpus_probed;
extern unsigned long real_hard_smp_processor_id(void);
struct cpuid_patch_entry { struct cpuid_patch_entry {
unsigned int addr; unsigned int addr;
...@@ -122,6 +128,10 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, ...@@ -122,6 +128,10 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
#define TRAP_PER_CPU_TSB_HUGE 0xd0 #define TRAP_PER_CPU_TSB_HUGE 0xd0
#define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8 #define TRAP_PER_CPU_TSB_HUGE_TEMP 0xd8
#define TRAP_PER_CPU_IRQ_WORKLIST 0xe0 #define TRAP_PER_CPU_IRQ_WORKLIST 0xe0
#define TRAP_PER_CPU_CPU_MONDO_QMASK 0xe4
#define TRAP_PER_CPU_DEV_MONDO_QMASK 0xe8
#define TRAP_PER_CPU_RESUM_QMASK 0xec
#define TRAP_PER_CPU_NONRESUM_QMASK 0xf0
#define TRAP_BLOCK_SZ_SHIFT 8 #define TRAP_BLOCK_SZ_SHIFT 8
...@@ -192,7 +202,7 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch, ...@@ -192,7 +202,7 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
* the calculations done by the macro mid-stream. * the calculations done by the macro mid-stream.
*/ */
#define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \ #define LOAD_PER_CPU_BASE(DEST, THR, REG1, REG2, REG3) \
ldub [THR + TI_CPU], REG1; \ lduh [THR + TI_CPU], REG1; \
sethi %hi(__per_cpu_shift), REG3; \ sethi %hi(__per_cpu_shift), REG3; \
sethi %hi(__per_cpu_base), REG2; \ sethi %hi(__per_cpu_base), REG2; \
ldx [REG3 + %lo(__per_cpu_shift)], REG3; \ ldx [REG3 + %lo(__per_cpu_shift)], REG3; \
......
This diff is collapsed.
...@@ -32,7 +32,6 @@ enum die_val { ...@@ -32,7 +32,6 @@ enum die_val {
DIE_TRAP, DIE_TRAP,
DIE_TRAP_TL1, DIE_TRAP_TL1,
DIE_CALL, DIE_CALL,
DIE_PAGE_FAULT,
}; };
#endif #endif
#ifndef _SPARC64_MDESC_H
#define _SPARC64_MDESC_H
#include <linux/types.h>
#include <asm/prom.h>
struct mdesc_node;
struct mdesc_arc {
const char *name;
struct mdesc_node *arc;
};
struct mdesc_node {
const char *name;
u64 node;
unsigned int unique_id;
unsigned int num_arcs;
struct property *properties;
struct mdesc_node *hash_next;
struct mdesc_node *allnodes_next;
struct mdesc_arc arcs[0];
};
extern struct mdesc_node *md_find_node_by_name(struct mdesc_node *from,
const char *name);
#define md_for_each_node_by_name(__mn, __name) \
for (__mn = md_find_node_by_name(NULL, __name); __mn; \
__mn = md_find_node_by_name(__mn, __name))
extern struct property *md_find_property(const struct mdesc_node *mp,
const char *name,
int *lenp);
extern const void *md_get_property(const struct mdesc_node *mp,
const char *name,
int *lenp);
extern void sun4v_mdesc_init(void);
#endif
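The header above is all declaration, so a short hypothetical consumer may help: walk the machine-description nodes of a given name and pull a property out of each. The node name "cpu" and property name "clock-frequency" are assumptions for illustration, not guarantees about the MD layout:

    #include <linux/kernel.h>
    #include <asm/mdesc.h>

    static void print_cpu_clocks(void)
    {
            struct mdesc_node *mp;

            md_for_each_node_by_name(mp, "cpu") {
                    const u64 *freq;
                    int len;

                    freq = md_get_property(mp, "clock-frequency", &len);
                    if (freq && len == sizeof(*freq))
                            printk(KERN_INFO "cpu node %llx: %llu Hz\n",
                                   (unsigned long long)mp->node,
                                   (unsigned long long)*freq);
            }
    }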
...@@ -316,11 +316,8 @@ extern int prom_setprop(int node, const char *prop_name, char *prop_value, ...@@ -316,11 +316,8 @@ extern int prom_setprop(int node, const char *prop_name, char *prop_value,
extern int prom_pathtoinode(const char *path); extern int prom_pathtoinode(const char *path);
extern int prom_inst2pkg(int); extern int prom_inst2pkg(int);
extern int prom_service_exists(const char *service_name);
/* CPU probing helpers. */ extern void prom_sun4v_guest_soft_state(void);
struct device_node;
int cpu_find_by_instance(int instance, struct device_node **dev_node, int *mid);
int cpu_find_by_mid(int mid, struct device_node **prom_node);
/* Client interface level routines. */ /* Client interface level routines. */
extern void prom_set_trap_table(unsigned long tba); extern void prom_set_trap_table(unsigned long tba);
......
...@@ -5,7 +5,8 @@ ...@@ -5,7 +5,8 @@
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
extern void setup_per_cpu_areas(void); #define setup_per_cpu_areas() do { } while (0)
extern void real_setup_per_cpu_areas(void);
extern unsigned long __per_cpu_base; extern unsigned long __per_cpu_base;
extern unsigned long __per_cpu_shift; extern unsigned long __per_cpu_shift;
...@@ -34,6 +35,7 @@ do { \ ...@@ -34,6 +35,7 @@ do { \
} while (0) } while (0)
#else /* ! SMP */ #else /* ! SMP */
#define real_setup_per_cpu_areas() do { } while (0)
#define DEFINE_PER_CPU(type, name) \ #define DEFINE_PER_CPU(type, name) \
__typeof__(type) per_cpu__##name __typeof__(type) per_cpu__##name
......
...@@ -90,6 +90,7 @@ extern struct device_node *of_find_compatible_node(struct device_node *from, ...@@ -90,6 +90,7 @@ extern struct device_node *of_find_compatible_node(struct device_node *from,
const char *type, const char *compat); const char *type, const char *compat);
extern struct device_node *of_find_node_by_path(const char *path); extern struct device_node *of_find_node_by_path(const char *path);
extern struct device_node *of_find_node_by_phandle(phandle handle); extern struct device_node *of_find_node_by_phandle(phandle handle);
extern struct device_node *of_find_node_by_cpuid(int cpuid);
extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_parent(const struct device_node *node);
extern struct device_node *of_get_next_child(const struct device_node *node, extern struct device_node *of_get_next_child(const struct device_node *node,
struct device_node *prev); struct device_node *prev);
......
...@@ -41,7 +41,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS]; ...@@ -41,7 +41,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS];
extern int hard_smp_processor_id(void); extern int hard_smp_processor_id(void);
#define raw_smp_processor_id() (current_thread_info()->cpu) #define raw_smp_processor_id() (current_thread_info()->cpu)
extern void smp_setup_cpu_possible_map(void); extern void smp_fill_in_sib_core_maps(void);
extern unsigned char boot_cpu_id; extern unsigned char boot_cpu_id;
#endif /* !(__ASSEMBLY__) */ #endif /* !(__ASSEMBLY__) */
...@@ -49,7 +49,7 @@ extern unsigned char boot_cpu_id; ...@@ -49,7 +49,7 @@ extern unsigned char boot_cpu_id;
#else #else
#define hard_smp_processor_id() 0 #define hard_smp_processor_id() 0
#define smp_setup_cpu_possible_map() do { } while (0) #define smp_fill_in_sib_core_maps() do { } while (0)
#define boot_cpu_id (0) #define boot_cpu_id (0)
#endif /* !(CONFIG_SMP) */ #endif /* !(CONFIG_SMP) */
......
#ifndef _SPARC64_SSTATE_H
#define _SPARC64_SSTATE_H
extern void sstate_booting(void);
extern void sstate_running(void);
extern void sstate_halt(void);
extern void sstate_poweroff(void);
extern void sstate_panic(void);
extern void sstate_reboot(void);
extern void sun4v_sstate_init(void);
#endif /* _SPARC64_SSTATE_H */
...@@ -38,8 +38,8 @@ struct thread_info { ...@@ -38,8 +38,8 @@ struct thread_info {
/* D$ line 1 */ /* D$ line 1 */
struct task_struct *task; struct task_struct *task;
unsigned long flags; unsigned long flags;
__u8 cpu;
__u8 fpsaved[7]; __u8 fpsaved[7];
__u8 pad;
unsigned long ksp; unsigned long ksp;
/* D$ line 2 */ /* D$ line 2 */
...@@ -49,7 +49,7 @@ struct thread_info { ...@@ -49,7 +49,7 @@ struct thread_info {
int preempt_count; /* 0 => preemptable, <0 => BUG */ int preempt_count; /* 0 => preemptable, <0 => BUG */
__u8 new_child; __u8 new_child;
__u8 syscall_noerror; __u8 syscall_noerror;
__u16 __pad; __u16 cpu;
unsigned long *utraps; unsigned long *utraps;
...@@ -83,8 +83,7 @@ struct thread_info { ...@@ -83,8 +83,7 @@ struct thread_info {
#define TI_CURRENT_DS (TI_FLAGS + TI_FLAG_BYTE_CURRENT_DS) #define TI_CURRENT_DS (TI_FLAGS + TI_FLAG_BYTE_CURRENT_DS)
#define TI_FPDEPTH (TI_FLAGS + TI_FLAG_BYTE_FPDEPTH) #define TI_FPDEPTH (TI_FLAGS + TI_FLAG_BYTE_FPDEPTH)
#define TI_WSAVED (TI_FLAGS + TI_FLAG_BYTE_WSAVED) #define TI_WSAVED (TI_FLAGS + TI_FLAG_BYTE_WSAVED)
#define TI_CPU 0x00000010 #define TI_FPSAVED 0x00000010
#define TI_FPSAVED 0x00000011
#define TI_KSP 0x00000018 #define TI_KSP 0x00000018
#define TI_FAULT_ADDR 0x00000020 #define TI_FAULT_ADDR 0x00000020
#define TI_KREGS 0x00000028 #define TI_KREGS 0x00000028
...@@ -92,6 +91,7 @@ struct thread_info { ...@@ -92,6 +91,7 @@ struct thread_info {
#define TI_PRE_COUNT 0x00000038 #define TI_PRE_COUNT 0x00000038
#define TI_NEW_CHILD 0x0000003c #define TI_NEW_CHILD 0x0000003c
#define TI_SYS_NOERROR 0x0000003d #define TI_SYS_NOERROR 0x0000003d
#define TI_CPU 0x0000003e
#define TI_UTRAPS 0x00000040 #define TI_UTRAPS 0x00000040
#define TI_REG_WINDOW 0x00000048 #define TI_REG_WINDOW 0x00000048
#define TI_RWIN_SPTRS 0x000003c8 #define TI_RWIN_SPTRS 0x000003c8
......
...@@ -6,4 +6,7 @@ ...@@ -6,4 +6,7 @@
#include <asm-generic/topology.h> #include <asm-generic/topology.h>
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
#endif /* _ASM_SPARC64_TOPOLOGY_H */ #endif /* _ASM_SPARC64_TOPOLOGY_H */
...@@ -271,7 +271,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; ...@@ -271,7 +271,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \ #define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
sethi %hi(swapper_4m_tsb), REG1; \ sethi %hi(swapper_4m_tsb), REG1; \
or REG1, %lo(swapper_4m_tsb), REG1; \ or REG1, %lo(swapper_4m_tsb), REG1; \
and TAG, (KERNEL_TSB_NENTRIES - 1), REG2; \ and TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
sllx REG2, 4, REG2; \ sllx REG2, 4, REG2; \
add REG1, REG2, REG2; \ add REG1, REG2, REG2; \
KTSB_LOAD_QUAD(REG2, REG3); \ KTSB_LOAD_QUAD(REG2, REG3); \
......