Commit 292a6c58 authored by Paul Mackerras

Merge branch 'for-paulus' of git://kernel/home/michael/src/work/

parents 8ad200d7 dc3a9efb
@@ -12,9 +12,6 @@
 # Rewritten by Cort Dougan and Paul Mackerras
 #
-# This must match PAGE_OFFSET in include/asm-powerpc/page.h.
-KERNELLOAD := $(CONFIG_KERNEL_START)
 HAS_BIARCH	:= $(call cc-option-yn, -m32)
 ifeq ($(CONFIG_PPC64),y)
@@ -59,7 +56,7 @@ override LD	+= -m elf$(SZ)ppc
 override CC	+= -m$(SZ)
 endif
-LDFLAGS_vmlinux	:= -Ttext $(KERNELLOAD) -Bstatic -e $(KERNELLOAD)
+LDFLAGS_vmlinux	:= -Bstatic
 # The -Iarch/$(ARCH)/include is temporary while we are merging
 CPPFLAGS	+= -Iarch/$(ARCH) -Iarch/$(ARCH)/include
......
(5 collapsed diffs in this commit are not shown.)
@@ -1914,24 +1914,6 @@ _GLOBAL(hmt_start_secondary)
 	blr
 #endif
 
-#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
-_GLOBAL(smp_release_cpus)
-	/* All secondary cpus are spinning on a common
-	 * spinloop, release them all now so they can start
-	 * to spin on their individual paca spinloops.
-	 * For non SMP kernels, the secondary cpus never
-	 * get out of the common spinloop.
-	 * XXX This does nothing useful on iSeries, secondaries are
-	 * already waiting on their paca.
-	 */
-	li	r3,1
-	LOADADDR(r5,__secondary_hold_spinloop)
-	std	r3,0(r5)
-	sync
-	blr
-#endif /* CONFIG_SMP */
-
 /*
  * We put a few things here that have to be page-aligned.
  * This stuff goes at the beginning of the bss, which is page-aligned.
......
@@ -48,8 +48,8 @@
 #include <asm/prom.h>
 #ifdef CONFIG_PPC64
 #include <asm/firmware.h>
-#include <asm/plpar_wrappers.h>
 #include <asm/time.h>
+#include <asm/machdep.h>
 #endif
 
 extern unsigned long _get_SP(void);
@@ -201,27 +201,15 @@ int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
 }
 #endif /* CONFIG_SPE */
 
-static void set_dabr_spr(unsigned long val)
-{
-	mtspr(SPRN_DABR, val);
-}
-
 int set_dabr(unsigned long dabr)
 {
-	int ret = 0;
-
 #ifdef CONFIG_PPC64
-	if (firmware_has_feature(FW_FEATURE_XDABR)) {
-		/* We want to catch accesses from kernel and userspace */
-		unsigned long flags = H_DABRX_KERNEL|H_DABRX_USER;
-		ret = plpar_set_xdabr(dabr, flags);
-	} else if (firmware_has_feature(FW_FEATURE_DABR)) {
-		ret = plpar_set_dabr(dabr);
-	} else
+	if (ppc_md.set_dabr)
+		return ppc_md.set_dabr(dabr);
 #endif
-		set_dabr_spr(dabr);
-
-	return ret;
+	mtspr(SPRN_DABR, dabr);
+	return 0;
 }
 
 #ifdef CONFIG_PPC64
......
@@ -1285,7 +1285,7 @@ static int __init early_init_dt_scan_memory(unsigned long node,
 	endp = reg + (l / sizeof(cell_t));
 
-	DBG("memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...\n",
+	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
 	    uname, l, reg[0], reg[1], reg[2], reg[3]);
 
 	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
......
@@ -103,8 +103,6 @@ extern void htab_initialize(void);
 extern void early_init_devtree(void *flat_dt);
 extern void unflatten_device_tree(void);
 
-extern void smp_release_cpus(void);
-
 int have_of = 1;
 int boot_cpuid = 0;
 int boot_cpuid_phys = 0;
@@ -400,6 +398,27 @@ void __init early_setup(unsigned long dt_ptr)
 }
 
+#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
+void smp_release_cpus(void)
+{
+	extern unsigned long __secondary_hold_spinloop;
+
+	DBG(" -> smp_release_cpus()\n");
+
+	/* All secondary cpus are spinning on a common spinloop, release them
+	 * all now so they can start to spin on their individual paca
+	 * spinloops. For non SMP kernels, the secondary cpus never get out
+	 * of the common spinloop.
+	 * This is useless but harmless on iSeries, secondaries are already
+	 * waiting on their paca spinloops. */
+
+	__secondary_hold_spinloop = 1;
+	mb();
+
+	DBG(" <- smp_release_cpus()\n");
+}
+#endif /* CONFIG_SMP || CONFIG_KEXEC */
+
 /*
  * Initialize some remaining members of the ppc64_caches and systemcfg structures
  * (at least until we get rid of them completely). This is mostly some
......
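
For orientation, the loop that this new C smp_release_cpus() releases is the early hold loop in head_64.S (not part of this hunk): each secondary cpu sits there reloading __secondary_hold_spinloop until the boot cpu stores a non-zero value and issues a barrier. A rough, editorial C rendering of that assembly loop (the symbol name is from the diff; the volatile qualifier is added only for this sketch):

	/* sketch only -- the real loop is assembly in __secondary_hold */
	extern volatile unsigned long __secondary_hold_spinloop;

	static void secondary_hold(void)
	{
		/* secondaries park here very early in boot */
		while (__secondary_hold_spinloop == 0)
			;	/* released by smp_release_cpus() storing 1 + mb() */

		/* ...then each cpu moves on to spin on its own paca spinloop */
	}
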
 #include <linux/config.h>
-#ifdef CONFIG_PPC64
 #include <asm/page.h>
-#else
-#define PAGE_SIZE	4096
-#endif
 #include <asm-generic/vmlinux.lds.h>
 
+ENTRY(_stext)
+
 #ifdef CONFIG_PPC64
 OUTPUT_ARCH(powerpc:common64)
 jiffies = jiffies_64;
@@ -21,6 +19,7 @@ SECTIONS
 	*(.exit.data)
   }
 
+  . = KERNELBASE;
 /* Read-only sections, merged into text segment: */
 #ifdef CONFIG_PPC32
......
@@ -42,13 +42,14 @@
 #include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/abs_addr.h>
-#include <asm/plpar_wrappers.h>
 #include <asm/pSeries_reconfig.h>
 #include <asm/systemcfg.h>
 #include <asm/firmware.h>
 #include <asm/tce.h>
 #include <asm/ppc-pci.h>
 
+#include "plpar_wrappers.h"
+
 #define DBG(fmt...)
 
 extern int is_python(struct device_node *);
......
@@ -38,7 +38,8 @@
 #include <asm/prom.h>
 #include <asm/abs_addr.h>
 #include <asm/cputable.h>
-#include <asm/plpar_wrappers.h>
+
+#include "plpar_wrappers.h"
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -260,22 +261,18 @@ int find_udbg_vterm(void)
 void vpa_init(int cpu)
 {
 	int hwcpu = get_hard_smp_processor_id(cpu);
-	unsigned long vpa = (unsigned long)&(paca[cpu].lppaca);
+	unsigned long vpa = __pa(&paca[cpu].lppaca);
 	long ret;
-	unsigned long flags;
-
-	/* Register the Virtual Processor Area (VPA) */
-	flags = 1UL << (63 - 18);
 
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
 		paca[cpu].lppaca.vmxregs_in_use = 1;
 
-	ret = register_vpa(flags, hwcpu, __pa(vpa));
+	ret = register_vpa(hwcpu, vpa);
 
 	if (ret)
 		printk(KERN_ERR "WARNING: vpa_init: VPA registration for "
 				"cpu %d (hw %d) of area %lx returns %ld\n",
-				cpu, hwcpu, __pa(vpa), ret);
+				cpu, hwcpu, vpa, ret);
 }
 
 long pSeries_lpar_hpte_insert(unsigned long hpte_group,
......
-#ifndef _PPC64_PLPAR_WRAPPERS_H
-#define _PPC64_PLPAR_WRAPPERS_H
+#ifndef _PSERIES_PLPAR_WRAPPERS_H
+#define _PSERIES_PLPAR_WRAPPERS_H
 
 #include <asm/hvcall.h>
 
 static inline long poll_pending(void)
 {
 	unsigned long dummy;
-	return plpar_hcall(H_POLL_PENDING, 0, 0, 0, 0,
-			&dummy, &dummy, &dummy);
+	return plpar_hcall(H_POLL_PENDING, 0, 0, 0, 0, &dummy, &dummy, &dummy);
 }
 
 static inline long prod_processor(void)
 {
 	plpar_hcall_norets(H_PROD);
-	return(0);
+	return 0;
 }
 
 static inline long cede_processor(void)
 {
 	plpar_hcall_norets(H_CEDE);
-	return(0);
+	return 0;
 }
 
-static inline long register_vpa(unsigned long flags, unsigned long proc,
+static inline long vpa_call(unsigned long flags, unsigned long cpu,
 		unsigned long vpa)
 {
-	return plpar_hcall_norets(H_REGISTER_VPA, flags, proc, vpa);
+	/* flags are in bits 16-18 (counting from most significant bit) */
+	flags = flags << (63 - 18);
+	return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
+}
+
+static inline long unregister_vpa(unsigned long cpu, unsigned long vpa)
+{
+	return vpa_call(0x5, cpu, vpa);
+}
+
+static inline long register_vpa(unsigned long cpu, unsigned long vpa)
+{
+	return vpa_call(0x1, cpu, vpa);
 }
 
-void vpa_init(int cpu);
+extern void vpa_init(int cpu);
 
-static inline long plpar_pte_remove(unsigned long flags,
-		unsigned long ptex,
-		unsigned long avpn,
-		unsigned long *old_pteh_ret,
-		unsigned long *old_ptel_ret)
+static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
+		unsigned long avpn, unsigned long *old_pteh_ret,
+		unsigned long *old_ptel_ret)
 {
 	unsigned long dummy;
-	return plpar_hcall(H_REMOVE, flags, ptex, avpn, 0,
-			old_pteh_ret, old_ptel_ret, &dummy);
+	return plpar_hcall(H_REMOVE, flags, ptex, avpn, 0, old_pteh_ret,
+			old_ptel_ret, &dummy);
 }
 
-static inline long plpar_pte_read(unsigned long flags,
-		unsigned long ptex,
-		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
+static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
+		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
 {
 	unsigned long dummy;
-	return plpar_hcall(H_READ, flags, ptex, 0, 0,
-			old_pteh_ret, old_ptel_ret, &dummy);
+	return plpar_hcall(H_READ, flags, ptex, 0, 0, old_pteh_ret,
+			old_ptel_ret, &dummy);
 }
 
-static inline long plpar_pte_protect(unsigned long flags,
-		unsigned long ptex,
-		unsigned long avpn)
+static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
+		unsigned long avpn)
 {
 	return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
 }
 
-static inline long plpar_tce_get(unsigned long liobn,
-		unsigned long ioba,
-		unsigned long *tce_ret)
+static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
+		unsigned long *tce_ret)
 {
 	unsigned long dummy;
-	return plpar_hcall(H_GET_TCE, liobn, ioba, 0, 0,
-			tce_ret, &dummy, &dummy);
+	return plpar_hcall(H_GET_TCE, liobn, ioba, 0, 0, tce_ret, &dummy,
+			&dummy);
 }
 
-static inline long plpar_tce_put(unsigned long liobn,
-		unsigned long ioba,
-		unsigned long tceval)
+static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
+		unsigned long tceval)
 {
 	return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
 }
 
 static inline long plpar_tce_put_indirect(unsigned long liobn,
-		unsigned long ioba,
-		unsigned long page,
-		unsigned long count)
+		unsigned long ioba, unsigned long page, unsigned long count)
 {
 	return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
 }
 
-static inline long plpar_tce_stuff(unsigned long liobn,
-		unsigned long ioba,
-		unsigned long tceval,
-		unsigned long count)
+static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
+		unsigned long tceval, unsigned long count)
 {
 	return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
 }
 
 static inline long plpar_get_term_char(unsigned long termno,
-		unsigned long *len_ret,
-		char *buf_ret)
+		unsigned long *len_ret, char *buf_ret)
 {
-	unsigned long *lbuf = (unsigned long *)buf_ret;	/* ToDo: alignment? */
-	return plpar_hcall(H_GET_TERM_CHAR, termno, 0, 0, 0,
-			len_ret, lbuf+0, lbuf+1);
+	unsigned long *lbuf = (unsigned long *)buf_ret;	/* TODO: alignment? */
+	return plpar_hcall(H_GET_TERM_CHAR, termno, 0, 0, 0, len_ret,
+			lbuf + 0, lbuf + 1);
 }
 
-static inline long plpar_put_term_char(unsigned long termno,
-		unsigned long len,
-		const char *buffer)
+static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
+		const char *buffer)
 {
-	unsigned long *lbuf = (unsigned long *)buffer;	/* ToDo: alignment? */
+	unsigned long *lbuf = (unsigned long *)buffer;	/* TODO: alignment? */
 	return plpar_hcall_norets(H_PUT_TERM_CHAR, termno, len, lbuf[0],
 			lbuf[1]);
 }
 
 static inline long plpar_set_xdabr(unsigned long address, unsigned long flags)
@@ -117,4 +117,4 @@ static inline long plpar_set_dabr(unsigned long val)
 	return plpar_hcall_norets(H_SET_DABR, val);
 }
 
-#endif /* _PPC64_PLPAR_WRAPPERS_H */
+#endif /* _PSERIES_PLPAR_WRAPPERS_H */
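
A quick check of the flag encoding introduced by vpa_call() above (an editorial illustration, not code from the commit): bits 16-18 counted from the most significant bit of a 64-bit doubleword correspond to a left shift by 63 - 18 = 45, so the values the new wrappers pass to H_REGISTER_VPA work out as follows:

	/* standalone illustration of the shift arithmetic */
	#include <stdio.h>

	int main(void)
	{
		unsigned long reg   = 0x1UL << (63 - 18);	/* register_vpa()   */
		unsigned long unreg = 0x5UL << (63 - 18);	/* unregister_vpa() */

		/* reg == 0x200000000000, the same value the old vpa_init()
		 * built by hand as "flags = 1UL << (63 - 18)" */
		printf("register: %#lx unregister: %#lx\n", reg, unreg);
		return 0;
	}
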
@@ -58,7 +58,6 @@
 #include <asm/irq.h>
 #include <asm/time.h>
 #include <asm/nvram.h>
-#include <asm/plpar_wrappers.h>
 #include "xics.h"
 #include <asm/firmware.h>
 #include <asm/pmc.h>
@@ -67,6 +66,8 @@
 #include <asm/i8259.h>
 #include <asm/udbg.h>
 
+#include "plpar_wrappers.h"
+
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
 #else
@@ -350,6 +351,16 @@ static void pSeries_mach_cpu_die(void)
 	for(;;);
 }
 
+static int pseries_set_dabr(unsigned long dabr)
+{
+	if (firmware_has_feature(FW_FEATURE_XDABR)) {
+		/* We want to catch accesses from kernel and userspace */
+		return plpar_set_xdabr(dabr, H_DABRX_KERNEL | H_DABRX_USER);
+	}
+
+	return plpar_set_dabr(dabr);
+}
+
 /*
  * Early initialization.  Relocation is on but do not reference unbolted pages
@@ -385,6 +396,8 @@ static void __init pSeries_init_early(void)
 		DBG("Hello World !\n");
 	}
 
+	if (firmware_has_feature(FW_FEATURE_XDABR | FW_FEATURE_DABR))
+		ppc_md.set_dabr = pseries_set_dabr;
 
 	iommu_init_early_pSeries();
......
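
Pulling the process.c, machdep.h and pSeries setup.c hunks together: after this change, the generic set_dabr() dispatches through the new ppc_md.set_dabr hook when a platform installs one, and otherwise writes SPRN_DABR directly; pSeries installs pseries_set_dabr() only when firmware advertises DABR or XDABR support. A small user-space model of that dispatch, for orientation only (names ending in _stub are invented for this sketch and are not in the commit):

	#include <stdio.h>

	static int (*set_dabr_hook)(unsigned long);	/* stands in for ppc_md.set_dabr */

	static int pseries_set_dabr_stub(unsigned long dabr)
	{
		printf("hcall path (plpar_set_[x]dabr) with %#lx\n", dabr);
		return 0;
	}

	static int set_dabr(unsigned long dabr)
	{
		if (set_dabr_hook)
			return set_dabr_hook(dabr);	/* platform hook, e.g. pSeries */
		printf("mtspr SPRN_DABR, %#lx\n", dabr);	/* direct SPR write fallback */
		return 0;
	}

	int main(void)
	{
		set_dabr(0x1000);			/* no hook installed: mtspr path */
		set_dabr_hook = pseries_set_dabr_stub;	/* what pSeries_init_early() does */
		set_dabr(0x2000);			/* hook path: firmware hcall */
		return 0;
	}
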
@@ -44,10 +44,11 @@
 #include <asm/firmware.h>
 #include <asm/system.h>
 #include <asm/rtas.h>
-#include <asm/plpar_wrappers.h>
 #include <asm/pSeries_reconfig.h>
 #include <asm/mpic.h>
 
+#include "plpar_wrappers.h"
+
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
 #else
......
@@ -32,7 +32,7 @@ const extern unsigned int relocate_new_kernel_size;
  * Provide a dummy crash_notes definition while crash dump arrives to ppc.
  * This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
  */
-void *crash_notes = NULL;
+note_buf_t crash_notes[NR_CPUS];
 
 void machine_shutdown(void)
 {
......
@@ -1914,24 +1914,6 @@ _GLOBAL(hmt_start_secondary)
 	blr
 #endif
 
-#if defined(CONFIG_KEXEC) || defined(CONFIG_SMP)
-_GLOBAL(smp_release_cpus)
-	/* All secondary cpus are spinning on a common
-	 * spinloop, release them all now so they can start
-	 * to spin on their individual paca spinloops.
-	 * For non SMP kernels, the secondary cpus never
-	 * get out of the common spinloop.
-	 * XXX This does nothing useful on iSeries, secondaries are
-	 * already waiting on their paca.
-	 */
-	li	r3,1
-	LOADADDR(r5,__secondary_hold_spinloop)
-	std	r3,0(r5)
-	sync
-	blr
-#endif /* CONFIG_SMP */
-
 /*
  * We put a few things here that have to be page-aligned.
  * This stuff goes at the beginning of the bss, which is page-aligned.
......
@@ -244,7 +244,6 @@ static void kexec_prepare_cpus(void)
 static void kexec_prepare_cpus(void)
 {
-	extern void smp_release_cpus(void);
 
 	/*
 	 * move the secondarys to us so that we can copy
 	 * the new kernel 0-0x100 safely
......
@@ -178,18 +178,22 @@ typedef elf_vrreg_t elf_vrregset_t32[ELF_NVRREG32];
 static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
 					  struct pt_regs *regs)
 {
-	int i;
-	int gprs = sizeof(struct pt_regs)/sizeof(ELF_GREG_TYPE);
-
-	if (gprs > ELF_NGREG)
-		gprs = ELF_NGREG;
-
-	for (i=0; i < gprs; i++)
-		elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
-
-	memset((char *)(elf_regs) + sizeof(struct pt_regs), 0, \
-	       sizeof(elf_gregset_t) - sizeof(struct pt_regs));
+	int i, nregs;
+
+	memset((void *)elf_regs, 0, sizeof(elf_gregset_t));
+
+	/* Our registers are always unsigned longs, whether we're a 32 bit
+	 * process or 64 bit, on either a 64 bit or 32 bit kernel.
+	 * Don't use ELF_GREG_TYPE here. */
+	nregs = sizeof(struct pt_regs) / sizeof(unsigned long);
+	if (nregs > ELF_NGREG)
+		nregs = ELF_NGREG;
+
+	for (i = 0; i < nregs; i++) {
+		/* This will correctly truncate 64 bit registers to 32 bits
+		 * for a 32 bit process on a 64 bit kernel. */
+		elf_regs[i] = (elf_greg_t)((ELF_GREG_TYPE *)regs)[i];
+	}
 }
 
 #define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
......
@@ -80,6 +80,7 @@ struct machdep_calls {
 	void		(*iommu_dev_setup)(struct pci_dev *dev);
 	void		(*iommu_bus_setup)(struct pci_bus *bus);
 	void		(*irq_bus_setup)(struct pci_bus *bus);
+	int		(*set_dabr)(unsigned long dabr);
 #endif
 
 	int		(*probe)(int platform);
......
(2 collapsed diffs in this commit are not shown.)
@@ -79,20 +79,13 @@ extern int smt_enabled_at_boot;
 extern int smp_mpic_probe(void);
 extern void smp_mpic_setup_cpu(int cpu);
 extern void smp_generic_kick_cpu(int nr);
+extern void smp_release_cpus(void);
 
 extern void smp_generic_give_timebase(void);
 extern void smp_generic_take_timebase(void);
 
 extern struct smp_ops_t *smp_ops;
 
-#ifdef CONFIG_PPC_PSERIES
-void vpa_init(int cpu);
-#else
-static inline void vpa_init(int cpu)
-{
-}
-#endif /* CONFIG_PPC_PSERIES */
-
 #endif /* __ASSEMBLY__ */
 #endif /* !(_PPC64_SMP_H) */
......