Commit 5e2bc458 authored by James Simmons's avatar James Simmons

Merge kozmo.(none):/usr/src/linus-2.5

into kozmo.(none):/usr/src/fbdev-2.5
parents fa7f5bbf 6c39ac1f
...@@ -48,25 +48,35 @@ cflags-$(CONFIG_MCYRIXIII) += $(call check_gcc,-falign-functions=0 -falign-jumps ...@@ -48,25 +48,35 @@ cflags-$(CONFIG_MCYRIXIII) += $(call check_gcc,-falign-functions=0 -falign-jumps
CFLAGS += $(cflags-y) CFLAGS += $(cflags-y)
ifdef CONFIG_VISWS #default subarch .c files
MACHINE := mach-visws mcore-y := mach-default
else
MACHINE := mach-generic #VISWS subarch support
endif mflags-$(CONFIG_VISWS) := -Iinclude/asm-i386/mach-visws
mcore-$(CONFIG_VISWS) := mach-visws
#NUMAQ subarch support
mflags-$(CONFIG_X86_NUMAQ) := -Iinclude/asm-i386/mach-numaq
mcore-$(CONFIG_X86_NUMAQ) := mach-default
#add other subarch support here
#default subarch .h files
mflags-y += -Iinclude/asm-i386/mach-default
HEAD := arch/i386/kernel/head.o arch/i386/kernel/init_task.o HEAD := arch/i386/kernel/head.o arch/i386/kernel/init_task.o
libs-y += arch/i386/lib/ libs-y += arch/i386/lib/
core-y += arch/i386/kernel/ \ core-y += arch/i386/kernel/ \
arch/i386/mm/ \ arch/i386/mm/ \
arch/i386/$(MACHINE)/ arch/i386/$(mcore-y)/
drivers-$(CONFIG_MATH_EMULATION) += arch/i386/math-emu/ drivers-$(CONFIG_MATH_EMULATION) += arch/i386/math-emu/
drivers-$(CONFIG_PCI) += arch/i386/pci/ drivers-$(CONFIG_PCI) += arch/i386/pci/
# FIXME: is drivers- right ? # FIXME: is drivers- right ?
drivers-$(CONFIG_OPROFILE) += arch/i386/oprofile/ drivers-$(CONFIG_OPROFILE) += arch/i386/oprofile/
CFLAGS += -Iarch/i386/$(MACHINE) CFLAGS += $(mflags-y)
AFLAGS += -Iarch/i386/$(MACHINE) AFLAGS += $(mflags-y)
makeboot =$(Q)$(MAKE) -f scripts/Makefile.build obj=arch/i386/boot $(1) makeboot =$(Q)$(MAKE) -f scripts/Makefile.build obj=arch/i386/boot $(1)
......
...@@ -31,7 +31,8 @@ ...@@ -31,7 +31,8 @@
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/arch_hooks.h> #include <asm/arch_hooks.h>
#include "mach_apic.h"
#include <mach_apic.h>
void __init apic_intr_init(void) void __init apic_intr_init(void)
{ {
...@@ -310,11 +311,9 @@ void __init setup_local_APIC (void) ...@@ -310,11 +311,9 @@ void __init setup_local_APIC (void)
__error_in_apic_c(); __error_in_apic_c();
/* /*
* Double-check wether this APIC is really registered. * Double-check whether this APIC is really registered.
* This is meaningless in clustered apic mode, so we skip it.
*/ */
if (!clustered_apic_mode && if (!apic_id_registered())
!test_bit(GET_APIC_ID(apic_read(APIC_ID)), &phys_cpu_present_map))
BUG(); BUG();
/* /*
...@@ -322,21 +321,7 @@ void __init setup_local_APIC (void) ...@@ -322,21 +321,7 @@ void __init setup_local_APIC (void)
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes... * document number 292116). So here it goes...
*/ */
init_apic_ldr();
if (!clustered_apic_mode) {
/*
* In clustered apic mode, the firmware does this for us
* Put the APIC into flat delivery mode.
* Must be "all ones" explicitly for 82489DX.
*/
apic_write_around(APIC_DFR, APIC_DFR_VALUE);
/*
* Set up the logical destination ID.
*/
value = apic_read(APIC_LDR);
apic_write_around(APIC_LDR, calculate_ldr(value));
}
/* /*
* Set Task Priority to 'accept all'. We never change this * Set Task Priority to 'accept all'. We never change this
......
...@@ -18,6 +18,8 @@ struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {}; ...@@ -18,6 +18,8 @@ struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
extern void mcheck_init(struct cpuinfo_x86 *c); extern void mcheck_init(struct cpuinfo_x86 *c);
extern int disable_pse;
static void default_init(struct cpuinfo_x86 * c) static void default_init(struct cpuinfo_x86 * c)
{ {
/* Not much we can do here... */ /* Not much we can do here... */
...@@ -305,12 +307,6 @@ void __init identify_cpu(struct cpuinfo_x86 *c) ...@@ -305,12 +307,6 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
else else
generic_identify(c); generic_identify(c);
printk(KERN_DEBUG "CPU: Before vendor init, caps: %08lx %08lx %08lx, vendor = %d\n",
c->x86_capability[0],
c->x86_capability[1],
c->x86_capability[2],
c->x86_vendor);
/* /*
* Vendor-specific initialization. In this section we * Vendor-specific initialization. In this section we
* canonicalize the feature flags, meaning if there are * canonicalize the feature flags, meaning if there are
...@@ -327,12 +323,6 @@ void __init identify_cpu(struct cpuinfo_x86 *c) ...@@ -327,12 +323,6 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
/* Disable the PN if appropriate */ /* Disable the PN if appropriate */
squash_the_stupid_serial_number(c); squash_the_stupid_serial_number(c);
printk(KERN_DEBUG "CPU: After vendor init, caps: %08lx %08lx %08lx %08lx\n",
c->x86_capability[0],
c->x86_capability[1],
c->x86_capability[2],
c->x86_capability[3]);
/* /*
* The vendor-specific functions might have changed features. Now * The vendor-specific functions might have changed features. Now
* we do "generic changes." * we do "generic changes."
...@@ -348,6 +338,9 @@ void __init identify_cpu(struct cpuinfo_x86 *c) ...@@ -348,6 +338,9 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
clear_bit(X86_FEATURE_XMM, c->x86_capability); clear_bit(X86_FEATURE_XMM, c->x86_capability);
} }
if (disable_pse)
clear_bit(X86_FEATURE_PSE, c->x86_capability);
/* If the model name is still unset, do table lookup. */ /* If the model name is still unset, do table lookup. */
if ( !c->x86_model_id[0] ) { if ( !c->x86_model_id[0] ) {
char *p; char *p;
...@@ -380,12 +373,6 @@ void __init identify_cpu(struct cpuinfo_x86 *c) ...@@ -380,12 +373,6 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
boot_cpu_data.x86_capability[i] &= c->x86_capability[i]; boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
} }
printk(KERN_DEBUG "CPU: Common caps: %08lx %08lx %08lx %08lx\n",
boot_cpu_data.x86_capability[0],
boot_cpu_data.x86_capability[1],
boot_cpu_data.x86_capability[2],
boot_cpu_data.x86_capability[3]);
/* Init Machine Check Exception if available. */ /* Init Machine Check Exception if available. */
#ifdef CONFIG_X86_MCE #ifdef CONFIG_X86_MCE
mcheck_init(c); mcheck_init(c);
......
...@@ -23,10 +23,10 @@ ...@@ -23,10 +23,10 @@
#define NEW_CL_POINTER 0x228 /* Relative to real mode data */ #define NEW_CL_POINTER 0x228 /* Relative to real mode data */
/* /*
* References to members of the boot_cpu_data structure. * References to members of the new_cpu_data structure.
*/ */
#define CPU_PARAMS boot_cpu_data #define CPU_PARAMS new_cpu_data
#define X86 CPU_PARAMS+0 #define X86 CPU_PARAMS+0
#define X86_VENDOR CPU_PARAMS+1 #define X86_VENDOR CPU_PARAMS+1
#define X86_MODEL CPU_PARAMS+2 #define X86_MODEL CPU_PARAMS+2
......
...@@ -35,7 +35,8 @@ ...@@ -35,7 +35,8 @@
#include <asm/io.h> #include <asm/io.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/desc.h> #include <asm/desc.h>
#include "mach_apic.h"
#include <mach_apic.h>
#undef APIC_LOCKUP_DEBUG #undef APIC_LOCKUP_DEBUG
...@@ -261,7 +262,7 @@ static inline void balance_irq(int irq) ...@@ -261,7 +262,7 @@ static inline void balance_irq(int irq)
irq_balance_t *entry = irq_balance + irq; irq_balance_t *entry = irq_balance + irq;
unsigned long now = jiffies; unsigned long now = jiffies;
if (clustered_apic_mode) if (no_balance_irq)
return; return;
if (unlikely(time_after(now, entry->timestamp + IRQ_BALANCE_INTERVAL))) { if (unlikely(time_after(now, entry->timestamp + IRQ_BALANCE_INTERVAL))) {
...@@ -739,7 +740,6 @@ void __init setup_IO_APIC_irqs(void) ...@@ -739,7 +740,6 @@ void __init setup_IO_APIC_irqs(void)
if (irq_trigger(idx)) { if (irq_trigger(idx)) {
entry.trigger = 1; entry.trigger = 1;
entry.mask = 1; entry.mask = 1;
entry.dest.logical.logical_dest = TARGET_CPUS;
} }
irq = pin_2_irq(idx, apic, pin); irq = pin_2_irq(idx, apic, pin);
...@@ -747,7 +747,7 @@ void __init setup_IO_APIC_irqs(void) ...@@ -747,7 +747,7 @@ void __init setup_IO_APIC_irqs(void)
* skip adding the timer int on secondary nodes, which causes * skip adding the timer int on secondary nodes, which causes
* a small but painful rift in the time-space continuum * a small but painful rift in the time-space continuum
*/ */
if (clustered_apic_mode && (apic != 0) && (irq == 0)) if (multi_timer_check(apic, irq))
continue; continue;
else else
add_pin_to_irq(irq, apic, pin); add_pin_to_irq(irq, apic, pin);
...@@ -1135,7 +1135,7 @@ void disable_IO_APIC(void) ...@@ -1135,7 +1135,7 @@ void disable_IO_APIC(void)
static void __init setup_ioapic_ids_from_mpc (void) static void __init setup_ioapic_ids_from_mpc (void)
{ {
struct IO_APIC_reg_00 reg_00; struct IO_APIC_reg_00 reg_00;
unsigned long phys_id_present_map = phys_cpu_present_map; unsigned long phys_id_present_map;
int apic; int apic;
int i; int i;
unsigned char old_id; unsigned char old_id;
...@@ -1145,9 +1145,8 @@ static void __init setup_ioapic_ids_from_mpc (void) ...@@ -1145,9 +1145,8 @@ static void __init setup_ioapic_ids_from_mpc (void)
/* This gets done during IOAPIC enumeration for ACPI. */ /* This gets done during IOAPIC enumeration for ACPI. */
return; return;
if (clustered_apic_mode) phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
/* We don't have a good way to do this yet - hack */
phys_id_present_map = (u_long) 0xf;
/* /*
* Set the IOAPIC ID to the value stored in the MPC table. * Set the IOAPIC ID to the value stored in the MPC table.
*/ */
......
...@@ -30,7 +30,9 @@ ...@@ -30,7 +30,9 @@
#include <asm/mpspec.h> #include <asm/mpspec.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/io_apic.h> #include <asm/io_apic.h>
#include "mach_apic.h"
#include <mach_apic.h>
#include <mach_mpparse.h>
/* Have we found an MP table */ /* Have we found an MP table */
int smp_found_config; int smp_found_config;
...@@ -103,28 +105,12 @@ static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdat ...@@ -103,28 +105,12 @@ static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdat
void __init MP_processor_info (struct mpc_config_processor *m) void __init MP_processor_info (struct mpc_config_processor *m)
{ {
int ver, quad, logical_apicid; int ver, apicid;
if (!(m->mpc_cpuflag & CPU_ENABLED)) if (!(m->mpc_cpuflag & CPU_ENABLED))
return; return;
logical_apicid = m->mpc_apicid; apicid = mpc_apic_id(m, translation_table[mpc_record]->trans_quad);
if (clustered_apic_mode) {
quad = translation_table[mpc_record]->trans_quad;
logical_apicid = (quad << 4) +
(m->mpc_apicid ? m->mpc_apicid << 1 : 1);
printk("Processor #%d %ld:%ld APIC version %d (quad %d, apic %d)\n",
m->mpc_apicid,
(m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
(m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
m->mpc_apicver, quad, logical_apicid);
} else {
printk("Processor #%d %ld:%ld APIC version %d\n",
m->mpc_apicid,
(m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
(m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
m->mpc_apicver);
}
if (m->mpc_featureflag&(1<<0)) if (m->mpc_featureflag&(1<<0))
Dprintk(" Floating point unit present.\n"); Dprintk(" Floating point unit present.\n");
...@@ -177,7 +163,7 @@ void __init MP_processor_info (struct mpc_config_processor *m) ...@@ -177,7 +163,7 @@ void __init MP_processor_info (struct mpc_config_processor *m)
if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) { if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
Dprintk(" Bootup CPU\n"); Dprintk(" Bootup CPU\n");
boot_cpu_physical_apicid = m->mpc_apicid; boot_cpu_physical_apicid = m->mpc_apicid;
boot_cpu_logical_apicid = logical_apicid; boot_cpu_logical_apicid = apicid;
} }
num_processors++; num_processors++;
...@@ -190,11 +176,8 @@ void __init MP_processor_info (struct mpc_config_processor *m) ...@@ -190,11 +176,8 @@ void __init MP_processor_info (struct mpc_config_processor *m)
} }
ver = m->mpc_apicver; ver = m->mpc_apicver;
if (clustered_apic_mode) { phys_cpu_present_map |= apicid_to_cpu_present(apicid);
phys_cpu_present_map |= (logical_apicid&0xf) << (4*quad);
} else {
phys_cpu_present_map |= apicid_to_cpu_present(m->mpc_apicid);
}
/* /*
* Validate version * Validate version
*/ */
...@@ -209,28 +192,18 @@ void __init MP_processor_info (struct mpc_config_processor *m) ...@@ -209,28 +192,18 @@ void __init MP_processor_info (struct mpc_config_processor *m)
static void __init MP_bus_info (struct mpc_config_bus *m) static void __init MP_bus_info (struct mpc_config_bus *m)
{ {
char str[7]; char str[7];
int quad;
memcpy(str, m->mpc_bustype, 6); memcpy(str, m->mpc_bustype, 6);
str[6] = 0; str[6] = 0;
if (clustered_apic_mode) { mpc_oem_bus_info(m, str, translation_table[mpc_record]);
quad = translation_table[mpc_record]->trans_quad;
mp_bus_id_to_node[m->mpc_busid] = quad;
mp_bus_id_to_local[m->mpc_busid] = translation_table[mpc_record]->trans_local;
printk("Bus #%d is %s (node %d)\n", m->mpc_busid, str, quad);
} else {
Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
}
if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) { if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA; mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) { } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA; mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) { } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
if (clustered_apic_mode){ mpc_oem_pci_bus(m, translation_table[mpc_record]);
quad_local_to_mp_bus_id[quad][translation_table[mpc_record]->trans_local] = m->mpc_busid;
}
mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI; mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id; mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
mp_current_pci_id++; mp_current_pci_id++;
...@@ -318,6 +291,7 @@ static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \ ...@@ -318,6 +291,7 @@ static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
int count = sizeof (*oemtable); /* the header size */ int count = sizeof (*oemtable); /* the header size */
unsigned char *oemptr = ((unsigned char *)oemtable)+count; unsigned char *oemptr = ((unsigned char *)oemtable)+count;
mpc_record = 0;
printk("Found an OEM MPC table at %8p - parsing it ... \n", oemtable); printk("Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4)) if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
{ {
...@@ -394,7 +368,7 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) ...@@ -394,7 +368,7 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
str[12]=0; str[12]=0;
printk("Product ID: %s ",str); printk("Product ID: %s ",str);
summit_check(oem, str); mps_oem_check(mpc, oem, str);
printk("APIC at: 0x%lX\n",mpc->mpc_lapic); printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
...@@ -405,16 +379,10 @@ static int __init smp_read_mpc(struct mp_config_table *mpc) ...@@ -405,16 +379,10 @@ static int __init smp_read_mpc(struct mp_config_table *mpc)
if (!acpi_lapic) if (!acpi_lapic)
mp_lapic_addr = mpc->mpc_lapic; mp_lapic_addr = mpc->mpc_lapic;
if (clustered_apic_mode && mpc->mpc_oemptr) {
/* We need to process the oem mpc tables to tell us which quad things are in ... */
mpc_record = 0;
smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr, mpc->mpc_oemsize);
mpc_record = 0;
}
/* /*
* Now process the configuration blocks. * Now process the configuration blocks.
*/ */
mpc_record = 0;
while (count < mpc->mpc_length) { while (count < mpc->mpc_length) {
switch(*mpt) { switch(*mpt) {
case MP_PROCESSOR: case MP_PROCESSOR:
......
...@@ -41,6 +41,8 @@ ...@@ -41,6 +41,8 @@
#include <asm/arch_hooks.h> #include <asm/arch_hooks.h>
#include "setup_arch_pre.h" #include "setup_arch_pre.h"
int disable_pse __initdata = 0;
static inline char * __init machine_specific_memory_setup(void); static inline char * __init machine_specific_memory_setup(void);
/* /*
...@@ -48,6 +50,9 @@ static inline char * __init machine_specific_memory_setup(void); ...@@ -48,6 +50,9 @@ static inline char * __init machine_specific_memory_setup(void);
*/ */
char ignore_irq13; /* set if exception 16 works */ char ignore_irq13; /* set if exception 16 works */
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 }; struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
unsigned long mmu_cr4_features; unsigned long mmu_cr4_features;
...@@ -523,6 +528,7 @@ static void __init parse_cmdline_early (char ** cmdline_p) ...@@ -523,6 +528,7 @@ static void __init parse_cmdline_early (char ** cmdline_p)
if (!memcmp(from+4, "nopentium", 9)) { if (!memcmp(from+4, "nopentium", 9)) {
from += 9+4; from += 9+4;
clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability); clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
disable_pse = 1;
} else if (!memcmp(from+4, "exactmap", 8)) { } else if (!memcmp(from+4, "exactmap", 8)) {
from += 8+4; from += 8+4;
e820.nr_map = 0; e820.nr_map = 0;
...@@ -837,6 +843,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -837,6 +843,7 @@ void __init setup_arch(char **cmdline_p)
{ {
unsigned long max_low_pfn; unsigned long max_low_pfn;
memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
pre_setup_arch_hook(); pre_setup_arch_hook();
early_cpu_init(); early_cpu_init();
......
...@@ -609,6 +609,11 @@ __attribute__((regparm(3))) ...@@ -609,6 +609,11 @@ __attribute__((regparm(3)))
void do_notify_resume(struct pt_regs *regs, sigset_t *oldset, void do_notify_resume(struct pt_regs *regs, sigset_t *oldset,
__u32 thread_info_flags) __u32 thread_info_flags)
{ {
/* Pending single-step? */
if (thread_info_flags & _TIF_SINGLESTEP) {
regs->eflags |= TF_MASK;
clear_thread_flag(TIF_SINGLESTEP);
}
/* deal with pending signal delivery */ /* deal with pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING) if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs,oldset); do_signal(regs,oldset);
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/smpboot.h> #include <asm/smpboot.h>
#include <mach_ipi.h>
/* /*
* Some notes on x86 processor bugs affecting SMP operation: * Some notes on x86 processor bugs affecting SMP operation:
...@@ -227,54 +228,6 @@ static inline void send_IPI_mask_sequence(int mask, int vector) ...@@ -227,54 +228,6 @@ static inline void send_IPI_mask_sequence(int mask, int vector)
local_irq_restore(flags); local_irq_restore(flags);
} }
static inline void send_IPI_mask(int mask, int vector)
{
if (clustered_apic_mode)
send_IPI_mask_sequence(mask, vector);
else
send_IPI_mask_bitmask(mask, vector);
}
static inline void send_IPI_allbutself(int vector)
{
/*
* if there are no other CPUs in the system then
* we get an APIC send error if we try to broadcast.
* thus we have to avoid sending IPIs in this case.
*/
if (!(num_online_cpus() > 1))
return;
if (clustered_apic_mode) {
// Pointless. Use send_IPI_mask to do this instead
int cpu;
for (cpu = 0; cpu < NR_CPUS; ++cpu) {
if (cpu_online(cpu) && cpu != smp_processor_id())
send_IPI_mask(1 << cpu, vector);
}
} else {
__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
return;
}
}
static inline void send_IPI_all(int vector)
{
if (clustered_apic_mode) {
// Pointless. Use send_IPI_mask to do this instead
int cpu;
for (cpu = 0; cpu < NR_CPUS; ++cpu) {
if (!cpu_online(cpu))
continue;
send_IPI_mask(1 << cpu, vector);
}
} else {
__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}
}
/* /*
* Smarter SMP flushing macros. * Smarter SMP flushing macros.
* c/o Linus Torvalds. * c/o Linus Torvalds.
......
...@@ -51,7 +51,8 @@ ...@@ -51,7 +51,8 @@
#include <asm/desc.h> #include <asm/desc.h>
#include <asm/arch_hooks.h> #include <asm/arch_hooks.h>
#include "smpboot_hooks.h" #include "smpboot_hooks.h"
#include "mach_apic.h"
#include <mach_apic.h>
/* Set if we find a B stepping CPU */ /* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping; static int __initdata smp_b_stepping;
...@@ -848,11 +849,7 @@ static void __init do_boot_cpu (int apicid) ...@@ -848,11 +849,7 @@ static void __init do_boot_cpu (int apicid)
/* /*
* Starting actual IPI sequence... * Starting actual IPI sequence...
*/ */
wakeup_secondary_cpu(apicid, start_eip);
if (clustered_apic_mode)
boot_error = wakeup_secondary_via_NMI(apicid);
else
boot_error = wakeup_secondary_via_INIT(apicid, start_eip);
if (!boot_error) { if (!boot_error) {
/* /*
...@@ -1060,15 +1057,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus) ...@@ -1060,15 +1057,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid) if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_physical_apicid)
BUG(); BUG();
if (clustered_apic_mode && (numnodes > 1)) { setup_portio_remap();
printk("Remapping cross-quad port I/O for %d quads\n",
numnodes);
xquad_portio = ioremap (XQUAD_PORTIO_BASE,
numnodes * XQUAD_PORTIO_QUAD);
printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
(u_long) xquad_portio,
(u_long) numnodes * XQUAD_PORTIO_QUAD);
}
/* /*
* Scan the CPU present map and fire up the other CPUs via do_boot_cpu * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
......
...@@ -54,19 +54,18 @@ static int __init sysenter_setup(void) ...@@ -54,19 +54,18 @@ static int __init sysenter_setup(void)
0xc3 /* ret */ 0xc3 /* ret */
}; };
static const char sysent[] = { static const char sysent[] = {
0x9c, /* pushf */
0x51, /* push %ecx */ 0x51, /* push %ecx */
0x52, /* push %edx */ 0x52, /* push %edx */
0x55, /* push %ebp */ 0x55, /* push %ebp */
0x89, 0xe5, /* movl %esp,%ebp */ 0x89, 0xe5, /* movl %esp,%ebp */
0x0f, 0x34, /* sysenter */ 0x0f, 0x34, /* sysenter */
0x00, /* align return point */
/* System call restart point is here! (SYSENTER_RETURN - 2) */ /* System call restart point is here! (SYSENTER_RETURN - 2) */
0xeb, 0xfa, /* jmp to "movl %esp,%ebp" */ 0xeb, 0xfa, /* jmp to "movl %esp,%ebp" */
/* System call normal return point is here! (SYSENTER_RETURN in entry.S) */ /* System call normal return point is here! (SYSENTER_RETURN in entry.S) */
0x5d, /* pop %ebp */ 0x5d, /* pop %ebp */
0x5a, /* pop %edx */ 0x5a, /* pop %edx */
0x59, /* pop %ecx */ 0x59, /* pop %ecx */
0x9d, /* popf - restore TF */
0xc3 /* ret */ 0xc3 /* ret */
}; };
unsigned long page = get_zeroed_page(GFP_ATOMIC); unsigned long page = get_zeroed_page(GFP_ATOMIC);
......
...@@ -605,7 +605,7 @@ asmlinkage void do_debug(struct pt_regs * regs, long error_code) ...@@ -605,7 +605,7 @@ asmlinkage void do_debug(struct pt_regs * regs, long error_code)
* interface. * interface.
*/ */
if ((regs->xcs & 3) == 0) if ((regs->xcs & 3) == 0)
goto clear_TF; goto clear_TF_reenable;
if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE) if ((tsk->ptrace & (PT_DTRACE|PT_PTRACED)) == PT_DTRACE)
goto clear_TF; goto clear_TF;
} }
...@@ -637,6 +637,8 @@ asmlinkage void do_debug(struct pt_regs * regs, long error_code) ...@@ -637,6 +637,8 @@ asmlinkage void do_debug(struct pt_regs * regs, long error_code)
handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1); handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
return; return;
clear_TF_reenable:
set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
clear_TF: clear_TF:
regs->eflags &= ~TF_MASK; regs->eflags &= ~TF_MASK;
return; return;
......
/*
 * mach_apic.h -- combined flat/clustered local-APIC helpers, selected
 * at run time by the clustered_apic_mode flag.
 * NOTE(review): depends on APIC_* register macros, smp_processor_id()
 * and clustered_apic_mode being visible in the including file.
 */
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H
/*
 * Build a new Logical Destination Register value from 'old': clear the
 * logical-ID field and insert a one-hot bit for the current CPU.
 */
static inline unsigned long calculate_ldr(unsigned long old)
{
unsigned long id;
id = 1UL << smp_processor_id();
return ((old & ~APIC_LDR_MASK) | SET_APIC_LOGICAL_ID(id));
}
/* Flat delivery model for the Destination Format Register. */
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#ifdef CONFIG_SMP
/* Clustered mode targets the low four logical bits; flat mode targets
 * every online CPU. */
#define TARGET_CPUS (clustered_apic_mode ? 0xf : cpu_online_map)
#else
#define TARGET_CPUS 0x01
#endif
#define APIC_BROADCAST_ID 0x0F
/* Test whether 'apicid' is already set in the given present bitmap. */
#define check_apicid_used(bitmap, apicid) (bitmap & (1 << apicid))
/* No Summit-specific OEM detection in this header -- intentional no-op. */
static inline void summit_check(char *oem, char *productid)
{
}
/* Report which APIC mode (NUMA-Q clustered or flat) is being enabled. */
static inline void clustered_apic_check(void)
{
printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
(clustered_apic_mode ? "NUMA-Q" : "Flat"), nr_ioapics);
}
/*
 * Map an MP-table CPU number to its APIC ID.  Clustered (NUMA-Q) mode
 * packs four CPUs per quad: the quad number goes in the high nibble
 * and a one-hot in-quad bit goes in the low nibble.
 */
static inline int cpu_present_to_apicid(int mps_cpu)
{
if (clustered_apic_mode)
return ( ((mps_cpu/4)*16) + (1<<(mps_cpu%4)) );
else
return mps_cpu;
}
/* One-hot bit for this APIC ID in the physical-CPU-present map. */
static inline unsigned long apicid_to_cpu_present(int apicid)
{
return (1ul << apicid);
}
#endif /* __ASM_MACH_APIC_H */
...@@ -16,7 +16,12 @@ ...@@ -16,7 +16,12 @@
static inline pte_t *lookup_address(unsigned long address) static inline pte_t *lookup_address(unsigned long address)
{ {
pgd_t *pgd = pgd_offset_k(address); pgd_t *pgd = pgd_offset_k(address);
pmd_t *pmd = pmd_offset(pgd, address); pmd_t *pmd;
if (pgd_none(*pgd))
return NULL;
pmd = pmd_offset(pgd, address);
if (pmd_none(*pmd))
return NULL;
if (pmd_large(*pmd)) if (pmd_large(*pmd))
return (pte_t *)pmd; return (pte_t *)pmd;
return pte_offset_kernel(pmd, address); return pte_offset_kernel(pmd, address);
...@@ -95,12 +100,13 @@ __change_page_attr(struct page *page, pgprot_t prot, struct page **oldpage) ...@@ -95,12 +100,13 @@ __change_page_attr(struct page *page, pgprot_t prot, struct page **oldpage)
address = (unsigned long)page_address(page); address = (unsigned long)page_address(page);
kpte = lookup_address(address); kpte = lookup_address(address);
if (!kpte)
return -EINVAL;
kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK); kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) { if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
if ((pte_val(*kpte) & _PAGE_PSE) == 0) { if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
pte_t old = *kpte; pte_t old = *kpte;
pte_t standard = mk_pte(page, PAGE_KERNEL); pte_t standard = mk_pte(page, PAGE_KERNEL);
set_pte_atomic(kpte, mk_pte(page, prot)); set_pte_atomic(kpte, mk_pte(page, prot));
if (pte_same(old,standard)) if (pte_same(old,standard))
atomic_inc(&kpte_page->count); atomic_inc(&kpte_page->count);
...@@ -108,6 +114,7 @@ __change_page_attr(struct page *page, pgprot_t prot, struct page **oldpage) ...@@ -108,6 +114,7 @@ __change_page_attr(struct page *page, pgprot_t prot, struct page **oldpage)
struct page *split = split_large_page(address, prot); struct page *split = split_large_page(address, prot);
if (!split) if (!split)
return -ENOMEM; return -ENOMEM;
atomic_inc(&kpte_page->count);
set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL)); set_pmd_pte(kpte,address,mk_pte(split, PAGE_KERNEL));
} }
} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) { } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
......
/*
 * Default (flat APIC) subarchitecture definitions for standard
 * PC-compatible systems.
 * NOTE(review): relies on APIC_* macros, apic_read()/apic_write_around()
 * and kernel globals declared by the including file.
 */
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H
/* Flat delivery model for the Destination Format Register. */
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
#ifdef CONFIG_SMP
/* Broadcast targets: every CPU currently online. */
#define TARGET_CPUS (cpu_online_map)
#else
#define TARGET_CPUS 0x01
#endif
/* IRQ balancing stays enabled on the default subarch. */
#define no_balance_irq (0)
#define APIC_BROADCAST_ID 0x0F
/* Test whether 'apicid' is already set in the given present bitmap. */
#define check_apicid_used(bitmap, apicid) (bitmap & (1 << apicid))
/* Double-check that this CPU's APIC ID is in the physical present map. */
static inline int apic_id_registered(void)
{
return (test_bit(GET_APIC_ID(apic_read(APIC_ID)),
&phys_cpu_present_map));
}
/*
 * Set up the logical destination ID.
 *
 * Intel recommends to set DFR, LDR and TPR before enabling
 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116). So here it goes...
 */
static inline void init_apic_ldr(void)
{
unsigned long val;
apic_write_around(APIC_DFR, APIC_DFR_VALUE);
val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
apic_write_around(APIC_LDR, val);
}
/* Flat mode: the IO-APIC physical ID map is just the CPU present map. */
static inline ulong ioapic_phys_id_map(ulong phys_map)
{
return phys_map;
}
/* Report that the flat APIC mode is in use. */
static inline void clustered_apic_check(void)
{
printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
"Flat", nr_ioapics);
}
/* No secondary-node timer pins to skip on the default subarch. */
static inline int multi_timer_check(int apic, int irq)
{
return 0;
}
/* Flat mode: MP-table CPU numbers are the APIC IDs, unchanged. */
static inline int cpu_present_to_apicid(int mps_cpu)
{
return mps_cpu;
}
/* One-hot bit for this physical APIC ID in the present map. */
static inline unsigned long apicid_to_cpu_present(int phys_apicid)
{
return (1ul << phys_apicid);
}
/*
 * Log an MP-table processor entry and return its APIC ID, which is
 * used directly as the logical ID on the default subarch ('quad' is
 * ignored here).
 */
static inline int mpc_apic_id(struct mpc_config_processor *m, int quad)
{
printk("Processor #%d %ld:%ld APIC version %d\n",
m->mpc_apicid,
(m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
(m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
m->mpc_apicver);
return (m->mpc_apicid);
}
/* Default subarch wakes secondaries with the INIT/STARTUP sequence. */
#define wakeup_secondary_cpu(apicid, start_eip) \
wakeup_secondary_via_INIT(apicid, start_eip)
/* No cross-quad port I/O on standard hardware -- nothing to remap. */
static inline void setup_portio_remap(void)
{
}
#endif /* __ASM_MACH_APIC_H */
/*
 * Default-subarch IPI helpers: flat logical addressing, so broadcasts
 * can use the APIC hardware shortcuts.  The low-level senders are
 * declared here and defined in the including file.
 */
#ifndef __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H
static inline void send_IPI_mask_bitmask(int mask, int vector);
static inline void __send_IPI_shortcut(unsigned int shortcut, int vector);
/* Flat mode can address the whole mask in a single bitmask IPI. */
static inline void send_IPI_mask(int mask, int vector)
{
send_IPI_mask_bitmask(mask, vector);
}
/* IPI every CPU except ourselves, via the ALLBUT hardware shortcut. */
static inline void send_IPI_allbutself(int vector)
{
/*
 * if there are no other CPUs in the system then we get an APIC send
 * error if we try to broadcast, thus avoid sending IPIs in this case.
 */
if (!(num_online_cpus() > 1))
return;
__send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
return;
}
/* IPI every CPU including ourselves, via the ALLINC hardware shortcut. */
static inline void send_IPI_all(int vector)
{
__send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}
#endif /* __ASM_MACH_IPI_H */
/*
 * Default-subarch MP-table parsing hooks: there are no OEM translation
 * tables on standard hardware, so these are (nearly) no-ops.
 */
#ifndef __ASM_MACH_MPPARSE_H
#define __ASM_MACH_MPPARSE_H
/* Just log the bus; there is no node/local translation to record. */
static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
struct mpc_config_translation *translation)
{
Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
}
/* No per-quad PCI bus bookkeeping on the default subarch. */
static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
struct mpc_config_translation *translation)
{
}
/* No OEM table to validate or parse on the default subarch. */
static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
char *productid)
{
}
#endif /* __ASM_MACH_MPPARSE_H */
/*
 * NUMA-Q (clustered APIC) subarchitecture APIC definitions.
 * NOTE(review): relies on APIC_* macros and kernel globals declared by
 * the including file.
 */
#ifndef __ASM_MACH_APIC_H
#define __ASM_MACH_APIC_H
/* Flat DFR value; the actual LDR/DFR programming is left to firmware. */
#define APIC_DFR_VALUE (APIC_DFR_FLAT)
/* Target the low four logical destination bits (one quad's CPUs). */
#define TARGET_CPUS (0xf)
/* IRQ balancing is disabled on NUMA-Q. */
#define no_balance_irq (1)
#define APIC_BROADCAST_ID 0x0F
/* Test whether 'apicid' is already set in the given present bitmap. */
#define check_apicid_used(bitmap, apicid) (bitmap & (1 << apicid))
/*
 * The registration sanity check is meaningless in clustered APIC mode,
 * so always report success.
 */
static inline int apic_id_registered(void)
{
return (1);
}
/* LDR/DFR setup is done for us by the NUMA-Q firmware -- nothing to do. */
static inline void init_apic_ldr(void)
{
/* Already done in NUMA-Q firmware */
}
/* Report that the clustered (NUMA-Q) APIC mode is in use. */
static inline void clustered_apic_check(void)
{
printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
"NUMA-Q", nr_ioapics);
}
/*
 * multi_timer_check - each secondary quad's IO-APIC also reports a
 * timer input on IRQ 0; return nonzero so the caller skips those and
 * only the first IO-APIC's timer interrupt gets registered.
 */
static inline int multi_timer_check(int apic, int irq)
{
	if (apic == 0)
		return 0;
	return (irq == 0);
}
/*
 * ioapic_phys_id_map - IDs the IO-APICs may claim.  NUMA-Q has no good
 * way to derive this from the CPU map yet, so the input is ignored and
 * the low four IDs are claimed (self-described hack).
 */
static inline ulong ioapic_phys_id_map(ulong phys_map)
{
/* We don't have a good way to do this yet - hack */
return 0xf;
}
/*
 * cpu_present_to_apicid - translate an MP-table CPU number into a
 * NUMA-Q logical APIC ID: the quad number goes in the high nibble and
 * a one-hot bit for the CPU's slot within its quad in the low nibble.
 */
static inline int cpu_present_to_apicid(int mps_cpu)
{
	int quad = mps_cpu / 4;
	int slot = mps_cpu % 4;

	return (quad * 16) + (1 << slot);
}
/*
 * generate_logical_apicid - build a NUMA-Q logical APIC ID from a quad
 * number and a physical APIC ID.  The quad goes in the high nibble;
 * the physical ID is shifted up one bit, except that ID 0 maps to the
 * lowest bit.
 */
static inline int generate_logical_apicid(int quad, int phys_apicid)
{
	int in_quad_id = phys_apicid ? (phys_apicid << 1) : 1;

	return (quad << 4) + in_quad_id;
}
/* Extract the quad number (high nibble) from a NUMA-Q logical APIC ID. */
static inline int apicid_to_quad(int logical_apicid)
{
return (logical_apicid >> 4);
}
/*
 * apicid_to_cpu_present - convert a NUMA-Q logical APIC ID into its
 * one-hot position in the physical-CPU-present map: the low nibble
 * (the in-quad bit) shifted up by four bits per quad.
 */
static inline unsigned long apicid_to_cpu_present(int logical_apicid)
{
	/* apicid_to_quad() inlined: the quad number is the high nibble. */
	int quad = logical_apicid >> 4;
	unsigned long in_quad_bit = logical_apicid & 0xf;

	return in_quad_bit << (4 * quad);
}
/*
 * mpc_apic_id - compute and return the NUMA-Q logical APIC ID for an
 * MP-table processor entry, logging family/model/APIC version together
 * with its quad placement.
 */
static inline int mpc_apic_id(struct mpc_config_processor *m, int quad)
{
int logical_apicid = generate_logical_apicid(quad, m->mpc_apicid);
printk("Processor #%d %ld:%ld APIC version %d (quad %d, apic %d)\n",
m->mpc_apicid,
(m->mpc_cpufeature & CPU_FAMILY_MASK) >> 8,
(m->mpc_cpufeature & CPU_MODEL_MASK) >> 4,
m->mpc_apicver, quad, logical_apicid);
return logical_apicid;
}
/* NUMA-Q wakes secondary CPUs via NMI instead of INIT/STARTUP. */
#define wakeup_secondary_cpu(apicid, start_eip) \
wakeup_secondary_via_NMI(apicid)
/*
 * Map the cross-quad port-I/O window so the boot quad can reach the
 * other quads' I/O space.  No-op on single-quad systems.
 */
static inline void setup_portio_remap(void)
{
if (numnodes <= 1)
return;
printk("Remapping cross-quad port I/O for %d quads\n", numnodes);
xquad_portio = ioremap (XQUAD_PORTIO_BASE, numnodes*XQUAD_PORTIO_QUAD);
printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
(u_long) xquad_portio, (u_long) numnodes*XQUAD_PORTIO_QUAD);
}
#endif /* __ASM_MACH_APIC_H */
#ifndef __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H
static inline void send_IPI_mask_sequence(int mask, int vector);
static inline void send_IPI_mask(int mask, int vector)
{
send_IPI_mask_sequence(mask, vector);
}
/*
 * Send an IPI to every online CPU except the calling one.
 * Does nothing when the caller is the only CPU online.
 */
static inline void send_IPI_allbutself(int vector)
{
	unsigned long others;

	others = cpu_online_map & ~(1 << smp_processor_id());
	if (!others)
		return;
	send_IPI_mask(others, vector);
}
/* Send an IPI to every online CPU, including the caller. */
static inline void send_IPI_all(int vector)
{
	send_IPI_mask(cpu_online_map, vector);
}
#endif /* __ASM_MACH_IPI_H */
#ifndef __ASM_MACH_MPPARSE_H
#define __ASM_MACH_MPPARSE_H
/* Parses the NUMA-Q OEM extension table.  NOTE(review): declared but
 * not defined in this header - definition expected elsewhere; verify. */
static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
	unsigned short oemsize);
/* Record, from the OEM translation entry, which quad (node) and
 * quad-local bus number this MP bus id maps to, and log it. */
static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
	struct mpc_config_translation *translation)
{
	int quad = translation->trans_quad;
	int local = translation->trans_local;
	mp_bus_id_to_node[m->mpc_busid] = quad;
	mp_bus_id_to_local[m->mpc_busid] = local;
	printk("Bus #%d is %s (node %d)\n", m->mpc_busid, name, quad);
}
/*
 * Remember this PCI bus's MP bus id in the lookup table, indexed by
 * its quad and quad-local bus number from the OEM translation entry.
 */
static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
	struct mpc_config_translation *translation)
{
	int node = translation->trans_quad;
	int local_bus = translation->trans_local;

	quad_local_to_mp_bus_id[node][local_bus] = m->mpc_busid;
}
/*
 * Sanity-check the MP table's OEM signature for NUMA-Q and, when an
 * OEM extension table is attached, hand it to the OEM parser.
 */
static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
	char *productid)
{
	if (strncmp(oem, "IBM NUMA", 8) != 0)
		printk("Warning! May not be a NUMA-Q system!\n");
	if (mpc->mpc_oemptr != 0)
		smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
			mpc->mpc_oemsize);
}
#endif /* __ASM_MACH_MPPARSE_H */
...@@ -26,12 +26,6 @@ static inline unsigned long calculate_ldr(unsigned long old) ...@@ -26,12 +26,6 @@ static inline unsigned long calculate_ldr(unsigned long old)
#define APIC_BROADCAST_ID (x86_summit ? 0xFF : 0x0F) #define APIC_BROADCAST_ID (x86_summit ? 0xFF : 0x0F)
#define check_apicid_used(bitmap, apicid) (0) #define check_apicid_used(bitmap, apicid) (0)
static inline void summit_check(char *oem, char *productid)
{
if (!strncmp(oem, "IBM ENSW", 8) && !strncmp(str, "VIGIL SMP", 9))
x86_summit = 1;
}
static inline void clustered_apic_check(void) static inline void clustered_apic_check(void)
{ {
printk("Enabling APIC mode: %s. Using %d I/O APICs\n", printk("Enabling APIC mode: %s. Using %d I/O APICs\n",
...@@ -46,6 +40,12 @@ static inline int cpu_present_to_apicid(int mps_cpu) ...@@ -46,6 +40,12 @@ static inline int cpu_present_to_apicid(int mps_cpu)
return mps_cpu; return mps_cpu;
} }
static inline ulong ioapic_phys_id_map(ulong phys_map)
{
/* For clustered we don't have a good way to do this yet - hack */
return (x86_summit ? 0x0F : phys_map);
}
static inline unsigned long apicid_to_phys_cpu_present(int apicid) static inline unsigned long apicid_to_phys_cpu_present(int apicid)
{ {
if (x86_summit) if (x86_summit)
...@@ -54,4 +54,11 @@ static inline unsigned long apicid_to_phys_cpu_present(int apicid) ...@@ -54,4 +54,11 @@ static inline unsigned long apicid_to_phys_cpu_present(int apicid)
return (1ul << apicid); return (1ul << apicid);
} }
#define wakeup_secondary_cpu(apicid, start_eip) \
wakeup_secondary_via_INIT(apicid, start_eip)
static inline void setup_portio_remap(void)
{
}
#endif /* __ASM_MACH_APIC_H */ #endif /* __ASM_MACH_APIC_H */
#ifndef __ASM_MACH_IPI_H
#define __ASM_MACH_IPI_H
static inline void send_IPI_mask_sequence(int mask, int vector);
static inline void send_IPI_mask(int mask, int vector)
{
send_IPI_mask_sequence(mask, vector);
}
static inline void send_IPI_allbutself(int vector)
{
unsigned long mask = cpu_online_map & ~(1 << smp_processor_id());
if (mask)
send_IPI_mask(mask, vector);
}
static inline void send_IPI_all(int vector)
{
send_IPI_mask(cpu_online_map, vector);
}
#endif /* __ASM_MACH_IPI_H */
#ifndef __ASM_MACH_MPPARSE_H
#define __ASM_MACH_MPPARSE_H
static inline void mpc_oem_bus_info(struct mpc_config_bus *m, char *name,
struct mpc_config_translation *translation)
{
Dprintk("Bus #%d is %s\n", m->mpc_busid, name);
}
static inline void mpc_oem_pci_bus(struct mpc_config_bus *m,
struct mpc_config_translation *translation)
{
}
/*
 * Detect an IBM Summit ("Vigil") system from the MP table OEM and
 * product id strings and flag it via x86_summit.
 * Bug fix: the second strncmp referenced the undeclared identifier
 * `str`; the product-id parameter is named `productid`.
 */
static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
	char *productid)
{
	if (!strncmp(oem, "IBM ENSW", 8) && !strncmp(productid, "VIGIL SMP", 9))
		x86_summit = 1;
}
#endif /* __ASM_MACH_MPPARSE_H */
...@@ -78,6 +78,7 @@ struct cpuinfo_x86 { ...@@ -78,6 +78,7 @@ struct cpuinfo_x86 {
*/ */
extern struct cpuinfo_x86 boot_cpu_data; extern struct cpuinfo_x86 boot_cpu_data;
extern struct cpuinfo_x86 new_cpu_data;
extern struct tss_struct init_tss[NR_CPUS]; extern struct tss_struct init_tss[NR_CPUS];
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
......
...@@ -109,6 +109,7 @@ static inline struct thread_info *current_thread_info(void) ...@@ -109,6 +109,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */ #define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
#define TIF_SIGPENDING 2 /* signal pending */ #define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ #define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */ #define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */ #define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
...@@ -116,6 +117,7 @@ static inline struct thread_info *current_thread_info(void) ...@@ -116,6 +117,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING) #define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_USEDFPU (1<<TIF_USEDFPU) #define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
......
...@@ -259,6 +259,22 @@ static int __init unknown_bootoption(char *param, char *val) ...@@ -259,6 +259,22 @@ static int __init unknown_bootoption(char *param, char *val)
return 0; return 0;
} }
static int __init init_setup(char *str)
{
unsigned int i;
execute_command = str;
/* In case LILO is going to boot us with default command line,
* it prepends "auto" before the whole cmdline which makes
* the shell think it should execute a script with such name.
* So we ignore all arguments entered _before_ init=... [MJ]
*/
for (i = 1; i < MAX_INIT_ARGS; i++)
argv_init[i] = NULL;
return 1;
}
__setup("init=", init_setup);
extern void setup_arch(char **); extern void setup_arch(char **);
extern void cpu_idle(void); extern void cpu_idle(void);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment