Commit d59b0473 authored by Paul Mackerras

Merge samba.org:/home/paulus/kernel/linux-2.5

into samba.org:/home/paulus/kernel/for-linus-ppc
parents 39ed3b25 003e16bb
@@ -11,6 +11,9 @@ config ALPHA
now Hewlett-Packard. The Alpha Linux project has a home page at
<http://www.alphalinux.org/>.
config 64BIT
def_bool y
config MMU
bool
default y
......
@@ -96,6 +96,7 @@ config ZBOOT_ROM
directly from ROM or flash. If unsure, say N.
config ZBOOT_ROM_TEXT
depends on ZBOOT_ROM
hex "Compressed ROM boot loader base address"
default "0"
help
@@ -103,6 +104,7 @@ config ZBOOT_ROM_TEXT
should not change this value.
config ZBOOT_ROM_BSS
depends on ZBOOT_ROM
hex "Compressed ROM boot loader BSS address"
default "0"
help
@@ -110,6 +112,12 @@ config ZBOOT_ROM_BSS
while the decompressor is running. Unless you have special requirements,
you should not change this value.
config XIP_KERNEL
bool "Execute In Place (XIP) kernel image"
help
Select this option to create a kernel that can be programmed into
the OS ROMs.
config HOTPLUG
bool "Support for hot-pluggable devices"
---help---
......
@@ -12,8 +12,6 @@ LDFLAGS_BLOB :=--format binary
AFLAGS_vmlinux.lds.o = -DTEXTADDR=$(TEXTADDR) -DDATAADDR=$(DATAADDR)
OBJCOPYFLAGS :=-O binary -R .note -R .comment -S
GZFLAGS :=-9
#CFLAGS +=-pipe
CFLAGS :=$(CFLAGS:-O2=-Os)
ifeq ($(CONFIG_FRAME_POINTER),y)
CFLAGS +=-fno-omit-frame-pointer -mno-sched-prolog
@@ -29,21 +27,17 @@ CFLAGS_BOOT :=-mapcs-26 -mcpu=arm3 -mshort-load-bytes -msoft-float -Wa,-mno-fpu
CFLAGS +=-mapcs-26 -mcpu=arm3 -mshort-load-bytes -msoft-float -Wa,-mno-fpu -Uarm
AFLAGS +=-mapcs-26 -mcpu=arm3 -mno-fpu -msoft-float -Wa,-mno-fpu
#Default value
DATAADDR := .
ifeq ($(CONFIG_CPU_26),y)
head-y := arch/arm26/machine/head.o arch/arm26/kernel/init_task.o
LDFLAGS_BLOB += --oformat elf32-littlearm
ifeq ($(CONFIG_ROM_KERNEL),y)
DATAADDR := 0x02080000
textaddr-y := 0x03800000
else
textaddr-y := 0x02080000
endif
endif
TEXTADDR := $(textaddr-y)
ifeq ($(CONFIG_XIP_KERNEL),y)
TEXTADDR := 0x03880000
DATAADDR := 0x02080000
else
TEXTADDR := 0x02080000
DATAADDR := .
endif
ifeq ($(incdir-y),)
incdir-y :=
endif
@@ -74,7 +68,7 @@ maketools: FORCE
bzImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/zImage
zImage Image bootpImage: vmlinux
zImage Image bootpImage xipImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
zinstall install: vmlinux
......
@@ -36,7 +36,7 @@ endif
export ZTEXTADDR ZBSSADDR ZRELADDR INITRD_PHYS PARAMS_PHYS
targets := Image zImage bootpImage
targets := Image zImage bootpImage xipImage
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
@@ -49,6 +49,15 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
$(obj)/compressed/vmlinux: vmlinux FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
ifeq ($(CONFIG_XIP_KERNEL),y)
$(obj)/xipImage: vmlinux FORCE
$(OBJCOPY) -S -O binary -R .data -R .comment vmlinux vmlinux-text.bin
$(OBJCOPY) -S -O binary -R .init -R .text -R .comment -R __ex_table -R __ksymtab vmlinux vmlinux-data.bin
cat vmlinux-text.bin vmlinux-data.bin > $@
$(RM) -f vmlinux-text.bin vmlinux-data.bin
@echo ' Kernel: $@ is ready'
endif
.PHONY: initrd
initrd:
@test "$(INITRD_PHYS)" != "" || \
......
@@ -580,7 +580,7 @@ static void ecard_proc_init(void)
#define ec_set_resource(ec,nr,st,sz,flg) \
do { \
(ec)->resource[nr].name = ec->dev.name; \
(ec)->resource[nr].name = ec->dev.bus_id; \
(ec)->resource[nr].start = st; \
(ec)->resource[nr].end = (st) + (sz) - 1; \
(ec)->resource[nr].flags = flg; \
@@ -621,7 +621,17 @@ static ssize_t ecard_show_irq(struct device *dev, char *buf)
return sprintf(buf, "%u\n", ec->irq);
}
static DEVICE_ATTR(irq, S_IRUGO, ecard_show_irq, NULL);
static ssize_t ecard_show_vendor(struct device *dev, char *buf)
{
struct expansion_card *ec = ECARD_DEV(dev);
return sprintf(buf, "%u\n", ec->cid.manufacturer);
}
static ssize_t ecard_show_device(struct device *dev, char *buf)
{
struct expansion_card *ec = ECARD_DEV(dev);
return sprintf(buf, "%u\n", ec->cid.product);
}
static ssize_t ecard_show_dma(struct device *dev, char *buf)
{
@@ -629,8 +639,6 @@ static ssize_t ecard_show_dma(struct device *dev, char *buf)
return sprintf(buf, "%u\n", ec->dma);
}
static DEVICE_ATTR(dma, S_IRUGO, ecard_show_dma, NULL);
static ssize_t ecard_show_resources(struct device *dev, char *buf)
{
struct expansion_card *ec = ECARD_DEV(dev);
@@ -646,6 +654,10 @@ static ssize_t ecard_show_resources(struct device *dev, char *buf)
return str - buf;
}
static DEVICE_ATTR(irq, S_IRUGO, ecard_show_irq, NULL);
static DEVICE_ATTR(vendor, S_IRUGO, ecard_show_vendor, NULL);
static DEVICE_ATTR(device, S_IRUGO, ecard_show_device, NULL);
static DEVICE_ATTR(dma, S_IRUGO, ecard_show_dma, NULL);
static DEVICE_ATTR(resource, S_IRUGO, ecard_show_resources, NULL);
/*
@@ -717,8 +729,6 @@ ecard_probe(int slot, card_type_t type)
}
snprintf(ec->dev.bus_id, sizeof(ec->dev.bus_id), "ecard%d", slot);
snprintf(ec->dev.name, sizeof(ec->dev.name), "ecard %04x:%04x",
ec->cid.manufacturer, ec->cid.product);
ec->dev.parent = NULL;
ec->dev.bus = &ecard_bus_type;
ec->dev.dma_mask = &ec->dma_mask;
@@ -745,6 +755,8 @@ ecard_probe(int slot, card_type_t type)
device_create_file(&ec->dev, &dev_attr_dma);
device_create_file(&ec->dev, &dev_attr_irq);
device_create_file(&ec->dev, &dev_attr_resource);
device_create_file(&ec->dev, &dev_attr_vendor);
device_create_file(&ec->dev, &dev_attr_device);
return 0;
......
@@ -47,7 +47,7 @@
@
@ Stack format (ensured by USER_* and SVC_*)
@
#define S_FRAME_SIZE 72
#define S_FRAME_SIZE 72 @ FIXME: Really?
#define S_OLD_R0 64
#define S_PSR 60
#define S_PC 60
@@ -77,11 +77,11 @@
.endm
.macro slow_restore_user_regs
ldmia sp, {r0 - lr}^
mov r0, r0
ldr lr, [sp, #15*4]
add sp, sp, #15*4+8
movs pc, lr
ldmia sp, {r0 - lr}^ @ restore the user regs
mov r0, r0 @ no-op
ldr lr, [sp, #15*4] @ get user PC
add sp, sp, #15*4+8 @ free stack
movs pc, lr @ return
.endm
.macro fast_restore_user_regs
@@ -514,31 +514,37 @@ Lfiqmsg: .ascii "*** Unexpected FIQ\n\0"
* Handles floating point instructions
*/
vector_undefinstr:
tst lr,#3
bne __und_svc
tst lr, #MODE_SVC26 @ did we come from a non-user mode?
bne __und_svc @ yes - deal with it.
/* Otherwise, fall through for the user-space (common) case. */
save_user_regs
zero_fp
teqp pc, #PSR_I_BIT | MODE_SVC26
zero_fp @ zero frame pointer
teqp pc, #PSR_I_BIT | MODE_SVC26 @ disable IRQs
.Lbug_undef:
ldr r4, .LC2
ldr pc, [r4] @ Call FP module USR entry point
ldr pc, [r4] @ Call FP module entry point
/* FIXME - should we trap for a null pointer here? */
.globl fpundefinstr
fpundefinstr: @ Called by FP module on undefined instr
/* The SVC mode case */
__und_svc: SVC_SAVE_ALL @ Non-user mode
mask_pc r0, lr
and r2, lr, #3
sub r0, r0, #4
mov r1, sp
bl do_undefinstr
SVC_RESTORE_ALL
/* We get here if the FP emulator doesnt handle the undef instr.
* If the insn WAS handled, the emulator jumps to ret_from_exception by itself/
*/
.globl fpundefinstr
fpundefinstr:
mov r0, lr
mov r1, sp
teqp pc, #MODE_SVC26
bl do_undefinstr
b ret_from_exception @ Normal FP exit
__und_svc: SVC_SAVE_ALL @ Non-user mode
mask_pc r0, lr
and r2, lr, #3
sub r0, r0, #4
mov r1, sp
bl do_undefinstr
SVC_RESTORE_ALL
#if defined CONFIG_FPE_NWFPE || defined CONFIG_FPE_FASTFPE
/* The FPE is always present */
.equ fpe_not_present, 0
@@ -548,6 +554,7 @@ __und_svc: SVC_SAVE_ALL @ Non-user mode
* a WFS, we just perform a normal return as if we had emulated the
* operation. This is a hack to allow some basic userland binaries
* to run so that the emulator module proper can be loaded. --philb
* FIXME - probably a broken useless hack...
*/
fpe_not_present:
adr r10, wfs_mask_data
@@ -587,14 +594,14 @@ wfs_mask_data: .word 0x0e200110 @ WFS/RFS
* Prefetch abort handler
*-----------------------------------------------------------------------------
*/
#define DEBUG_UNDEF
/* remember: lr = USR pc */
vector_prefetch:
sub lr, lr, #4
tst lr, #3
tst lr, #MODE_SVC26
bne __pabt_invalid
save_user_regs
teqp pc, #MODE_SVC26
teqp pc, #MODE_SVC26 @ Enable IRQs...
mask_pc r0, lr @ Address of abort
mov r1, sp @ Tasks registers
bl do_PrefetchAbort
@@ -604,7 +611,7 @@ vector_prefetch:
adr r0, t
bl printk
#endif
ldr lr, [sp,#S_PC] @ program to test this on. I think its
ldr lr, [sp,#S_PC] @ FIXME program to test this on. I think its
b .Lbug_undef @ broken at the moment though!)
__pabt_invalid: SVC_SAVE_ALL
@@ -707,7 +714,7 @@ vector_IRQ: ldr r13, .LCirq @ I will leave this one in just in case...
bne asm_do_IRQ
mov why, #0
get_thread_info r5
get_thread_info tsk @ FIXME - was r5, but seemed wrong.
b ret_to_user
irq_prio_table
......
@@ -36,6 +36,9 @@
#include <asm/system.h>
#include <asm/irqchip.h>
//FIXME - this ought to be in a header IMO
void __init arc_init_irq(void);
/*
* Maximum IRQ count. Currently, this is arbitary. However, it should
* not be set too low to prevent false triggering. Conversely, if it
......
@@ -58,6 +58,10 @@ extern void squash_mem_tags(struct tag *tag);
extern void bootmem_init(struct meminfo *);
extern int root_mountflags;
extern int _stext, _text, _etext, _edata, _end;
#ifdef CONFIG_XIP_KERNEL
extern int _endtext, _sdata;
#endif
unsigned int processor_id;
unsigned int __machine_arch_type;
@@ -121,6 +125,7 @@ static void __init setup_processor(void)
for (list = &__proc_info_begin; list < &__proc_info_end ; list++)
if ((processor_id & list->cpu_mask) == list->cpu_val)
break;
/*
* If processor type is unrecognised, then we
* can do nothing...
@@ -220,7 +225,11 @@ request_standard_resources(struct meminfo *mi)
kernel_code.start = init_mm.start_code;
kernel_code.end = init_mm.end_code - 1;
#ifdef CONFIG_XIP_KERNEL
kernel_data.start = init_mm.start_data;
#else
kernel_data.start = init_mm.end_code;
#endif
kernel_data.end = init_mm.brk - 1;
for (i = 0; i < mi->nr_banks; i++) {
@@ -456,7 +465,10 @@ void __init setup_arch(char **cmdline_p)
else
machine_name = "UNKNOWN";
//FIXME - this may need altering when we get ROM images working
//FIXME - the tag struct is always copied here but this is a block
// of RAM that is accidentally reserved along with video RAM. perhaps
// it would be a good idea to explicitly reserve this?
tags = (struct tag *)0x0207c000;
/*
@@ -474,7 +486,12 @@
}
init_mm.start_code = (unsigned long) &_text;
#ifndef CONFIG_XIP_KERNEL
init_mm.end_code = (unsigned long) &_etext;
#else
init_mm.end_code = (unsigned long) &_endtext;
init_mm.start_data = (unsigned long) &_sdata;
#endif
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
......
/* ld script to make ARM Linux kernel
* taken from the i386 version by Russell King
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
* borrowed from Russels ARM port by Ian Molton
*/
#include <asm-generic/vmlinux.lds.h>
OUTPUT_ARCH(arm)
ENTRY(stext)
jiffies = jiffies_64;
SECTIONS
{
. = TEXTADDR;
.init : { /* Init code and data */
_stext = .;
__init_begin = .;
_sinittext = .;
*(.init.text)
_einittext = .;
__proc_info_begin = .;
*(.proc.info)
__proc_info_end = .;
__arch_info_begin = .;
*(.arch.info)
__arch_info_end = .;
__tagtable_begin = .;
*(.taglist)
__tagtable_end = .;
. = ALIGN(16);
__setup_start = .;
*(.init.setup)
__setup_end = .;
__early_begin = .;
*(__early_param)
__early_end = .;
__start___param = .;
*(__param)
__stop___param = .;
__initcall_start = .;
*(.initcall1.init)
*(.initcall2.init)
*(.initcall3.init)
*(.initcall4.init)
*(.initcall5.init)
*(.initcall6.init)
*(.initcall7.init)
__initcall_end = .;
__con_initcall_start = .;
*(.con_initcall.init)
__con_initcall_end = .;
. = ALIGN(32);
__initramfs_start = .;
usr/built-in.o(.init.ramfs)
__initramfs_end = .;
. = ALIGN(32768);
__init_end = .;
}
/DISCARD/ : { /* Exit code and data */
*(.exit.text)
*(.exit.data)
*(.exitcall.exit)
}
.text : { /* Real text segment */
_text = .; /* Text and read-only data */
*(.text)
*(.fixup)
*(.gnu.warning)
*(.rodata)
*(.rodata.*)
*(.glue_7)
*(.glue_7t)
*(.got) /* Global offset table */
_etext = .; /* End of text section */
}
. = ALIGN(16);
__ex_table : { /* Exception table */
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
RODATA
_endtext = .;
. = DATAADDR;
_sdata = .;
.data : {
/*
* first, the init thread union, aligned
* to an 8192 byte boundary.
*/
*(.init.task)
/*
* The cacheline aligned data
*/
. = ALIGN(32);
*(.data.cacheline_aligned)
/*
* and the usual data section
*/
*(.data)
CONSTRUCTORS
*(.init.data)
_edata = .;
}
.bss : {
__bss_start = .; /* BSS */
*(.bss)
*(COMMON)
_end = . ;
}
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
}
/* ld script to make ARM Linux kernel
* taken from the i386 version by Russell King
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
* borrowed from Russels ARM port by Ian Molton
* borrowed from Russels ARM port by Ian Molton and subsequently modified.
*/
#include <asm-generic/vmlinux.lds.h>
......
#include <linux/config.h>
#ifdef CONFIG_ROM_KERNEL
#include "vmlinux-armo-rom.lds.in"
#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-arm26-xip.lds.in"
#else
#include "vmlinux-armo.lds.in"
#include "vmlinux-arm26.lds.in"
#endif
@@ -24,12 +24,13 @@
ENTRY(stext)
__entry: cmp pc, #0x02000000
ldrlt pc, LC0 @ if 0x01800000, call at 0x02080000
teq r0, #0 @ Check for old calling method
blne oldparams @ Move page if old
adr r0, LC0
ldmib r0, {r2-r5, sp} @ Setup stack
ldmib r0, {r2-r5, sp} @ Setup stack (and fetch other values)
mov r0, #0
1: cmp r2, r3 @ Clear BSS
mov r0, #0 @ Clear BSS
1: cmp r2, r3
strcc r0, [r2], #4
bcc 1b
@@ -38,6 +39,17 @@ __entry: cmp pc, #0x02000000
bl detect_arch_type
str r0, [r5]
#ifdef CONFIG_XIP_KERNEL
ldr r3, ETEXT @ data section copy
ldr r4, SDATA
ldr r5, EDATA
1:
ldr r6, [r3], #4
str r6, [r4], #4
cmp r4, r5
blt 1b
#endif
mov fp, #0
b start_kernel
@@ -47,8 +59,14 @@ LC0: .word _stext
.word processor_id @ r4
.word __machine_arch_type @ r5
.word init_thread_union+8192 @ sp
arm2_id: .long 0x41560200
arm250_id: .long 0x41560250
#ifdef CONFIG_XIP_KERNEL
ETEXT: .word _endtext
SDATA: .word _sdata
EDATA: .word __bss_start
#endif
arm2_id: .long 0x41560200 @ ARM2 and 250 dont have a CPUID
arm250_id: .long 0x41560250 @ So we create some after probing for them
.align
oldparams: mov r4, #0x02000000
......
@@ -123,7 +123,7 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
show_pte(tsk->mm, addr);
show_regs(regs);
//dump_backtrace(regs, tsk); // FIXME ARM32 dropped this - why?
while(1);
while(1); //FIXME - hack to stop debug going nutso
#endif
tsk->thread.address = addr;
@@ -212,7 +212,7 @@ int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
tsk = current;
mm = tsk->mm;
printk("do_page_fault: pid: %d\n", tsk->pid);
printk("do_page_fault: pid: %d %08x\n", tsk->pid, addr);
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -241,6 +241,7 @@ int do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
/*
* If we are in kernel mode at this point, we
* have no context to handle this fault with.
* FIXME - is this test right?
*/
if (!user_mode(regs)){
goto no_context;
......
@@ -32,15 +32,18 @@
#include <asm/setup.h>
#include <asm/tlb.h>
//#include <asm/arch.h>
#include <asm/map.h>
#define TABLE_SIZE PTRS_PER_PTE * sizeof(pte_t))
struct mmu_gather mmu_gathers[NR_CPUS];
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern char _stext, _text, _etext, _end, __init_begin, __init_end;
#ifdef CONFIG_XIP_KERNEL
extern char _endtext, _sdata;
#endif
extern unsigned long phys_initrd_start;
extern unsigned long phys_initrd_size;
@@ -152,6 +155,7 @@ static void __init
find_memend_and_nodes(struct meminfo *mi, struct node_info *np)
{
unsigned int memend_pfn = 0;
numnodes = 1;
np->bootmap_pages = 0;
@@ -186,45 +190,6 @@ find_memend_and_nodes(struct meminfo *mi, struct node_info *np)
}
/*
* Reserve the various regions of node 0
*/
static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int bootmap_pages)
{
pg_data_t *pgdat = NODE_DATA(0);
/*
* Register the kernel text and data with bootmem.
* Note that this can only be in node 0.
*/
reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
/*
* And don't forget to reserve the allocator bitmap,
* which will be freed later.
*/
reserve_bootmem_node(pgdat, bootmap_pfn << PAGE_SHIFT,
bootmap_pages << PAGE_SHIFT);
/*
* These should likewise go elsewhere. They pre-reserve
* the screen memory region at the start of main system
* memory.
*/
reserve_bootmem_node(pgdat, 0x02000000, 0x00080000);
#ifdef CONFIG_BLK_DEV_INITRD
initrd_start = phys_initrd_start;
initrd_end = initrd_start + phys_initrd_size;
/* Achimedes machines only have one node, so initrd is in node 0 */
reserve_bootmem_node(pgdat, __pa(initrd_start),
initrd_end - initrd_start);
#endif
}
/*
* Initialise the bootmem allocator for all nodes. This is called
* early during the architecture specific initialisation.
@@ -233,6 +198,7 @@ void __init bootmem_init(struct meminfo *mi)
{
struct node_info node_info;
unsigned int bootmap_pfn;
pg_data_t *pgdat = NODE_DATA(0);
find_memend_and_nodes(mi, &node_info);
@@ -247,18 +213,54 @@ void __init bootmem_init(struct meminfo *mi)
/*
* Initialise the bootmem allocator.
*/
init_bootmem_node(NODE_DATA(node), bootmap_pfn, node_info.start, node_info.end);
init_bootmem_node(pgdat, bootmap_pfn, node_info.start, node_info.end);
/*
* Register all available RAM in this node with the bootmem allocator.
*/
free_bootmem_node(NODE_DATA(node), mi->bank->start, mi->bank->size);
free_bootmem_node(pgdat, mi->bank->start, mi->bank->size);
/*
* Register the kernel text and data with bootmem.
* Note: with XIP we dont register .text since
* its in ROM.
*/
#ifdef CONFIG_XIP_KERNEL
reserve_bootmem_node(pgdat, __pa(&_sdata), &_end - &_sdata);
#else
reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
#endif
/*
* Reserve ram for stuff like initrd, video, kernel, etc.
*/
* And don't forget to reserve the allocator bitmap,
* which will be freed later.
*/
reserve_bootmem_node(pgdat, bootmap_pfn << PAGE_SHIFT,
node_info.bootmap_pages << PAGE_SHIFT);
/*
* These should likewise go elsewhere. They pre-reserve
* the screen memory region at the start of main system
* memory. FIXME - screen RAM is not 512K!
*/
reserve_bootmem_node(pgdat, 0x02000000, 0x00080000);
#ifdef CONFIG_BLK_DEV_INITRD
initrd_start = phys_initrd_start;
initrd_end = initrd_start + phys_initrd_size;
/* Archimedes machines only have one node, so initrd is in node 0 */
#ifdef CONFIG_XIP_KERNEL
/* Only reserve initrd space if it is in RAM */
if(initrd_start && initrd_start < 0x03000000){
#else
if(initrd_start){
#endif
reserve_bootmem_node(pgdat, __pa(initrd_start),
initrd_end - initrd_start);
}
#endif /* CONFIG_BLK_DEV_INITRD */
reserve_node_zero(bootmap_pfn, node_info.bootmap_pages);
}
@@ -299,16 +301,15 @@ void __init paging_init(struct meminfo *mi)
pgdat = NODE_DATA(0);
bdata = pgdat->bdata;
zone_size[0] = bdata->node_low_pfn -
(bdata->node_boot_start >> PAGE_SHIFT);
if (!zone_size[0])
BUG();
free_area_init_node(0, pgdat, 0, zone_size,
bdata->node_boot_start >> PAGE_SHIFT, 0);
mem_map = contig_page_data.node_mem_map;
bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
mem_map = NODE_DATA(0)->node_mem_map;
/*
* finish off the bad pages once
@@ -345,8 +346,15 @@ void __init mem_init(void)
pg_data_t *pgdat = NODE_DATA(0);
extern int sysctl_overcommit_memory;
datapages = &_end - &_etext;
/* Note: data pages includes BSS */
#ifdef CONFIG_XIP_KERNEL
codepages = &_endtext - &_text;
datapages = &_end - &_sdata;
#else
codepages = &_etext - &_text;
datapages = &_end - &_etext;
#endif
initpages = &__init_end - &__init_begin;
high_memory = (void *)__va(meminfo.end);
@@ -356,15 +364,14 @@ void __init mem_init(void)
if (pgdat->node_spanned_pages != 0)
totalram_pages += free_all_bootmem_node(pgdat);
printk(KERN_INFO "Memory:");
num_physpages = meminfo.bank[0].size >> PAGE_SHIFT;
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
printk(KERN_INFO "Memory: %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
"%dK data, %dK init)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
codepages >> 10, datapages >> 10, initpages >> 10);
/*
* Turn on overcommit on tiny machines
*/
@@ -374,11 +381,12 @@ void __init mem_init(void)
}
}
void free_initmem(void)
{
void free_initmem(void){
#ifndef CONFIG_XIP_KERNEL
free_area((unsigned long)(&__init_begin),
(unsigned long)(&__init_end),
"init");
#endif
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -387,7 +395,12 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_XIP_KERNEL
/* Only bin initrd if it is in RAM... */
if(!keep_initrd && start < 0x03000000)
#else
if (!keep_initrd)
#endif
free_area(start, end, "initrd");
}
......
@@ -18,6 +18,9 @@ config IA64
page at <http://www.linuxia64.org/> and a mailing list at
linux-ia64@linuxia64.org.
config 64BIT
def_bool y
config MMU
bool
default y
......
@@ -11,6 +11,9 @@ config MIPS64
64-bit processing, otherwise say N. You must say Y for kernels for
SGI IP27 (Origin 200 and 2000). If in doubt say N.
config 64BIT
def_bool MIPS64
config MIPS32
bool
depends on MIPS64 = 'n'
......
@@ -103,6 +103,9 @@ config PARISC64
enable this option otherwise. The 64bit kernel is significantly bigger
and slower than the 32bit one.
config 64BIT
def_bool PARISC64
config PDC_NARROW
bool "32-bit firmware"
depends on PARISC64
......
@@ -3,6 +3,9 @@
# see Documentation/kbuild/kconfig-language.txt.
#
config 64BIT
def_bool y
config MMU
bool
default y
......
@@ -40,6 +40,9 @@ config ARCH_S390X
Select this option if you have a 64 bit IBM zSeries machine
and want to use the 64 bit addressing mode.
config 64BIT
def_bool ARCH_S390X
config ARCH_S390_31
bool
depends on ARCH_S390X = 'n'
......
@@ -5,6 +5,9 @@
mainmenu "Linux/UltraSPARC Kernel Configuration"
config 64BIT
def_bool y
config MMU
bool
default y
......
@@ -16,6 +16,9 @@ config X86_64
Port to the x86-64 architecture. x86-64 is a 64-bit extension to the
classical 32-bit x86 architecture. For details see http://www.x86-64.org
config 64BIT
def_bool y
config X86
bool
default y
......
@@ -41,15 +41,16 @@ CFLAGS += -mno-red-zone
CFLAGS += -mcmodel=kernel
CFLAGS += -pipe
# this makes reading assembly source easier, but produces worse code
# actually it makes the kernel smaller too.
CFLAGS += -fno-reorder-blocks
# should lower this a lot and see how much .text is saves
CFLAGS += -finline-limit=2000
CFLAGS += -Wno-sign-compare
#CFLAGS += -g
# don't enable this when you use kgdb:
ifneq ($(CONFIG_X86_REMOTE_DEBUG),y)
CFLAGS += -fno-asynchronous-unwind-tables
endif
#CFLAGS += -funit-at-a-time
head-y := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o
......
@@ -673,12 +673,10 @@ static int mtrr_ioctl32(unsigned int fd, unsigned int cmd, unsigned long arg)
return err;
}
#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler), NULL },
#define HANDLE_IOCTL(cmd,handler) { (cmd), (ioctl_trans_handler_t)(handler) },
#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd,sys_ioctl)
#define IOCTL_TABLE_START struct ioctl_trans ioctl_start[] = {
#define IOCTL_TABLE_END };
IOCTL_TABLE_START
struct ioctl_trans ioctl_start[] = {
#include <linux/compat_ioctl.h>
#define DECLARES
#include "compat_ioctl.c"
@@ -761,6 +759,7 @@ HANDLE_IOCTL(MTRRIOC32_SET_PAGE_ENTRY, mtrr_ioctl32)
HANDLE_IOCTL(MTRRIOC32_DEL_PAGE_ENTRY, mtrr_ioctl32)
HANDLE_IOCTL(MTRRIOC32_GET_PAGE_ENTRY, mtrr_ioctl32)
HANDLE_IOCTL(MTRRIOC32_KILL_PAGE_ENTRY, mtrr_ioctl32)
IOCTL_TABLE_END
};
int ioctl_table_size = ARRAY_SIZE(ioctl_start);
@@ -80,6 +80,10 @@ static int ia32_copy_siginfo_to_user(siginfo_t32 *to, siginfo_t *from)
default:
err |= __put_user(from->si_uid, &to->si_uid);
break;
case __SI_POLL >> 16:
err |= __put_user(from->si_band, &to->si_band);
err |= __put_user(from->si_fd, &to->si_fd);
break;
/* case __SI_RT: This is not generated by the kernel as of now. */
}
return err;
......
@@ -475,6 +475,7 @@ ia32_sys_call_table:
.quad compat_fstatfs64 /* fstatfs64 */
.quad sys_tgkill
.quad compat_sys_utimes
.quad sys32_fadvise64_64
/* don't forget to change IA32_NR_syscalls */
ia32_syscall_end:
.rept IA32_NR_syscalls-(ia32_syscall_end-ia32_sys_call_table)/8
......
@@ -1170,35 +1170,6 @@ sys32_rt_sigqueueinfo(int pid, int sig, siginfo_t32 *uinfo)
return ret;
}
asmlinkage long sys_utimes(char *, struct timeval *);
asmlinkage long
sys32_utimes(char *filename, struct compat_timeval *tvs)
{
char *kfilename;
struct timeval ktvs[2];
mm_segment_t old_fs;
int ret;
kfilename = getname(filename);
ret = PTR_ERR(kfilename);
if (!IS_ERR(kfilename)) {
if (tvs) {
if (get_tv32(&ktvs[0], tvs) ||
get_tv32(&ktvs[1], 1+tvs))
return -EFAULT;
}
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_utimes(kfilename, &ktvs[0]);
set_fs(old_fs);
putname(kfilename);
}
return ret;
}
/* These are here just in case some old ia32 binary calls it. */
asmlinkage long
sys32_pause(void)
@@ -2027,6 +1998,17 @@ sys32_timer_create(u32 clock, struct sigevent32 *se32, timer_t *timer_id)
return err;
}
extern long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice);
long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
__u32 len_low, __u32 len_high, int advice)
{
return sys_fadvise64_64(fd,
(((u64)offset_high)<<32) | offset_low,
(((u64)len_high)<<32) | len_low,
advice);
}
long sys32_vm86_warning(void)
{
printk(KERN_INFO "%s: vm86 mode not supported on 64 bit kernel\n",
......
@@ -47,6 +47,9 @@
#include <asm/tlbflush.h>
extern int acpi_disabled;
int acpi_lapic = 0;
int acpi_ioapic = 0;
extern int disable_apic;
#define PREFIX "ACPI: "
@@ -76,8 +79,6 @@ __acpi_map_table (
#ifdef CONFIG_X86_LOCAL_APIC
int acpi_lapic;
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -165,8 +166,6 @@ acpi_parse_lapic_nmi (
#ifdef CONFIG_X86_IO_APIC
int acpi_ioapic;
static int __init
acpi_parse_ioapic (
acpi_table_entry_header *header)
@@ -292,12 +291,34 @@ acpi_find_rsdp (void)
return rsdp_phys;
}
/*
* acpi_boot_init()
* called from setup_arch(), always.
* 1. maps ACPI tables for later use
* 2. enumerates lapics
* 3. enumerates io-apics
*
* side effects:
* acpi_lapic = 1 if LAPIC found
* acpi_ioapic = 1 if IOAPIC found
* if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
* if acpi_blacklisted() acpi_disabled = 1;
* acpi_irq_model=...
* ...
*
* return value: (currently ignored)
* 0: success
* !0: failure
*/
int __init
acpi_boot_init (void)
{
int result = 0;
if (acpi_disabled)
return 1;
/*
* The default interrupt routing model is PIC (8259). This gets
* overriden if IOAPICs are enumerated (below).
@@ -316,9 +337,7 @@ acpi_boot_init (void)
printk(KERN_WARNING PREFIX "BIOS listed in blacklist, disabling ACPI support\n");
acpi_disabled = 1;
return result;
} else
printk(KERN_NOTICE PREFIX "BIOS not listed in blacklist\n");
}
extern int disable_apic;
if (disable_apic)
@@ -391,6 +410,25 @@ acpi_boot_init (void)
* --------
*/
/*
* ACPI interpreter is required to complete interrupt setup,
* so if it is off, don't enumerate the io-apics with ACPI.
* If MPS is present, it will handle them,
* otherwise the system will stay in PIC mode
*/
if (acpi_disabled) {
return 1;
}
/*
* if "noapic" boot option, don't look for IO-APICs
*/
if (disable_apic) {
printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
"due to 'noapic' option.\n");
return 1;
}
result = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic);
if (!result) {
printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
......
@@ -38,8 +38,6 @@ int disable_apic_timer __initdata;
/* Using APIC to generate smp_local_timer_interrupt? */
int using_apic_timer = 0;
int dont_enable_local_apic __initdata = 0;
static DEFINE_PER_CPU(int, prof_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_counter) = 1;
@@ -464,7 +462,6 @@ static struct {
static int lapic_suspend(struct sys_device *dev, u32 state)
{
unsigned int l, h;
unsigned long flags;
if (!apic_pm_state.active)
@@ -486,9 +483,6 @@ static int lapic_suspend(struct sys_device *dev, u32 state)
local_save_flags(flags);
local_irq_disable();
disable_local_APIC();
rdmsr(MSR_IA32_APICBASE, l, h);
l &= ~MSR_IA32_APICBASE_ENABLE;
wrmsr(MSR_IA32_APICBASE, l, h);
local_irq_restore(flags);
return 0;
}
@@ -1017,6 +1011,12 @@ static __init int setup_disableapic(char *str)
return 0;
}
static __init int setup_nolapic(char *str)
{
disable_apic = 1;
return 0;
}
static __init int setup_noapictimer(char *str)
{
disable_apic_timer = 1;
@@ -1024,5 +1024,7 @@ static __init int setup_noapictimer(char *str)
}
__setup("disableapic", setup_disableapic);
__setup("nolapic", setup_nolapic); /* same as disableapic, for compatibility */
__setup("noapictimer", setup_noapictimer);
/* no "lapic" flag - we only use the lapic when the BIOS tells us so. */
@@ -222,10 +222,23 @@ int apply_relocate(Elf_Shdr *sechdrs,
return -ENOSYS;
}
extern void apply_alternatives(void *start, void *end);
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
const Elf_Shdr *s;
char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
/* look for .altinstructions to patch */
for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
void *seg;
if (strcmp(".altinstructions", secstrings + s->sh_name))
continue;
seg = (void *)s->sh_addr;
apply_alternatives(seg, seg + s->sh_size);
}
return 0;
}
......
@@ -429,8 +429,6 @@ static inline int pci_map_cont(struct scatterlist *sg, int start, int stopat,
return __pci_map_cont(sg, start, stopat, sout, pages);
}
#define PCI_NO_MERGE 0
/*
* DMA map all entries in a scatterlist.
* Merge chunks that have page aligned sizes into a continuous mapping.
@@ -463,7 +461,7 @@ int pci_map_sg(struct pci_dev *dev, struct scatterlist *sg, int nents, int dir)
struct scatterlist *ps = &sg[i-1];
/* Can only merge when the last chunk ends on a page
boundary. */
if (PCI_NO_MERGE || !need || (i-1 > start && ps->offset) ||
if (!force_iommu || !need || (i-1 > start && ps->offset) ||
(ps->offset + ps->length) % PAGE_SIZE) {
if (pci_map_cont(sg, start, i, sg+out, pages,
need) < 0)
......
@@ -38,6 +38,7 @@
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -197,6 +198,12 @@ static __init void parse_cmdline_early (char ** cmdline_p)
if (!memcmp(from, "acpi=off", 8))
acpi_disabled = 1;
if (!memcmp(from, "acpi=force", 10)) {
/* add later when we do DMI horrors: */
/* acpi_force = 1; */
acpi_disabled = 0;
}
if (!memcmp(from, "disableapic", 11))
disable_apic = 1;
@@ -240,6 +247,71 @@ static void __init contig_initmem_init(void)
}
#endif
/* Use inline assembly to define this because the nops are defined
as inline assembly strings in the include files and we cannot
get them easily into strings. */
asm("\t.data\nk8nops: "
K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
NULL,
k8nops,
k8nops + 1,
k8nops + 1 + 2,
k8nops + 1 + 2 + 3,
k8nops + 1 + 2 + 3 + 4,
k8nops + 1 + 2 + 3 + 4 + 5,
k8nops + 1 + 2 + 3 + 4 + 5 + 6,
k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
/* Replace instructions with better alternatives for this CPU type.
This runs before SMP is initialized to avoid SMP problems with
self modifying code. This implies that asymmetric systems where
APs have less capabilities than the boot processor are not handled.
In this case boot with "noreplacement". */
void apply_alternatives(void *start, void *end)
{
struct alt_instr *a;
int diff, i, k;
for (a = start; (void *)a < end; a++) {
if (!boot_cpu_has(a->cpuid))
continue;
BUG_ON(a->replacementlen > a->instrlen);
__inline_memcpy(a->instr, a->replacement, a->replacementlen);
diff = a->instrlen - a->replacementlen;
/* Pad the rest with nops */
for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
k = diff;
if (k > ASM_NOP_MAX)
k = ASM_NOP_MAX;
__inline_memcpy(a->instr + i, k8_nops[k], k);
}
}
}
static int no_replacement __initdata = 0;
void __init alternative_instructions(void)
{
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
if (no_replacement)
return;
apply_alternatives(__alt_instructions, __alt_instructions_end);
}
static int __init noreplacement_setup(char *s)
{
no_replacement = 1;
return 0;
}
__setup("noreplacement", noreplacement_setup);
void __init setup_arch(char **cmdline_p)
{
@@ -382,7 +454,7 @@ void __init setup_arch(char **cmdline_p)
/* Will likely break when you have unassigned resources with more
than 4GB memory and bridges that don't support more than 4GB.
Doing it properly would require to allocate GFP_DMA memory
Doing it properly would require to use pci_alloc_consistent
in this case. */
low_mem_size = ((end_pfn << PAGE_SHIFT) + 0xfffff) & ~0xfffff;
if (low_mem_size > pci_mem_start)
@@ -455,11 +527,17 @@ static void __init display_cacheinfo(struct cpuinfo_x86 *c)
static int __init init_amd(struct cpuinfo_x86 *c)
{
int r;
int level;
/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
clear_bit(0*32+31, &c->x86_capability);
/* C-stepping K8? */
level = cpuid_eax(1);
if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
set_bit(X86_FEATURE_K8_C, &c->x86_capability);
r = get_model_name(c);
if (!r) {
switch (c->x86) {
......
@@ -679,8 +679,8 @@ void math_error(void *rip)
default:
break;
case 0x001: /* Invalid Op */
case 0x040: /* Stack Fault */
case 0x240: /* Stack Fault | Direction */
case 0x041: /* Stack Fault */
case 0x241: /* Stack Fault | Direction */
info.si_code = FPE_FLTINV;
break;
case 0x002: /* Denormalize */
......
@@ -106,6 +106,15 @@ SECTIONS
.con_initcall.init : { *(.con_initcall.init) }
__con_initcall_end = .;
SECURITY_INIT
. = ALIGN(8);
__alt_instructions = .;
.altinstructions : { *(.altinstructions) }
__alt_instructions_end = .;
.altinstr_replacement : { *(.altinstr_replacement) }
/* .exit.text is discarded at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
.exit.text : { *(.exit.text) }
.exit.data : { *(.exit.data) }
. = ALIGN(4096);
__initramfs_start = .;
.init.ramfs : { *(.init.ramfs) }
@@ -127,8 +136,6 @@ SECTIONS
/* Sections to be discarded */
/DISCARD/ : {
*(.exit.data)
/* *(.exit.text) */
*(.exitcall.exit)
*(.eh_frame)
}
......
/*
* Zero a page.
* rdi page
@@ -24,4 +23,28 @@ clear_page:
jnz .Lloop
nop
ret
clear_page_end:
/* C stepping K8 run faster using the string instructions.
It is also a lot simpler. Use this when possible */
#include <asm/cpufeature.h>
.section .altinstructions,"a"
.align 8
.quad clear_page
.quad clear_page_c
.byte X86_FEATURE_K8_C
.byte clear_page_end-clear_page
.byte clear_page_c_end-clear_page_c
.previous
.section .altinstr_replacement,"ax"
clear_page_c:
movl $4096/8,%ecx
xorl %eax,%eax
rep
stosq
ret
clear_page_c_end:
.previous
@@ -89,3 +89,26 @@ copy_page:
movq 2*8(%rsp),%r13
addq $3*8,%rsp
ret
/* C stepping K8 run faster using the string copy instructions.
It is also a lot simpler. Use this when possible */
#include <asm/cpufeature.h>
.section .altinstructions,"a"
.align 8
.quad copy_page
.quad copy_page_c
.byte X86_FEATURE_K8_C
.byte copy_page_c_end-copy_page_c
.byte copy_page_c_end-copy_page_c
.previous
.section .altinstr_replacement,"ax"
copy_page_c:
movl $4096/8,%ecx
rep
movsq
ret
copy_page_c_end:
.previous
@@ -9,6 +9,7 @@
#include <asm/current.h>
#include <asm/offset.h>
#include <asm/thread_info.h>
#include <asm/cpufeature.h>
/* Standard copy_to_user with segment limit checking */
.globl copy_to_user
@@ -20,7 +21,23 @@ copy_to_user:
jc bad_to_user
cmpq threadinfo_addr_limit(%rax),%rcx
jae bad_to_user
jmp copy_user_generic
2:
.byte 0xe9 /* 32bit jump */
.long .Lcug-1f
1:
.section .altinstr_replacement,"ax"
3: .byte 0xe9 /* replacement jmp with 8 bit immediate */
.long copy_user_generic_c-1b /* offset */
.previous
.section .altinstructions,"a"
.align 8
.quad 2b
.quad 3b
.byte X86_FEATURE_K8_C
.byte 5
.byte 5
.previous
/* Standard copy_from_user with segment limit checking */
.globl copy_from_user
@@ -46,6 +63,7 @@ bad_to_user:
ret
.previous
/*
* copy_user_generic - memory copy with exception handling.
*
@@ -60,9 +78,22 @@ bad_to_user:
.globl copy_user_generic
.p2align 4
copy_user_generic:
/* Put the first cacheline into cache. This should handle
the small movements in ioctls etc., but not penalize the bigger
filesystem data copies too much. */
.byte 0x66,0x66,0x90 /* 5 byte nop for replacement jump */
.byte 0x66,0x90
1:
.section .altinstr_replacement,"ax"
2: .byte 0xe9 /* near jump with 32bit immediate */
.long copy_user_generic_c-1b /* offset */
.previous
.section .altinstructions,"a"
.align 8
.quad copy_user_generic
.quad 2b
.byte X86_FEATURE_K8_C
.byte 5
.byte 5
.previous
.Lcug:
pushq %rbx
xorl %eax,%eax /*zero for the exception handler */
@@ -232,3 +263,34 @@ copy_user_generic:
.Le_zero:
movq %rdx,%rax
jmp .Lende
/* C stepping K8 run faster using the string copy instructions.
This is also a lot simpler. Use them when possible.
Patch in jmps to this code instead of copying it fully
to avoid unwanted aliasing in the exception tables. */
/* rdi destination
* rsi source
* rdx count
*
* Output:
* eax uncopied bytes or 0 if successful.
*/
copy_user_generic_c:
movl %edx,%ecx
shrl $3,%ecx
andl $7,%edx
1: rep
movsq
movl %edx,%ecx
2: rep
movsb
4: movl %ecx,%eax
ret
3: lea (%rdx,%rcx,8),%rax
ret
.section __ex_table,"a"
.quad 1b,3b
.quad 2b,4b
.previous
/* Copyright 2002 Andi Kleen */
#include <asm/cpufeature.h>
/*
* memcpy - Copy a memory block.
*
@@ -86,4 +87,35 @@ memcpy:
.Lende:
popq %rbx
ret
.Lfinal:
/* C stepping K8 run faster using the string copy instructions.
It is also a lot simpler. Use this when possible */
.section .altinstructions,"a"
.align 8
.quad memcpy
.quad memcpy_c
.byte X86_FEATURE_K8_C
.byte .Lfinal-memcpy
.byte memcpy_c_end-memcpy_c
.previous
.section .altinstr_replacement,"ax"
/* rdi destination
* rsi source
* rdx count
*/
memcpy_c:
movq %rdi,%rax
movl %edx,%ecx
shrl $3,%ecx
andl $7,%edx
rep
movsq
movl %edx,%ecx
rep
movsb
ret
memcpy_c_end:
.previous
/* Copyright 2002 Andi Kleen, SuSE Labs */
/*
* ISO C memset - set a memory block to a byte value.
*
@@ -85,3 +84,42 @@ __memset:
addq %r8,%rdi
subq %r8,%r11
jmp .Lafter_bad_alignment
/* A C stepping K8 runs faster using the string instructions.
It is also a lot simpler. Use this when possible */
#include <asm/cpufeature.h>
.section .altinstructions,"a"
.align 8
.quad memset
.quad memset_c
.byte X86_FEATURE_K8_C
.byte memset_c_end-memset_c
.byte memset_c_end-memset_c
.previous
.section .altinstr_replacement,"ax"
/* rdi destination
* rsi value
* rdx count
*/
memset_c:
movq %rdi,%r9
movl %edx,%r8d
andl $7,%r8d
movl %edx,%ecx
shrl $3,%ecx
/* expand byte value */
movzbl %sil,%esi
movabs $0x0101010101010101,%rax
mul %rsi /* with rax, clobbers rdx */
rep
stosq
movl %r8d,%ecx
rep
stosb
movq %r9,%rax
ret
memset_c_end:
.previous
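The one non-obvious step in memset_c is the byte expansion: the zero-extended fill byte multiplied by 0x0101010101010101 replicates it across all eight bytes of %rax before the rep stosq. A quick user-space check of that identity:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t  c = 0xAB;
        uint64_t pattern = (uint64_t)c * 0x0101010101010101ULL;

        /* Prints abababababababab: the fill byte replicated across all
         * 64 bits, ready to be stored eight bytes at a time (rep stosq),
         * with the count & 7 remainder handled by rep stosb. */
        printf("%016llx\n", (unsigned long long)pattern);
        return 0;
}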
...@@ -68,7 +68,7 @@ unsigned long __clear_user(void *addr, unsigned long size) ...@@ -68,7 +68,7 @@ unsigned long __clear_user(void *addr, unsigned long size)
asm volatile( asm volatile(
" testq %[size8],%[size8]\n" " testq %[size8],%[size8]\n"
" jz 4f\n" " jz 4f\n"
"0: movnti %[zero],(%[dst])\n" "0: movq %[zero],(%[dst])\n"
" addq %[eight],%[dst]\n" " addq %[eight],%[dst]\n"
" decl %%ecx ; jnz 0b\n" " decl %%ecx ; jnz 0b\n"
"4: movq %[size1],%%rcx\n" "4: movq %[size1],%%rcx\n"
...@@ -77,7 +77,7 @@ unsigned long __clear_user(void *addr, unsigned long size) ...@@ -77,7 +77,7 @@ unsigned long __clear_user(void *addr, unsigned long size)
"1: movb %b[zero],(%[dst])\n" "1: movb %b[zero],(%[dst])\n"
" incq %[dst]\n" " incq %[dst]\n"
" decl %%ecx ; jnz 1b\n" " decl %%ecx ; jnz 1b\n"
"2: sfence\n" "2:\n"
".section .fixup,\"ax\"\n" ".section .fixup,\"ax\"\n"
"3: lea 0(%[size1],%[size8],8),%[size8]\n" "3: lea 0(%[size1],%[size8],8),%[size8]\n"
" jmp 2b\n" " jmp 2b\n"
......
...@@ -60,9 +60,13 @@ ...@@ -60,9 +60,13 @@
_IOWR('#', 0x14, struct video1394_mmap) _IOWR('#', 0x14, struct video1394_mmap)
#define VIDEO1394_IOC_UNTALK_CHANNEL \ #define VIDEO1394_IOC_UNTALK_CHANNEL \
_IOW ('#', 0x15, int) _IOW ('#', 0x15, int)
/*
* This one is broken: it really wanted
* "sizeof (struct video1394_wait) + sizeof (struct video1394_queue_variable)"
* but got just a "size_t"
*/
#define VIDEO1394_IOC_TALK_QUEUE_BUFFER \ #define VIDEO1394_IOC_TALK_QUEUE_BUFFER \
_IOW ('#', 0x16, sizeof (struct video1394_wait) + \ _IOW ('#', 0x16, size_t)
sizeof (struct video1394_queue_variable))
#define VIDEO1394_IOC_TALK_WAIT_BUFFER \ #define VIDEO1394_IOC_TALK_WAIT_BUFFER \
_IOW ('#', 0x17, struct video1394_wait) _IOW ('#', 0x17, struct video1394_wait)
#define VIDEO1394_IOC_LISTEN_POLL_BUFFER \ #define VIDEO1394_IOC_LISTEN_POLL_BUFFER \
......
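The breakage described above follows from how _IOW encodes its third argument: the macro applies sizeof() to it and packs the result into the size field of the command number, so handing it an arithmetic expression records only sizeof(size_t). A user-space sketch (the struct names are hypothetical stand-ins for the video1394 structures):

#include <stdio.h>
#include <stddef.h>
#include <linux/ioctl.h>

struct a { char buf[40]; };    /* stand-in for video1394_wait */
struct b { char buf[24]; };    /* stand-in for video1394_queue_variable */

int main(void)
{
        /* sizeof applied to an expression yields the size of the
         * expression's type (size_t), not its value... */
        printf("%zu\n", sizeof(sizeof(struct a) + sizeof(struct b)));

        /* ...so the old definition encoded sizeof(size_t) all along,
         * which is exactly what the corrected one spells out. */
        printf("%u\n", (unsigned)_IOC_SIZE(_IOW('#', 0x16, size_t)));
        return 0;
}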
...@@ -366,7 +366,7 @@ source "drivers/scsi/aic7xxx/Kconfig.aic79xx" ...@@ -366,7 +366,7 @@ source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
# All the I2O code and drivers do not seem to be 64bit safe. # All the I2O code and drivers do not seem to be 64bit safe.
config SCSI_DPT_I2O config SCSI_DPT_I2O
tristate "Adaptec I2O RAID support " tristate "Adaptec I2O RAID support "
depends on !X86_64 && SCSI && BROKEN depends on !64BIT && SCSI && BROKEN
help help
This driver supports all of Adaptec's I2O based RAID controllers as This driver supports all of Adaptec's I2O based RAID controllers as
well as the DPT SmartRaid V cards. This is an Adaptec maintained well as the DPT SmartRaid V cards. This is an Adaptec maintained
......
...@@ -163,27 +163,27 @@ struct expansion_card { ...@@ -163,27 +163,27 @@ struct expansion_card {
struct resource resource[ECARD_NUM_RESOURCES]; struct resource resource[ECARD_NUM_RESOURCES];
/* Public data */ /* Public data */
volatile unsigned char *irqaddr; /* address of IRQ register */ volatile unsigned char *irqaddr; /* address of IRQ register */
volatile unsigned char *fiqaddr; /* address of FIQ register */ volatile unsigned char *fiqaddr; /* address of FIQ register */
unsigned char irqmask; /* IRQ mask */ unsigned char irqmask; /* IRQ mask */
unsigned char fiqmask; /* FIQ mask */ unsigned char fiqmask; /* FIQ mask */
unsigned char claimed; /* Card claimed? */ unsigned char claimed; /* Card claimed? */
void *irq_data; /* Data for use for IRQ by card */ void *irq_data; /* Data for use for IRQ by card */
void *fiq_data; /* Data for use for FIQ by card */ void *fiq_data; /* Data for use for FIQ by card */
const expansioncard_ops_t *ops; /* Enable/Disable Ops for card */ const expansioncard_ops_t *ops; /* Enable/Disable Ops for card */
CONST unsigned int slot_no; /* Slot number */ CONST unsigned int slot_no; /* Slot number */
CONST unsigned int dma; /* DMA number (for request_dma) */ CONST unsigned int dma; /* DMA number (for request_dma) */
CONST unsigned int irq; /* IRQ number (for request_irq) */ CONST unsigned int irq; /* IRQ number (for request_irq) */
CONST unsigned int fiq; /* FIQ number (for request_irq) */ CONST unsigned int fiq; /* FIQ number (for request_irq) */
CONST card_type_t type; /* Type of card */ CONST card_type_t type; /* Type of card */
CONST struct in_ecid cid; /* Card Identification */ CONST struct in_ecid cid; /* Card Identification */
/* Private internal data */ /* Private internal data */
const char *card_desc; /* Card description */ const char *card_desc; /* Card description */
CONST unsigned int podaddr; /* Base Linux address for card */ CONST unsigned int podaddr; /* Base Linux address for card */
CONST loader_t loader; /* loader program */ CONST loader_t loader; /* loader program */
u64 dma_mask; u64 dma_mask;
}; };
......
//FIXME - nicked from arm32 - check it is correct...
#include <asm-generic/local.h>
//FIXME - nicked from arm32 - check it is correct.
#include <asm-generic/sections.h>
...@@ -19,8 +19,10 @@ ...@@ -19,8 +19,10 @@
/* /*
* These are the values used to represent the user `fs' and the kernel `ds' * These are the values used to represent the user `fs' and the kernel `ds'
* FIXME - the KERNEL_DS should end at 0x03000000 but we want to access ROM at
 * 0x03400000. Ideally we want to forbid access to the I/O space in between.
*/ */
#define KERNEL_DS 0x03000000 #define KERNEL_DS 0x03FFFFFF
#define USER_DS 0x02000000 #define USER_DS 0x02000000
extern uaccess_t uaccess_user, uaccess_kernel; extern uaccess_t uaccess_user, uaccess_kernel;
...@@ -28,7 +30,7 @@ extern uaccess_t uaccess_user, uaccess_kernel; ...@@ -28,7 +30,7 @@ extern uaccess_t uaccess_user, uaccess_kernel;
static inline void set_fs (mm_segment_t fs) static inline void set_fs (mm_segment_t fs)
{ {
current_thread_info()->addr_limit = fs; current_thread_info()->addr_limit = fs;
current->thread.uaccess = fs == USER_DS ? &uaccess_user : &uaccess_kernel; current->thread.uaccess = (fs == USER_DS ? &uaccess_user : &uaccess_kernel);
} }
#define __range_ok(addr,size) ({ \ #define __range_ok(addr,size) ({ \
......
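For context, the addr_limit written by set_fs() is what __range_ok compares user pointers against; the macro body is elided from this hunk, so the helper below is only an illustration of that style of check, not the arm26 implementation:

/* Illustrative only: an access of `size` bytes at `addr` is acceptable
 * when it lies entirely below the current segment limit (USER_DS, or
 * the widened KERNEL_DS above when the kernel needs to reach the ROM). */
static inline int range_ok_sketch(unsigned long addr, unsigned long size,
                                  unsigned long addr_limit)
{
        return size <= addr_limit && addr <= addr_limit - size;
}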
...@@ -195,7 +195,7 @@ __asm__ __volatile__(LOCK "andl %0,%1" \ ...@@ -195,7 +195,7 @@ __asm__ __volatile__(LOCK "andl %0,%1" \
#define atomic_set_mask(mask, addr) \ #define atomic_set_mask(mask, addr) \
__asm__ __volatile__(LOCK "orl %0,%1" \ __asm__ __volatile__(LOCK "orl %0,%1" \
: : "r" ((unsigned)mask),"m" (*addr) : "memory") : : "r" ((unsigned)mask),"m" (*(addr)) : "memory")
/* Atomic operations are already serializing on x86 */ /* Atomic operations are already serializing on x86 */
#define smp_mb__before_atomic_dec() barrier() #define smp_mb__before_atomic_dec() barrier()
......
...@@ -270,7 +270,7 @@ static __inline__ int variable_test_bit(int nr, volatile const void * addr) ...@@ -270,7 +270,7 @@ static __inline__ int variable_test_bit(int nr, volatile const void * addr)
* Returns the bit-number of the first zero bit, not the number of the byte * Returns the bit-number of the first zero bit, not the number of the byte
* containing a bit. * containing a bit.
*/ */
static __inline__ int find_first_zero_bit(void * addr, unsigned size) static __inline__ int find_first_zero_bit(const unsigned long * addr, unsigned size)
{ {
int d0, d1, d2; int d0, d1, d2;
int res; int res;
...@@ -299,7 +299,7 @@ static __inline__ int find_first_zero_bit(void * addr, unsigned size) ...@@ -299,7 +299,7 @@ static __inline__ int find_first_zero_bit(void * addr, unsigned size)
* @offset: The bitnumber to start searching at * @offset: The bitnumber to start searching at
* @size: The maximum size to search * @size: The maximum size to search
*/ */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset) static __inline__ int find_next_zero_bit (const unsigned long * addr, int size, int offset)
{ {
unsigned long * p = ((unsigned long *) addr) + (offset >> 6); unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
unsigned long set = 0; unsigned long set = 0;
...@@ -321,7 +321,7 @@ static __inline__ int find_next_zero_bit (void * addr, int size, int offset) ...@@ -321,7 +321,7 @@ static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
/* /*
* No zero yet, search remaining full words for a zero * No zero yet, search remaining full words for a zero
*/ */
res = find_first_zero_bit (p, size - 64 * (p - (unsigned long *) addr)); res = find_first_zero_bit ((const unsigned long *)p, size - 64 * (p - (unsigned long *) addr));
return (offset + set + res); return (offset + set + res);
} }
...@@ -334,7 +334,7 @@ static __inline__ int find_next_zero_bit (void * addr, int size, int offset) ...@@ -334,7 +334,7 @@ static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
* Returns the bit-number of the first set bit, not the number of the byte * Returns the bit-number of the first set bit, not the number of the byte
* containing a bit. * containing a bit.
*/ */
static __inline__ int find_first_bit(void * addr, unsigned size) static __inline__ int find_first_bit(const unsigned long * addr, unsigned size)
{ {
int d0, d1; int d0, d1;
int res; int res;
...@@ -361,7 +361,7 @@ static __inline__ int find_first_bit(void * addr, unsigned size) ...@@ -361,7 +361,7 @@ static __inline__ int find_first_bit(void * addr, unsigned size)
* @offset: The bitnumber to start searching at * @offset: The bitnumber to start searching at
* @size: The maximum size to search * @size: The maximum size to search
*/ */
static __inline__ int find_next_bit(void * addr, int size, int offset) static __inline__ int find_next_bit(const unsigned long * addr, int size, int offset)
{ {
unsigned int * p = ((unsigned int *) addr) + (offset >> 5); unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
int set = 0, bit = offset & 31, res; int set = 0, bit = offset & 31, res;
...@@ -382,7 +382,7 @@ static __inline__ int find_next_bit(void * addr, int size, int offset) ...@@ -382,7 +382,7 @@ static __inline__ int find_next_bit(void * addr, int size, int offset)
/* /*
* No set bit yet, search remaining full words for a bit * No set bit yet, search remaining full words for a bit
*/ */
res = find_first_bit (p, size - 32 * (p - (unsigned int *) addr)); res = find_first_bit ((const unsigned long *)p, size - 32 * (p - (unsigned int *) addr));
return (offset + set + res); return (offset + set + res);
} }
...@@ -442,7 +442,7 @@ static __inline__ unsigned long __ffs(unsigned long word) ...@@ -442,7 +442,7 @@ static __inline__ unsigned long __ffs(unsigned long word)
#ifdef __KERNEL__ #ifdef __KERNEL__
static inline int sched_find_first_bit(unsigned long *b) static inline int sched_find_first_bit(const unsigned long *b)
{ {
if (b[0]) if (b[0])
return __ffs(b[0]); return __ffs(b[0]);
......
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/pda.h> #include <asm/pda.h>
extern void alternative_instructions(void);
static void __init check_bugs(void) static void __init check_bugs(void)
{ {
identify_cpu(&boot_cpu_data); identify_cpu(&boot_cpu_data);
...@@ -23,4 +25,5 @@ static void __init check_bugs(void) ...@@ -23,4 +25,5 @@ static void __init check_bugs(void)
printk("CPU: "); printk("CPU: ");
print_cpu_info(&boot_cpu_data); print_cpu_info(&boot_cpu_data);
#endif #endif
alternative_instructions();
} }
...@@ -59,6 +59,7 @@ ...@@ -59,6 +59,7 @@
#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ #define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
#define X86_FEATURE_K8_C (3*32+ 4) /* C stepping K8 */
#define cpu_has(c, bit) test_bit(bit, (c)->x86_capability) #define cpu_has(c, bit) test_bit(bit, (c)->x86_capability)
#define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability) #define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability)
......
...@@ -277,6 +277,7 @@ ...@@ -277,6 +277,7 @@
#define __NR_ia32_fstatfs64 269 #define __NR_ia32_fstatfs64 269
#define __NR_ia32_tgkill 270 #define __NR_ia32_tgkill 270
#define __NR_ia32_utimes 271 #define __NR_ia32_utimes 271
#define __NR_ia32_fadvise64_64 272
#define IA32_NR_syscalls 275 /* must be > than biggest syscall! */ #define IA32_NR_syscalls 275 /* must be > than biggest syscall! */
......
...@@ -11,16 +11,15 @@ struct x8664_pda { ...@@ -11,16 +11,15 @@ struct x8664_pda {
struct task_struct *pcurrent; /* Current process */ struct task_struct *pcurrent; /* Current process */
unsigned long data_offset; /* Per cpu data offset from linker address */ unsigned long data_offset; /* Per cpu data offset from linker address */
struct x8664_pda *me; /* Pointer to itself */ struct x8664_pda *me; /* Pointer to itself */
unsigned long kernelstack; /* TOS for current process */ unsigned long kernelstack; /* top of kernel stack for current */
unsigned long oldrsp; /* user rsp for system call */ unsigned long oldrsp; /* user rsp for system call */
unsigned long irqrsp; /* Old rsp for interrupts. */ unsigned long irqrsp; /* Old rsp for interrupts. */
int irqcount; /* Irq nesting counter. Starts with -1 */ int irqcount; /* Irq nesting counter. Starts with -1 */
int cpunumber; /* Logical CPU number */ int cpunumber; /* Logical CPU number */
char *irqstackptr; /* top of irqstack */ char *irqstackptr; /* top of irqstack */
unsigned long volatile *level4_pgt; unsigned long volatile *level4_pgt; /* Per CPU top level page table */
unsigned int __softirq_pending; unsigned int __softirq_pending;
unsigned int __nmi_count; /* arch dependent */ unsigned int __nmi_count; /* number of NMIs on this CPU */
struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
struct mm_struct *active_mm; struct mm_struct *active_mm;
int mmu_state; int mmu_state;
unsigned apic_timer_irqs; unsigned apic_timer_irqs;
......
...@@ -380,4 +380,25 @@ static inline void prefetchw(void *x) ...@@ -380,4 +380,25 @@ static inline void prefetchw(void *x)
ti->task; \ ti->task; \
}) })
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
/* Opteron nops */
#define K8_NOP1 ".byte 0x90\n"
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4
#define ASM_NOP_MAX 8
#endif /* __ASM_X86_64_PROCESSOR_H */ #endif /* __ASM_X86_64_PROCESSOR_H */
...@@ -126,6 +126,17 @@ extern void load_gs_index(unsigned); ...@@ -126,6 +126,17 @@ extern void load_gs_index(unsigned);
:"r" ((unsigned long) value)) :"r" ((unsigned long) value))
#ifdef __KERNEL__
struct alt_instr {
__u8 *instr; /* original instruction */
__u8 *replacement;
__u8 cpuid; /* cpuid bit set for replacement */
__u8 instrlen; /* length of original instruction */
__u8 replacementlen; /* length of new instruction, <= instrlen */
__u8 pad[5];
};
#endif
/* /*
* Clear and set 'TS' bit respectively * Clear and set 'TS' bit respectively
*/ */
......
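check_bugs() now ends by calling alternative_instructions(), whose implementation is not part of this diff. A minimal sketch of what such a pass does with the alt_instr records declared above (illustrative only: the linker-provided section bounds and the single-byte NOP padding are assumptions, and the real code would prefer the multi-byte K8_NOP macros from processor.h):

#include <linux/string.h>       /* memcpy, memset */
#include <asm/cpufeature.h>     /* boot_cpu_has(), X86_FEATURE_* bits */
#include <asm/system.h>         /* struct alt_instr */

/* Assumed to be provided by the linker script around .altinstructions. */
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];

static void alternative_instructions_sketch(void)
{
        struct alt_instr *a;

        for (a = __alt_instructions; a < __alt_instructions_end; a++) {
                if (!boot_cpu_has(a->cpuid))
                        continue;
                /* Copy the replacement over the original bytes... */
                memcpy(a->instr, a->replacement, a->replacementlen);
                /* ...and pad the remainder (replacementlen <= instrlen)
                 * with NOPs so the patched site stays well formed. */
                memset(a->instr + a->replacementlen, 0x90,
                       a->instrlen - a->replacementlen);
        }
}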
...@@ -60,7 +60,7 @@ struct thread_info { ...@@ -60,7 +60,7 @@ struct thread_info {
static inline struct thread_info *current_thread_info(void) static inline struct thread_info *current_thread_info(void)
{ {
struct thread_info *ti; struct thread_info *ti;
ti = (void *)read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE; ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
return ti; return ti;
} }
......
...@@ -256,12 +256,12 @@ static inline int __copy_from_user(void *dst, const void *src, unsigned size) ...@@ -256,12 +256,12 @@ static inline int __copy_from_user(void *dst, const void *src, unsigned size)
case 10: case 10:
__get_user_asm(*(u64*)dst,(u64*)src,ret,"q","","=r",16); __get_user_asm(*(u64*)dst,(u64*)src,ret,"q","","=r",16);
if (ret) return ret; if (ret) return ret;
__get_user_asm(*(u16*)(8+dst),(u16*)(8+src),ret,"w","w","=r",2); __get_user_asm(*(u16*)(8+(char*)dst),(u16*)(8+(char*)src),ret,"w","w","=r",2);
return ret; return ret;
case 16: case 16:
__get_user_asm(*(u64*)dst,(u64*)src,ret,"q","","=r",16); __get_user_asm(*(u64*)dst,(u64*)src,ret,"q","","=r",16);
if (ret) return ret; if (ret) return ret;
__get_user_asm(*(u64*)(8+dst),(u64*)(8+src),ret,"q","","=r",8); __get_user_asm(*(u64*)(8+(char*)dst),(u64*)(8+(char*)src),ret,"q","","=r",8);
return ret; return ret;
default: default:
return copy_user_generic(dst,src,size); return copy_user_generic(dst,src,size);
......
...@@ -36,7 +36,7 @@ typedef struct capi_register_params { /* CAPI_REGISTER */ ...@@ -36,7 +36,7 @@ typedef struct capi_register_params { /* CAPI_REGISTER */
#define CAPI_MANUFACTURER_LEN 64 #define CAPI_MANUFACTURER_LEN 64
#define CAPI_GET_MANUFACTURER _IOWR('C',0x06,CAPI_MANUFACTURER_LEN) #define CAPI_GET_MANUFACTURER _IOWR('C',0x06,int) /* broken: wanted size 64 (CAPI_MANUFACTURER_LEN) */
/* /*
* CAPI_GET_VERSION * CAPI_GET_VERSION
...@@ -56,7 +56,7 @@ typedef struct capi_version { ...@@ -56,7 +56,7 @@ typedef struct capi_version {
*/ */
#define CAPI_SERIAL_LEN 8 #define CAPI_SERIAL_LEN 8
#define CAPI_GET_SERIAL _IOWR('C',0x08, CAPI_SERIAL_LEN) #define CAPI_GET_SERIAL _IOWR('C',0x08,int) /* broken: wanted size 8 (CAPI_SERIAL_LEN) */
/* /*
* CAPI_GET_PROFILE * CAPI_GET_PROFILE
......
...@@ -20,8 +20,8 @@ ...@@ -20,8 +20,8 @@
#define I8K_PROC "/proc/i8k" #define I8K_PROC "/proc/i8k"
#define I8K_PROC_FMT "1.0" #define I8K_PROC_FMT "1.0"
#define I8K_BIOS_VERSION _IOR ('i', 0x80, 4) #define I8K_BIOS_VERSION _IOR ('i', 0x80, int) /* broken: meant 4 bytes */
#define I8K_MACHINE_ID _IOR ('i', 0x81, 16) #define I8K_MACHINE_ID _IOR ('i', 0x81, int) /* broken: meant 16 bytes */
#define I8K_POWER_STATUS _IOR ('i', 0x82, size_t) #define I8K_POWER_STATUS _IOR ('i', 0x82, size_t)
#define I8K_FN_STATUS _IOR ('i', 0x83, size_t) #define I8K_FN_STATUS _IOR ('i', 0x83, size_t)
#define I8K_GET_TEMP _IOR ('i', 0x84, size_t) #define I8K_GET_TEMP _IOR ('i', 0x84, size_t)
......
...@@ -65,6 +65,7 @@ ...@@ -65,6 +65,7 @@
#define PT_TRACE_EXIT 0x00000200 #define PT_TRACE_EXIT 0x00000200
#define PT_TRACE_MASK 0x000003f4 #define PT_TRACE_MASK 0x000003f4
#define PT_SINGLESTEP 0x80000000 /* single stepping (used on ARM) */
#include <linux/compiler.h> /* For unlikely. */ #include <linux/compiler.h> /* For unlikely. */
#include <linux/sched.h> /* For struct task_struct. */ #include <linux/sched.h> /* For struct task_struct. */
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
#define TOSH_PROC "/proc/toshiba" #define TOSH_PROC "/proc/toshiba"
#define TOSH_DEVICE "/dev/toshiba" #define TOSH_DEVICE "/dev/toshiba"
#define TOSH_SMM _IOWR('t', 0x90, 24) #define TOSH_SMM _IOWR('t', 0x90, int) /* broken: meant 24 bytes */
typedef struct { typedef struct {
unsigned int eax; unsigned int eax;
......
...@@ -145,9 +145,16 @@ struct video_info { ...@@ -145,9 +145,16 @@ struct video_info {
/* If changing this, vgatypes.h must also be changed (for X driver) */ /* If changing this, vgatypes.h must also be changed (for X driver) */
/* TW: ioctl for identifying and giving some info (esp. memory heap start) */ /* TW: ioctl for identifying and giving some info (esp. memory heap start) */
#define SISFB_GET_INFO _IOR('n',0xF8,sizeof(__u32))
#define SISFB_GET_VBRSTATUS _IOR('n',0xF9,sizeof(__u32)) /*
* NOTE! The ioctl types used to be "size_t" by mistake, but were
* really meant to be __u32. Changed to "__u32" even though that
* changes the value on 64-bit architectures, because the value
* (with a 4-byte size) is also hardwired in vgatypes.h for user
* space exports. So "__u32" is actually more compatible, duh!
*/
#define SISFB_GET_INFO _IOR('n',0xF8,__u32)
#define SISFB_GET_VBRSTATUS _IOR('n',0xF9,__u32)
/* TW: Structure argument for SISFB_GET_INFO ioctl */ /* TW: Structure argument for SISFB_GET_INFO ioctl */
typedef struct _SISFB_INFO sisfb_info, *psisfb_info; typedef struct _SISFB_INFO sisfb_info, *psisfb_info;
......
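The practical point of the note above is that sizeof(type) is baked into the command value, so a size_t argument would encode 4 on 32-bit and 8 on 64-bit kernels, while __u32 stays at 4 everywhere. A quick user-space check (using uint32_t in place of __u32):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <linux/ioctl.h>

int main(void)
{
        /* Always 4, so the number matches what vgatypes.h hardwires. */
        printf("u32 size field:    %u\n",
               (unsigned)_IOC_SIZE(_IOR('n', 0xF8, uint32_t)));
        /* 4 on 32-bit but 8 on 64-bit, changing the command value. */
        printf("size_t size field: %u\n",
               (unsigned)_IOC_SIZE(_IOR('n', 0xF8, size_t)));
        return 0;
}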
...@@ -60,12 +60,11 @@ char modprobe_path[256] = "/sbin/modprobe"; ...@@ -60,12 +60,11 @@ char modprobe_path[256] = "/sbin/modprobe";
*/ */
int request_module(const char *fmt, ...) int request_module(const char *fmt, ...)
{ {
#define MODULENAME_SIZE 32
va_list args; va_list args;
char module_name[MODULENAME_SIZE]; char module_name[MODULE_NAME_LEN];
unsigned int max_modprobes; unsigned int max_modprobes;
int ret; int ret;
char *argv[] = { modprobe_path, "--", module_name, NULL }; char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
static char *envp[] = { "HOME=/", static char *envp[] = { "HOME=/",
"TERM=linux", "TERM=linux",
"PATH=/sbin:/usr/sbin:/bin:/usr/bin", "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
...@@ -75,9 +74,9 @@ int request_module(const char *fmt, ...) ...@@ -75,9 +74,9 @@ int request_module(const char *fmt, ...)
static int kmod_loop_msg; static int kmod_loop_msg;
va_start(args, fmt); va_start(args, fmt);
ret = vsnprintf(module_name, MODULENAME_SIZE, fmt, args); ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
va_end(args); va_end(args);
if (ret >= MODULENAME_SIZE) if (ret >= MODULE_NAME_LEN)
return -ENAMETOOLONG; return -ENAMETOOLONG;
/* If modprobe needs a service that is in a module, we get a recursive /* If modprobe needs a service that is in a module, we get a recursive
......
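The sizing change above leans on vsnprintf() reporting the length the formatted string would have had, so a return value >= the buffer size signals truncation. The same idiom in isolation (NAME_LEN is a hypothetical stand-in for MODULE_NAME_LEN):

#include <stdarg.h>
#include <stdio.h>
#include <errno.h>

#define NAME_LEN 64             /* stand-in for MODULE_NAME_LEN */

static int format_name(char *buf, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = vsnprintf(buf, NAME_LEN, fmt, args);
        va_end(args);

        /* vsnprintf returns the untruncated length, so anything
         * >= NAME_LEN means the name did not fit -- the same test
         * request_module() uses before returning -ENAMETOOLONG. */
        if (ret >= NAME_LEN)
                return -ENAMETOOLONG;
        return 0;
}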
...@@ -1211,7 +1211,8 @@ static void layout_sections(struct module *mod, ...@@ -1211,7 +1211,8 @@ static void layout_sections(struct module *mod,
if ((s->sh_flags & masks[m][0]) != masks[m][0] if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1]) || (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL || s->sh_entsize != ~0UL
|| strstr(secstrings + s->sh_name, ".init")) || strncmp(secstrings + s->sh_name,
".init", 5) == 0)
continue; continue;
s->sh_entsize = get_offset(&mod->core_size, s); s->sh_entsize = get_offset(&mod->core_size, s);
DEBUGP("\t%s\n", secstrings + s->sh_name); DEBUGP("\t%s\n", secstrings + s->sh_name);
...@@ -1228,7 +1229,8 @@ static void layout_sections(struct module *mod, ...@@ -1228,7 +1229,8 @@ static void layout_sections(struct module *mod,
if ((s->sh_flags & masks[m][0]) != masks[m][0] if ((s->sh_flags & masks[m][0]) != masks[m][0]
|| (s->sh_flags & masks[m][1]) || (s->sh_flags & masks[m][1])
|| s->sh_entsize != ~0UL || s->sh_entsize != ~0UL
|| !strstr(secstrings + s->sh_name, ".init")) || strncmp(secstrings + s->sh_name,
".init", 5) != 0)
continue; continue;
s->sh_entsize = (get_offset(&mod->init_size, s) s->sh_entsize = (get_offset(&mod->init_size, s)
| INIT_OFFSET_MASK); | INIT_OFFSET_MASK);
...@@ -1434,7 +1436,7 @@ static struct module *load_module(void __user *umod, ...@@ -1434,7 +1436,7 @@ static struct module *load_module(void __user *umod,
} }
#ifndef CONFIG_MODULE_UNLOAD #ifndef CONFIG_MODULE_UNLOAD
/* Don't load .exit sections */ /* Don't load .exit sections */
if (strstr(secstrings+sechdrs[i].sh_name, ".exit")) if (strncmp(secstrings+sechdrs[i].sh_name, ".exit", 5) == 0)
sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC; sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif #endif
} }
......
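The switch from strstr() to strncmp() above tightens the test from "contains .init anywhere in the name" to "starts with .init". A two-line illustration (the section name is hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *name = ".data.init_task";   /* hypothetical section name */

        /* strstr matches because ".init" appears somewhere in the name. */
        printf("strstr:  %d\n", strstr(name, ".init") != NULL);     /* 1 */
        /* strncmp only accepts a genuine ".init" prefix. */
        printf("strncmp: %d\n", strncmp(name, ".init", 5) == 0);    /* 0 */
        return 0;
}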
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <linux/security.h> #include <linux/security.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h> #include <linux/swapops.h>
spinlock_t swaplock = SPIN_LOCK_UNLOCKED; spinlock_t swaplock = SPIN_LOCK_UNLOCKED;
......
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755