Commit d05e5732 authored by Andi Kleen, committed by Linus Torvalds

[PATCH] x86-64 updates for 2.5.44

A few updates for x86-64 in 2.5.44. Some of the bugs fixed were serious.

- Don't count ACPI mappings in end_pfn. This shrinks mem_map a lot
  on many setups (see the sketch after this list).
- Fix mem= option. Remove custom mapping support.
- Revert the per_cpu implementation to the generic version. The optimized one
  that used %gs directly triggered too many toolchain problems and was a
  constant source of bugs.
- Make sure pgd_offset_k works correctly for vmalloc mappings. This makes
  modules work again properly.
- Export pci dma symbols
- Export other symbols to make more modules work
- Don't drop physical address bits above bit 32 on IOMMU free.
- Add more prototypes to fix warnings
- Resync pci subsystem with i386
- Fix pci dma kernel option parsing.
- Do PCI peer bus scanning after ACPI in case it missed some busses
  (that's a workaround - 2.5 ACPI seems to have some problems here that
  I need to investigate more closely)
- Remove the .eh_frame section when linking. This saves several hundred KB
  in the bzImage.
- Fix MTRR initialization. It works properly now on SMP again.
- Fix kernel option parsing, it was broken by section name changes in
  init.h
- A few other cleanups and fixes.
- Fix nonatomic warning in ioport.c
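
For the first item, a minimal userspace sketch of the end_pfn/end_pfn_map
split (toy e820 layout and types, not the kernel's own; the real logic is in
e820_end_of_ram() below):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    enum { E820_RAM = 1, E820_ACPI = 3 };
    struct entry { uint64_t addr, size; int type; };

    int main(void)
    {
        /* Toy map: 2GB of RAM plus an ACPI table region up at 3GB. */
        struct entry map[] = {
            { 0,          2ULL << 30, E820_RAM  },
            { 3ULL << 30, 1 << 20,    E820_ACPI },
        };
        uint64_t end_pfn = 0, end_pfn_map = 0;
        for (int i = 0; i < 2; i++) {
            uint64_t end = (map[i].addr + map[i].size) >> PAGE_SHIFT;
            if (map[i].type == E820_RAM && end > end_pfn)
                end_pfn = end;      /* sizes mem_map: RAM only */
            if (end > end_pfn_map)
                end_pfn_map = end;  /* sizes the direct mapping */
        }
        printf("end_pfn=%llu end_pfn_map=%llu\n",
               (unsigned long long)end_pfn,
               (unsigned long long)end_pfn_map);
        return 0;
    }

Only end_pfn feeds the mem_map allocation, which is why excluding ACPI
regions from it shrinks mem_map.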
parent ecf2c214
......@@ -40,8 +40,10 @@ LDFLAGS_vmlinux := -e stext
CFLAGS += -mno-red-zone
CFLAGS += -mcmodel=kernel
CFLAGS += -pipe
# this makes reading assembly source easier
# this makes reading assembly source easier, but produces worse code
# disable for production kernel
CFLAGS += -fno-reorder-blocks
# should lower this a lot and see how much .text it saves
CFLAGS += -finline-limit=2000
#CFLAGS += -g
......
......@@ -4,7 +4,7 @@
EXTRA_TARGETS := head.o head64.o init_task.o
export-objs := mtrr.o x8664_ksyms.o pci-gart.o
export-objs := mtrr.o x8664_ksyms.o pci-gart.o pci-dma.o
obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
ptrace.o i8259.o ioport.o ldt.o setup.o time.o sys_x86_64.o \
......
......@@ -43,6 +43,7 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io_apic.h>
#include <asm/proto.h>
extern int acpi_disabled;
......@@ -57,15 +58,6 @@ extern int acpi_disabled;
enum acpi_irq_model_id acpi_irq_model;
/*
* Use reserved fixmap pages for physical-to-virtual mappings of ACPI tables.
* Note that the same range is used for each table, so tables that need to
* persist should be memcpy'd.
*/
extern unsigned long end_pfn;
/* rely on all ACPI tables being in the direct mapping */
char *
__acpi_map_table (
......@@ -75,7 +67,7 @@ __acpi_map_table (
if (!phys_addr || !size)
return NULL;
if (phys_addr < (end_pfn << PAGE_SHIFT))
if (phys_addr < (end_pfn_map << PAGE_SHIFT))
return __va(phys_addr);
printk("acpi mapping beyond end_pfn: %lx > %lx\n", phys_addr, end_pfn<<PAGE_SHIFT);
......
......@@ -668,7 +668,7 @@ void __init init_apic_mappings(void)
apic_phys = mp_lapic_addr;
set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
Dprintk("mapped APIC to %08lx (%08lx)\n", APIC_BASE, apic_phys);
Dprintk("mapped APIC to %16lx (%16lx)\n", APIC_BASE, apic_phys);
/*
* Fetch the APIC ID of the BSP in case we have a
......@@ -690,7 +690,7 @@ void __init init_apic_mappings(void)
ioapic_phys = __pa(ioapic_phys);
}
set_fixmap_nocache(idx, ioapic_phys);
Dprintk("mapped IOAPIC to %08lx (%08lx)\n",
Dprintk("mapped IOAPIC to %016lx (%016lx)\n",
__fix_to_virt(idx), ioapic_phys);
idx++;
}
......
......@@ -22,6 +22,18 @@
extern unsigned long table_start, table_end;
extern char _end[];
/*
* end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
* The direct mapping extends to end_pfn_map, so that we can directly access
* ACPI and other tables without having to play with fixmaps.
*/
unsigned long end_pfn_map;
/*
* Last pfn which the user wants to use.
*/
unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
extern struct resource code_resource, data_resource, vram_resource;
/* Check for some hardcoded bad areas that early boot is not allowed to touch */
......@@ -137,21 +149,32 @@ void __init e820_end_of_ram(void)
{
int i;
end_pfn = 0;
for (i = 0; i < e820.nr_map; i++) {
struct e820entry *ei = &e820.map[i];
unsigned long start, end;
/* count all types of areas for now to map ACPI easily */
start = round_up(ei->addr, PAGE_SIZE);
end = round_down(ei->addr + ei->size, PAGE_SIZE);
if (start >= end)
continue;
if (ei->type == E820_RAM) {
if (end > end_pfn<<PAGE_SHIFT)
end_pfn = end>>PAGE_SHIFT;
} else {
if (end > end_pfn_map<<PAGE_SHIFT)
end_pfn_map = end>>PAGE_SHIFT;
}
}
if (end_pfn > MAXMEM >> PAGE_SHIFT)
end_pfn = MAXMEM >> PAGE_SHIFT;
if (end_pfn > end_pfn_map)
end_pfn_map = end_pfn;
if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
end_pfn_map = MAXMEM>>PAGE_SHIFT;
if (end_pfn > end_user_pfn)
end_pfn = end_user_pfn;
if (end_pfn > end_pfn_map)
end_pfn = end_pfn_map;
}
/*
......@@ -482,46 +505,23 @@ void __init setup_memory_region(void)
e820_print_map(who);
}
static int usermem __initdata;
void __init parse_memopt(char *p)
void __init parse_memopt(char *p, char **from)
{
if (!strncmp(p,"exactmap",8)) {
e820.nr_map = 0;
usermem = 1;
} else {
/* If the user specifies memory size, we
* blow away any automatically generated
* size
*/
unsigned long long start_at, mem_size;
if (usermem == 0) {
/* first time in: zap the whitelist
* and reinitialize it with the
* standard low-memory region.
/*
* mem=XXX[kKmM] limits kernel memory to XXX+1MB
*
* It would be more logical to count from 0 instead of from
* HIGH_MEMORY, but we keep that for now for i386 compatibility.
*
* No support for custom mapping like i386. The reason is
* that we need to read the e820 map anyways to handle the
* ACPI mappings in the direct map. Also on x86-64 there
* should be always a good e820 map. This is only an upper
* limit, you cannot force usage of memory not in e820.
*
* -AK
*/
e820.nr_map = 0;
usermem = 1;
add_memory_region(0, LOWMEMSIZE(), E820_RAM);
}
mem_size = memparse(p, &p);
if (*p == '@')
start_at = memparse(p+1, &p);
else {
start_at = HIGH_MEMORY;
mem_size -= HIGH_MEMORY;
usermem=0;
}
add_memory_region(start_at, mem_size, E820_RAM);
}
end_user_pfn = memparse(p, from) + HIGH_MEMORY;
end_user_pfn >>= PAGE_SHIFT;
}
void __init print_user_map(void)
{
if (usermem) {
printk(KERN_INFO "user-defined physical RAM map:\n");
e820_print_map("user");
}
}
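
A small sketch of the new mem= semantics, assuming the same 1MB HIGH_MEMORY
bias as i386 (values illustrative; memparse() is modeled by a constant):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT  12
    #define HIGH_MEMORY (1024 * 1024)

    int main(void)
    {
        uint64_t mem_size = 512ULL << 20;  /* what memparse("512M") yields */
        uint64_t end_user_pfn = (mem_size + HIGH_MEMORY) >> PAGE_SHIFT;
        /* e820_end_of_ram() later clamps end_pfn to this value, so RAM
           above it is ignored; the e820 map itself is left untouched. */
        printf("end_user_pfn = %llu\n", (unsigned long long)end_user_pfn);
        return 0;
    }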
......@@ -64,7 +64,6 @@ asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on)
if (turn_on && !capable(CAP_SYS_RAWIO))
return -EPERM;
tss = init_tss + get_cpu();
if (!t->io_bitmap_ptr) {
t->io_bitmap_ptr = kmalloc((IO_BITMAP_SIZE+1)*4, GFP_KERNEL);
if (!t->io_bitmap_ptr) {
......@@ -72,8 +71,11 @@ asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on)
goto out;
}
memset(t->io_bitmap_ptr,0xff,(IO_BITMAP_SIZE+1)*4);
tss = init_tss + get_cpu();
tss->io_map_base = IO_BITMAP_OFFSET;
put_cpu();
}
tss = init_tss + get_cpu();
/*
* do it in the per-thread copy and in the TSS ...
......
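
The reordering in the ioport.c hunk above is the changelog's "nonatomic
warning" fix: get_cpu() disables preemption, and kmalloc(GFP_KERNEL) may
sleep, which is not allowed with preemption off. A simplified before/after
sketch of the pattern (not the full function):

    /* Before: sleeping allocation inside a preempt-off region (bug). */
    tss = init_tss + get_cpu();                    /* preemption off */
    if (!t->io_bitmap_ptr)
        t->io_bitmap_ptr = kmalloc(size, GFP_KERNEL);   /* may sleep */

    /* After: allocate first, hold the CPU only for the short updates. */
    if (!t->io_bitmap_ptr) {
        t->io_bitmap_ptr = kmalloc(size, GFP_KERNEL);   /* safe */
        tss = init_tss + get_cpu();
        tss->io_map_base = IO_BITMAP_OFFSET;
        put_cpu();
    }
    tss = init_tss + get_cpu();                    /* preemption off again */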
......@@ -28,6 +28,7 @@
v2.02 July 2002 Dave Jones <davej@suse.de>
Fix gentry inconsistencies between kernel/userspace.
More casts to clean up warnings.
Andi Kleen - rework initialization.
*/
#include <linux/types.h>
......@@ -609,17 +610,6 @@ int mtrr_add_page (u64 base, u32 size, unsigned int type, char increment)
return -EINVAL;
}
#if defined(__x86_64__) && defined(CONFIG_AGP)
/* {
agp_kern_info info;
if (type != MTRR_TYPE_UNCACHABLE && agp_copy_info(&info) >= 0 &&
base<<PAGE_SHIFT >= info.aper_base &&
(base<<PAGE_SHIFT)+(size<<PAGE_SHIFT) >=
info.aper_base+info.aper_size*1024*1024)
printk(KERN_INFO "%s[%d] setting conflicting mtrr into agp aperture\n",current->comm,current->pid);
}*/
#endif
/* Check upper bits of base and last are equal and lower bits are 0
for base and 1 for last */
last = base + size - 1;
......@@ -646,7 +636,7 @@ int mtrr_add_page (u64 base, u32 size, unsigned int type, char increment)
}
if (base & (size_or_mask>>PAGE_SHIFT)) {
printk (KERN_WARNING "mtrr: base(%Lx) exceeds the MTRR width(%Lx)\n",
printk (KERN_WARNING "mtrr: base(%lx) exceeds the MTRR width(%lx)\n",
(unsigned long) base,
(unsigned long) (size_or_mask>>PAGE_SHIFT));
return -EINVAL;
......@@ -1226,12 +1216,13 @@ static void compute_ascii (void)
EXPORT_SYMBOL (mtrr_add);
EXPORT_SYMBOL (mtrr_del);
static void __init mtrr_setup (void)
{
printk ("mtrr: v%s)\n", MTRR_VERSION);
/* If you want to use other vendors please port over the modular
framework from i386 first. */
if (!cpu_has_mtrr || boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
return;
if (cpu_has_mtrr) {
/* Query the width (in bits) of the physical
addressable memory on the Hammer family. */
if ((cpuid_eax (0x80000000) >= 0x80000008)) {
......@@ -1244,8 +1235,6 @@ static void __init mtrr_setup (void)
*/
size_and_mask = (~size_or_mask) & 0x000ffffffffff000L;
}
printk ("mtrr: detected mtrr type: x86-64\n");
}
}
#ifdef CONFIG_SMP
......@@ -1253,48 +1242,42 @@ static void __init mtrr_setup (void)
static volatile u32 smp_changes_mask __initdata = 0;
static struct mtrr_state smp_mtrr_state __initdata = { 0, 0 };
void __init mtrr_init_boot_cpu (void)
#endif /* CONFIG_SMP */
void mtrr_init_cpu(int cpu)
{
#ifndef CONFIG_SMP
if (cpu == 0)
mtrr_setup();
#else
if (cpu == 0) {
mtrr_setup();
get_mtrr_state (&smp_mtrr_state);
}
void __init mtrr_init_secondary_cpu (void)
{
} else {
u64 mask;
int count;
struct set_mtrr_context ctxt;
/* Note that this is not ideal, since the cache is only flushed/disabled
for this CPU while the MTRRs are changed, but changing this requires
more invasive changes to the way the kernel boots */
/* Note that this is not ideal, since the cache is
only flushed/disabled for this CPU while the MTRRs
are changed, but changing this requires more
invasive changes to the way the kernel boots */
set_mtrr_prepare (&ctxt);
mask = set_mtrr_state (&smp_mtrr_state, &ctxt);
set_mtrr_done (&ctxt);
/* Use the atomic bitops to update the global mask */
for (count = 0; count < sizeof mask * 8; ++count) {
if (mask & 0x01)
for (count = 0; count < (sizeof mask) * 8; ++count) {
if (mask & 1)
set_bit (count, &smp_changes_mask);
mask >>= 1;
}
}
#endif
}
#endif /* CONFIG_SMP */
int __init mtrr_init (void)
static int __init mtrr_init (void)
{
#ifdef CONFIG_SMP
/* mtrr_setup() should already have been called from mtrr_init_boot_cpu() */
finalize_mtrr_state (&smp_mtrr_state);
mtrr_state_warn (smp_changes_mask);
#else
mtrr_setup();
#endif
#ifdef CONFIG_PROC_FS
proc_root_mtrr = create_proc_entry ("mtrr", S_IWUSR | S_IRUGO, &proc_root);
if (proc_root_mtrr) {
......@@ -1308,5 +1291,13 @@ int __init mtrr_init (void)
&mtrr_fops, NULL);
#endif
init_table ();
#ifdef CONFIG_SMP
finalize_mtrr_state (&smp_mtrr_state);
mtrr_state_warn (smp_changes_mask);
#endif
return 0;
}
__initcall(mtrr_init);
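
A rough sketch of the reworked MTRR initialization flow (the call ordering
is inferred from the hunks above and from smp_callin() below, not a
verified boot trace):

    mtrr_init_cpu(0);      /* boot CPU: mtrr_setup(); on SMP also
                              get_mtrr_state(&smp_mtrr_state)          */
    mtrr_init_cpu(cpuid);  /* each secondary, from smp_callin():
                              set_mtrr_state(&smp_mtrr_state, &ctxt)   */
    mtrr_init();           /* late __initcall: /proc/mtrr setup; on SMP
                              finalize_mtrr_state() + mtrr_state_warn() */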
......@@ -25,6 +25,8 @@
#include <asm/mpspec.h>
#include <asm/nmi.h>
extern void default_do_nmi(struct pt_regs *);
unsigned int nmi_watchdog = NMI_LOCAL_APIC;
static unsigned int nmi_hz = HZ;
unsigned int nmi_perfctr_msr; /* the MSR to reset in NMI handler */
......
......@@ -6,6 +6,7 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
dma_addr_t bad_dma_address = -1UL;
......@@ -32,27 +33,24 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
BUG_ON(direction == PCI_DMA_NONE);
/*
*
*/
for (i = 0; i < nents; i++ ) {
struct scatterlist *s = &sg[i];
if (s->page) {
BUG_ON(!s->page);
s->dma_address = pci_map_page(hwdev, s->page, s->offset,
s->length, direction);
} else
BUG();
if (unlikely(s->dma_address == bad_dma_address))
goto error;
}
return nents;
error:
if (unlikely(s->dma_address == bad_dma_address)) {
pci_unmap_sg(hwdev, sg, i, direction);
return 0;
}
}
return nents;
}
EXPORT_SYMBOL(pci_map_sg);
/* Unmap a set of streaming mode DMA translations.
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
......@@ -68,3 +66,5 @@ void pci_unmap_sg(struct pci_dev *dev, struct scatterlist *sg,
pci_unmap_single(dev, s->dma_address, s->length, dir);
}
}
EXPORT_SYMBOL(pci_unmap_sg);
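
With the reworked error path, a failure unwinds all partial mappings and
returns 0. A hedged usage sketch (hypothetical driver fragment; pdev, sg
and nents are placeholders):

    int n = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
    if (n == 0)
        return -ENOMEM;      /* partial mappings already undone */
    /* ... program the device with the n DMA segments ... */
    pci_unmap_sg(pdev, sg, nents, PCI_DMA_TODEVICE);  /* original nents */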
......@@ -62,7 +62,7 @@ static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
#define GPTE_VALID 1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x,flag) (((x) & 0xfffffff0) | ((x) >> 28) | GPTE_VALID | (flag))
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((x) & 0xff0) << 28))
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
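
This cast is the ">32bit on iommu free" fix: a GART PTE is a 32-bit value
whose bits 4..11 carry physical address bits 32..39, and without the u64
cast the << 28 happens in 32-bit arithmetic, so those bits are always lost.
A runnable userspace demonstration (uint64_t standing in for u64):

    #include <stdio.h>
    #include <stdint.h>

    #define DECODE_OLD(x) (((x) & 0xfffff000) | (((x) & 0xff0) << 28))
    #define DECODE_NEW(x) (((x) & 0xfffff000) | (((uint64_t)(x) & 0xff0) << 28))

    int main(void)
    {
        /* PTE for a page above 4GB: bit 4 encodes address bit 32. */
        uint32_t pte = 0x12345010;
        printf("old: %#llx\n", (unsigned long long)DECODE_OLD(pte));
        printf("new: %#llx\n", (unsigned long long)DECODE_NEW(pte));
        return 0;   /* old: 0x12345000 (bit 32 gone), new: 0x112345000 */
    }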
#define for_all_nb(dev) \
pci_for_each_dev(dev) \
......@@ -524,7 +524,7 @@ void __init pci_iommu_init(void)
leak turn on simple iommu leak tracing (only when CONFIG_IOMMU_LEAK is on)
memaper[=order] allocate an own aperture over RAM with size 32MB^order.
*/
__init int iommu_setup(char *opt)
__init int iommu_setup(char *opt, char **end)
{
int arg;
char *p = opt;
......@@ -551,10 +551,11 @@ __init int iommu_setup(char *opt)
if (isdigit(*p) && get_option(&p, &arg))
iommu_size = arg;
do {
if (*p == ' ' || *p == 0)
if (*p == ' ' || *p == 0) {
*end = p;
return 0;
}
} while (*p++ != ',');
}
return 1;
}
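
The added char **end parameter lets the option parser report how much of
the command line it consumed, so parse_cmdline_early() can resume scanning
after the option instead of re-tokenizing it. A runnable sketch of the
convention (simplified parser; toy_setup is a hypothetical name):

    #include <stdio.h>
    #include <string.h>

    /* Consume one option value and advance *end past it. */
    static int toy_setup(char *opt, char **end)
    {
        char *p = opt;
        while (*p && *p != ' ')
            p++;
        *end = p;      /* caller resumes scanning here */
        return 0;
    }

    int main(void)
    {
        char cmdline[] = "iommu=force console=ttyS0";
        char *from = cmdline;
        if (!strncmp(from, "iommu=", 6))
            toy_setup(from + 6, &from);
        printf("resume at:%s\n", from);   /* -> " console=ttyS0" */
        return 0;
    }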
......@@ -37,6 +37,7 @@
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
......@@ -196,11 +197,11 @@ static __init void parse_cmdline_early (char ** cmdline_p)
acpi_disabled = 1;
if (!memcmp(from, "mem=", 4))
parse_memopt(from+4);
parse_memopt(from+4, &from);
#ifdef CONFIG_GART_IOMMU
if (!memcmp(from,"iommu=",6)) {
iommu_setup(from+6);
iommu_setup(from+6, &from);
}
#endif
......@@ -214,7 +215,6 @@ static __init void parse_cmdline_early (char ** cmdline_p)
}
*to = '\0';
*cmdline_p = command_line;
print_user_map();
}
#ifndef CONFIG_DISCONTIGMEM
......
......@@ -21,6 +21,7 @@
#include <asm/smp.h>
#include <asm/i387.h>
#include <asm/percpu.h>
#include <asm/mtrr.h>
char x86_boot_params[2048] __initdata = {0,};
......
......@@ -238,7 +238,7 @@ get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (! sas_ss_flags(rsp) == 0)
if (sas_ss_flags(rsp) == 0)
rsp = current->sas_ss_sp + current->sas_ss_size;
}
......
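
The one-character signal.c change above fixes an operator-precedence bug:
"! sas_ss_flags(rsp) == 0" parses as "(!sas_ss_flags(rsp)) == 0", which is
true exactly when the flags are nonzero, the opposite of the intent. A
runnable demonstration:

    #include <stdio.h>

    int main(void)
    {
        int flags = 0;   /* sas_ss_flags() == 0: not on the alt stack yet */
        printf("buggy: %d\n", (!flags) == 0);  /* 0: alt stack NOT taken */
        printf("fixed: %d\n", flags == 0);     /* 1: alt stack taken */
        return 0;
    }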
......@@ -327,7 +327,7 @@ void __init smp_callin(void)
/*
* Must be done before calibration delay is computed
*/
mtrr_init_secondary_cpu ();
mtrr_init_cpu (cpuid);
#endif
/*
* Get our bogomips.
......@@ -784,10 +784,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
{
int apicid, cpu;
#ifdef CONFIG_MTRR
/* Must be done before other processors booted */
mtrr_init_boot_cpu ();
#endif
/*
* Initialize the logical to physical CPU number mapping
* and the per-CPU profiling counter/multiplier
......
......@@ -173,7 +173,7 @@ void show_trace(unsigned long *stack)
int i;
printk("\nCall Trace:");
i = 0;
i = 12;
estack_end = in_exception_stack(cpu, (unsigned long)stack);
if (estack_end) {
......
......@@ -28,6 +28,7 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kdebug.h>
#include <asm/unistd.h>
extern spinlock_t rtc_lock;
......@@ -145,6 +146,7 @@ EXPORT_SYMBOL(rtc_lock);
#undef strcmp
#undef bcopy
#undef strcpy
#undef strcat
extern void * memset(void *,int,__kernel_size_t);
extern size_t strlen(const char *);
......@@ -153,6 +155,10 @@ extern void * memmove(void * dest,const void *src,size_t count);
extern char * strcpy(char * dest,const char *src);
extern int strcmp(const char * cs,const char * ct);
extern void *memchr(const void *s, int c, size_t n);
extern void * memcpy(void *,const void *,__kernel_size_t);
extern void * __memcpy(void *,const void *,__kernel_size_t);
extern char * strcat(char *, const char *);
EXPORT_SYMBOL_NOVERS(memset);
EXPORT_SYMBOL_NOVERS(strlen);
EXPORT_SYMBOL_NOVERS(memmove);
......@@ -168,6 +174,15 @@ EXPORT_SYMBOL_NOVERS(strrchr);
EXPORT_SYMBOL_NOVERS(strnlen);
EXPORT_SYMBOL_NOVERS(memscan);
EXPORT_SYMBOL_NOVERS(bcopy);
EXPORT_SYMBOL_NOVERS(memcpy);
EXPORT_SYMBOL_NOVERS(__memcpy);
/* syscall export needed for misdesigned sound drivers. */
extern ssize_t sys_read(unsigned int fd, char * buf, size_t count);
extern off_t sys_lseek(unsigned int fd, off_t offset, unsigned int origin);
EXPORT_SYMBOL(sys_read);
EXPORT_SYMBOL(sys_lseek);
EXPORT_SYMBOL(sys_open);
EXPORT_SYMBOL(empty_zero_page);
......
......@@ -36,6 +36,7 @@
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
unsigned long start_pfn, end_pfn;
......@@ -171,7 +172,7 @@ static __init void *alloc_low_page(int *index, unsigned long *phys)
unsigned long pfn = start_pfn++, paddr;
void *adr;
if (pfn >= end_pfn)
if (pfn >= end_pfn_map)
panic("alloc_low_page: ran out of memory");
for (i = 0; temp_mappings[i].allocated; i++) {
if (!temp_mappings[i].pmd)
......@@ -239,7 +240,7 @@ void __init init_memory_mapping(void)
unsigned long end;
unsigned long next;
end = PAGE_OFFSET + (end_pfn * PAGE_SIZE);
end = PAGE_OFFSET + (end_pfn_map * PAGE_SIZE);
for (adr = PAGE_OFFSET; adr < end; adr = next) {
int map;
unsigned long pgd_phys;
......
......@@ -17,6 +17,8 @@ static int __init pci_acpi_init(void)
} else
printk(KERN_WARNING "PCI: Invalid ACPI-PCI IRQ routing table\n");
/* still scan manually in case ACPI forgot some bus */
pcibios_fixup_peer_bridges();
}
return 0;
......
......@@ -22,7 +22,7 @@
unsigned int pci_probe = PCI_PROBE_CONF1 | PCI_PROBE_CONF2;
int pcibios_last_bus = 0xfe; /* XXX */
int pcibios_last_bus = 0xff; /* XXX */
struct pci_bus *pci_root_bus = NULL;
struct pci_ops *pci_root_ops = NULL;
......
......@@ -296,7 +296,6 @@ static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
return 1;
}
#if 0 /* kept as reference */
/* Support for AMD756 PCI IRQ Routing
* Jhon H. Caicedo <jhcaiced@osso.org.co>
* Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
......@@ -304,8 +303,6 @@ static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, i
* The AMD756 pirq rules are nibble-based
* offset 0x56 0-3 PIRQA 4-7 PIRQB
* offset 0x57 0-3 PIRQC 4-7 PIRQD
*
* AMD8111 is similar NIY.
*/
static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
{
......@@ -315,14 +312,14 @@ static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq
{
irq = read_config_nybble(router, 0x56, pirq - 1);
}
printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
printk(KERN_INFO "AMD: dev %04x:%04x, router pirq : %d get irq : %2d\n",
dev->vendor, dev->device, pirq, irq);
return irq;
}
static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
{
printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
printk(KERN_INFO "AMD: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
dev->vendor, dev->device, pirq, irq);
if (pirq <= 4)
{
......@@ -330,13 +327,25 @@ static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq
}
return 1;
}
#endif
static struct irq_router pirq_routers[] = {
#if 0 /* all these do not exist on Hammer currently, but keep one example
for each. All these vendors have announced K8 chipsets, so we'll
eventually need a router for them. Luckily they tend to use the
same ones, so with luck just enabling the existing ones will work
when you know the final PCI ids. */
{ "ALI", PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, pirq_ali_get, pirq_ali_set },
{ "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, pirq_via_get, pirq_via_set },
{ "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, pirq_via_get, pirq_via_set },
{ "VIA", PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, pirq_via_get, pirq_via_set },
{ "SIS", PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, pirq_sis_get, pirq_sis_set },
#endif
{ "AMD756 VIPER", PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B,
pirq_amd756_get, pirq_amd756_set },
{ "default", 0, 0, NULL, NULL }
};
......@@ -348,14 +357,6 @@ static void __init pirq_find_router(void)
struct irq_routing_table *rt = pirq_table;
struct irq_router *r;
#ifdef CONFIG_PCI_BIOS
if (!rt->signature) {
printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
pirq_router = &pirq_bios_router;
return;
}
#endif
DBG("PCI: Attempting to find IRQ router for %04x:%04x\n",
rt->rtr_vendor, rt->rtr_device);
......@@ -528,38 +529,7 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
return 1;
}
static int __init pcibios_irq_init(void)
{
DBG("PCI: IRQ init\n");
if (pcibios_enable_irq)
return 0;
pirq_table = pirq_find_routing_table();
if (pirq_table) {
pirq_peer_trick();
pirq_find_router();
if (pirq_table->exclusive_irqs) {
int i;
for (i=0; i<16; i++)
if (!(pirq_table->exclusive_irqs & (1 << i)))
pirq_penalty[i] += 100;
}
/* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
if (io_apic_assign_pci_irqs)
pirq_table = NULL;
}
pcibios_enable_irq = pirq_enable_irq;
pcibios_fixup_irqs();
return 0;
}
subsys_initcall(pcibios_irq_init);
void __init pcibios_fixup_irqs(void)
static void __init pcibios_fixup_irqs(void)
{
struct pci_dev *dev;
u8 pin;
......@@ -625,6 +595,38 @@ void __init pcibios_fixup_irqs(void)
}
}
static int __init pcibios_irq_init(void)
{
DBG("PCI: IRQ init\n");
if (pcibios_enable_irq)
return 0;
pirq_table = pirq_find_routing_table();
if (pirq_table) {
pirq_peer_trick();
pirq_find_router();
if (pirq_table->exclusive_irqs) {
int i;
for (i=0; i<16; i++)
if (!(pirq_table->exclusive_irqs & (1 << i)))
pirq_penalty[i] += 100;
}
/* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
if (io_apic_assign_pci_irqs)
pirq_table = NULL;
}
pcibios_enable_irq = pirq_enable_irq;
pcibios_fixup_irqs();
return 0;
}
subsys_initcall(pcibios_irq_init);
void pcibios_penalize_isa_irq(int irq)
{
/*
......@@ -640,12 +642,8 @@ int pirq_enable_irq(struct pci_dev *dev)
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
char *msg;
if (io_apic_assign_pci_irqs)
msg = " Probably buggy MP table.";
else
msg = "";
printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
'A' + pin - 1, dev->slot_name, msg);
printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.\n",
'A' + pin - 1, dev->slot_name);
}
return 0;
......
......@@ -9,14 +9,14 @@
* Discover remaining PCI buses in case there are peer host bridges.
* We use the number of last PCI bus provided by the PCI BIOS.
*/
static void __devinit pcibios_fixup_peer_bridges(void)
void __devinit pcibios_fixup_peer_bridges(void)
{
int n;
struct pci_bus bus;
struct pci_dev dev;
u16 l;
if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff)
if (pcibios_last_bus <= 0 || pcibios_last_bus > 0xff)
return;
DBG("PCI: Peer bridge fixup\n");
for (n=0; n <= pcibios_last_bus; n++) {
......
......@@ -71,3 +71,6 @@ void pcibios_fixup_irqs(void);
int pirq_enable_irq(struct pci_dev *dev);
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
/* legacy.c */
extern void pcibios_fixup_peer_bridges(void);
......@@ -81,11 +81,11 @@ SECTIONS
. = ALIGN(4096); /* Init code and data */
__init_begin = .;
.text.init : { *(.text.init) }
.data.init : { *(.data.init) }
.init.text : { *(.init.text) }
.init.data : { *(.init.data) }
. = ALIGN(16);
__setup_start = .;
.setup.init : { *(.setup.init) }
.init.setup : { *(.init.setup) }
__setup_end = .;
__initcall_start = .;
.initcall.init : {
......@@ -109,8 +109,10 @@ SECTIONS
/* Sections to be discarded */
/DISCARD/ : {
*(.data.exit)
*(.exit.data)
*(.exit.text)
*(.exitcall.exit)
*(.eh_frame)
}
/* DWARF 2 */
......
......@@ -54,8 +54,7 @@ extern int e820_mapped(unsigned long start, unsigned long end, int type);
extern void e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end);
extern void __init parse_memopt(char *p);
extern void __init print_user_map(void);
extern void __init parse_memopt(char *p, char **end);
extern struct e820map e820;
#endif/*!__ASSEMBLY__*/
......
......@@ -107,12 +107,7 @@ static __inline__ int mtrr_del_page (int reg, __u64 base, __u32 size)
}
#endif
/* The following functions are for initialisation: don't use them! */
extern int mtrr_init (void);
#if defined(CONFIG_SMP) && defined(CONFIG_MTRR)
extern void mtrr_init_boot_cpu (void);
extern void mtrr_init_secondary_cpu (void);
#endif
extern void mtrr_init_cpu(int cpu);
#endif
......
......@@ -44,7 +44,7 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
struct pci_dev;
extern int iommu_setup(char *opt);
extern int iommu_setup(char *opt, char **end);
extern void pci_iommu_init(void);
......
#ifndef _ASM_X8664_PERCPU_H_
#define _ASM_X8664_PERCPU_H_
#include <linux/compiler.h>
#include <linux/config.h>
#ifndef __ARCH_I386_PERCPU__
#define __ARCH_I386_PERCPU__
#ifdef CONFIG_SMP
#include <asm-generic/percpu.h>
#include <asm/pda.h>
extern unsigned long __per_cpu_offset[NR_CPUS];
/* Separate out the type, so (int[3], foo) works. */
#ifndef MODULE
#define DEFINE_PER_CPU(type, name) \
__attribute__((__section__(".percpu"))) __typeof__(type) name##__per_cpu
#endif
/* Completely hide the relocation from the compiler to avoid problems with
the optimizer */
#define __per_cpu(offset,base) \
({ typeof(base) ptr = (void *)base; \
asm("addq %1,%0" : "=r" (ptr) : "r" (offset), "0" (ptr)); ptr; })
/* var is in discarded region: offset to particular copy we want */
#define per_cpu(var,cpu) (*__per_cpu(__per_cpu_offset[cpu], &var##__per_cpu))
#define __get_cpu_var(var) (*__per_cpu(read_pda(cpudata_offset), &var##__per_cpu))
#else /* ! SMP */
/* Can't define per-cpu variables in modules. Sorry --RR */
#ifndef MODULE
#define DEFINE_PER_CPU(type, name) \
__typeof__(type) name##__per_cpu
#endif
#define per_cpu(var, cpu) var##__per_cpu
#define __get_cpu_var(var) var##__per_cpu
#endif
#define DECLARE_PER_CPU(type, name) extern __typeof__(type) name##__per_cpu
extern void setup_per_cpu_areas(void);
#endif /* _ASM_X8664_PERCPU_H_ */
#endif /* __ARCH_I386_PERCPU__ */
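
The point of the asm in __per_cpu above is to hide the offset addition from
the optimizer, so GCC cannot reassociate or cache the relocated pointer. A
toy userspace illustration of the same trick (GCC on x86-64 assumed; two
static variables stand in for the linker-managed per-CPU copies):

    #include <stdio.h>

    static long var_cpu0, var_cpu1;   /* "reference" and "cpu 1" copies */
    static long my_cpu_offset[2];

    #define my_per_cpu(var, cpu) \
        (*({ typeof(&(var)) ptr = &(var); \
             asm("addq %1,%0" : "=r" (ptr) \
                 : "r" (my_cpu_offset[cpu]), "0" (ptr)); \
             ptr; }))

    int main(void)
    {
        my_cpu_offset[1] = (char *)&var_cpu1 - (char *)&var_cpu0;
        my_per_cpu(var_cpu0, 1) = 42;          /* lands in var_cpu1 */
        printf("var_cpu1 = %ld\n", var_cpu1);  /* -> 42 */
        return 0;
    }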
......@@ -274,24 +274,40 @@ static inline int pmd_large(pmd_t pte) {
#define level3_offset_k(dir, address) ((pgd_t *) pml4_page(*(dir)) + pgd_index(address))
/* PGD - Level3 access */
#define __pgd_offset_k(pgd, address) ((pgd) + pgd_index(address))
/* to find an entry in a page-table-directory. */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define current_pgd_offset_k(address) \
__pgd_offset_k((pgd_t *)read_pda(level4_pgt), address)
static inline pgd_t *__pgd_offset_k(pgd_t *pgd, unsigned long address)
{
return pgd + pgd_index(address);
}
/* Find correct pgd via the hidden fourth level page level: */
/* This accesses the reference page table of the boot cpu.
Other CPUs get synced lazily via the page fault handler. */
static inline pgd_t *pgd_offset_k(unsigned long address)
{
pml4_t pml4;
unsigned long addr;
pml4 = init_level4_pgt[pml4_index(address)];
return __pgd_offset_k(__va(pml4_val(pml4) & PTE_MASK), address);
addr = pml4_val(init_level4_pgt[pml4_index(address)]);
addr &= PHYSICAL_PAGE_MASK;
return __pgd_offset_k((pgd_t *)__va(addr), address);
}
/* Access the pgd of the page table as seen by the current CPU. */
static inline pgd_t *current_pgd_offset_k(unsigned long address)
{
unsigned long addr;
addr = read_pda(level4_pgt)[pml4_index(address)];
addr &= PHYSICAL_PAGE_MASK;
return __pgd_offset_k((pgd_t *)__va(addr), address);
}
#if 0 /* disabled because of confusing/wrong naming. */
#define __pgd_offset(address) pgd_index(address)
#endif
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
/* PMD - Level 2 access */
......
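
The pgd_offset_k rework above matters for modules because vmalloc addresses
live in a different pml4 slot than the direct mapping, so the pml4 entry
must be read and its flag bits stripped before the physical address is
reused. A sketch of why the mask matters (the mask value mirrors the 40-bit
physical mask used elsewhere in this diff; treating bit 63 as a flag bit is
an assumption):

    #include <stdio.h>
    #include <stdint.h>

    #define PHYS_PAGE_MASK 0x000ffffffffff000ULL

    int main(void)
    {
        /* pml4 entry: pgd physical address plus flag bits (illustrative). */
        uint64_t pml4e = 0x12345000ULL | (1ULL << 63) | 0x67;
        printf("raw   : %#llx\n", (unsigned long long)pml4e);
        printf("masked: %#llx\n",
               (unsigned long long)(pml4e & PHYS_PAGE_MASK));
        /* __va(raw) would point far outside the direct mapping;
           __va(masked) is the usable pgd pointer. */
        return 0;
    }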
......@@ -34,15 +34,13 @@ extern unsigned long numa_free_all_bootmem(void);
extern void reserve_bootmem_generic(unsigned long phys, unsigned len);
extern void free_bootmem_generic(unsigned long phys, unsigned len);
extern unsigned long start_pfn, end_pfn;
extern unsigned long start_pfn, end_pfn, end_pfn_map;
extern void show_stack(unsigned long * rsp);
extern void exception_table_check(void);
extern void acpi_boot_init(char *);
int iommu_setup(char *opt);
extern int acpi_boot_init(char *);
#define round_up(x,y) (((x) + (y) - 1) & ~((y)-1))
#define round_down(x,y) ((x) & ~((y)-1))
......