Commit a32073bf authored by Andi Kleen's avatar Andi Kleen Committed by Linus Torvalds

[PATCH] x86_64: Clean and enhance up K8 northbridge access code

 - Factor out the duplicated access/cache code into a single file
   * Shared between i386/x86-64.
 - Share flush code between AGP and IOMMU
   * Fix a bug: AGP didn't wait for end of flush before proceeding
 - Drop 8 northbridges limit and allocate dynamically
 - Add lock to serialize AGP and IOMMU GART flushes
 - Add PCI ID for next AMD northbridge
 - Random related cleanups

The old K8 NUMA discovery code is unchanged. New systems
should all use SRAT for this.

Cc: "Navin Boppuri" <navin.boppuri@newisys.com>
Cc: Dave Jones <davej@redhat.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 7c2d9cd2
...@@ -1054,6 +1054,10 @@ config SCx200 ...@@ -1054,6 +1054,10 @@ config SCx200
This support is also available as a module. If compiled as a This support is also available as a module. If compiled as a
module, it will be called scx200. module, it will be called scx200.
config K8_NB
def_bool y
depends on AGP_AMD64
source "drivers/pcmcia/Kconfig" source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig" source "drivers/pci/hotplug/Kconfig"
......
...@@ -37,6 +37,7 @@ obj-$(CONFIG_EFI) += efi.o efi_stub.o ...@@ -37,6 +37,7 @@ obj-$(CONFIG_EFI) += efi.o efi_stub.o
obj-$(CONFIG_DOUBLEFAULT) += doublefault.o obj-$(CONFIG_DOUBLEFAULT) += doublefault.o
obj-$(CONFIG_VM86) += vm86.o obj-$(CONFIG_VM86) += vm86.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_K8_NB) += k8.o
EXTRA_AFLAGS := -traditional EXTRA_AFLAGS := -traditional
...@@ -76,3 +77,6 @@ SYSCFLAGS_vsyscall-syms.o = -r ...@@ -76,3 +77,6 @@ SYSCFLAGS_vsyscall-syms.o = -r
$(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \ $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
$(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
$(call if_changed,syscall) $(call if_changed,syscall)
k8-y += ../../x86_64/kernel/k8.o
...@@ -501,6 +501,10 @@ config REORDER ...@@ -501,6 +501,10 @@ config REORDER
optimal TLB usage. If you have pretty much any version of binutils, optimal TLB usage. If you have pretty much any version of binutils,
this can increase your kernel build time by roughly one minute. this can increase your kernel build time by roughly one minute.
config K8_NB
def_bool y
depends on AGP_AMD64 || GART_IOMMU || (PCI && NUMA)
endmenu endmenu
# #
......
...@@ -33,6 +33,7 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o ...@@ -33,6 +33,7 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_KPROBES) += kprobes.o obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o obj-$(CONFIG_X86_PM_TIMER) += pmtimer.o
obj-$(CONFIG_X86_VSMP) += vsmp.o obj-$(CONFIG_X86_VSMP) += vsmp.o
obj-$(CONFIG_K8_NB) += k8.o
obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_MODULES) += module.o
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/pci-direct.h> #include <asm/pci-direct.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/k8.h>
int iommu_aperture; int iommu_aperture;
int iommu_aperture_disabled __initdata = 0; int iommu_aperture_disabled __initdata = 0;
...@@ -37,8 +38,6 @@ int fix_aperture __initdata = 1; ...@@ -37,8 +38,6 @@ int fix_aperture __initdata = 1;
/* This code runs before the PCI subsystem is initialized, so just /* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */ access the northbridge directly. */
#define NB_ID_3 (PCI_VENDOR_ID_AMD | (0x1103<<16))
static u32 __init allocate_aperture(void) static u32 __init allocate_aperture(void)
{ {
pg_data_t *nd0 = NODE_DATA(0); pg_data_t *nd0 = NODE_DATA(0);
...@@ -68,20 +67,20 @@ static u32 __init allocate_aperture(void) ...@@ -68,20 +67,20 @@ static u32 __init allocate_aperture(void)
return (u32)__pa(p); return (u32)__pa(p);
} }
static int __init aperture_valid(char *name, u64 aper_base, u32 aper_size) static int __init aperture_valid(u64 aper_base, u32 aper_size)
{ {
if (!aper_base) if (!aper_base)
return 0; return 0;
if (aper_size < 64*1024*1024) { if (aper_size < 64*1024*1024) {
printk("Aperture from %s too small (%d MB)\n", name, aper_size>>20); printk("Aperture too small (%d MB)\n", aper_size>>20);
return 0; return 0;
} }
if (aper_base + aper_size >= 0xffffffff) { if (aper_base + aper_size >= 0xffffffff) {
printk("Aperture from %s beyond 4GB. Ignoring.\n",name); printk("Aperture beyond 4GB. Ignoring.\n");
return 0; return 0;
} }
if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) { if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
printk("Aperture from %s pointing to e820 RAM. Ignoring.\n",name); printk("Aperture pointing to e820 RAM. Ignoring.\n");
return 0; return 0;
} }
return 1; return 1;
...@@ -140,7 +139,7 @@ static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order) ...@@ -140,7 +139,7 @@ static __u32 __init read_agp(int num, int slot, int func, int cap, u32 *order)
printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n", printk("Aperture from AGP @ %Lx size %u MB (APSIZE %x)\n",
aper, 32 << *order, apsizereg); aper, 32 << *order, apsizereg);
if (!aperture_valid("AGP bridge", aper, (32*1024*1024) << *order)) if (!aperture_valid(aper, (32*1024*1024) << *order))
return 0; return 0;
return (u32)aper; return (u32)aper;
} }
...@@ -208,9 +207,8 @@ void __init iommu_hole_init(void) ...@@ -208,9 +207,8 @@ void __init iommu_hole_init(void)
fix = 0; fix = 0;
for (num = 24; num < 32; num++) { for (num = 24; num < 32; num++) {
char name[30]; if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
if (read_pci_config(0, num, 3, 0x00) != NB_ID_3) continue;
continue;
iommu_aperture = 1; iommu_aperture = 1;
...@@ -222,9 +220,7 @@ void __init iommu_hole_init(void) ...@@ -222,9 +220,7 @@ void __init iommu_hole_init(void)
printk("CPU %d: aperture @ %Lx size %u MB\n", num-24, printk("CPU %d: aperture @ %Lx size %u MB\n", num-24,
aper_base, aper_size>>20); aper_base, aper_size>>20);
sprintf(name, "northbridge cpu %d", num-24); if (!aperture_valid(aper_base, aper_size)) {
if (!aperture_valid(name, aper_base, aper_size)) {
fix = 1; fix = 1;
break; break;
} }
...@@ -273,7 +269,7 @@ void __init iommu_hole_init(void) ...@@ -273,7 +269,7 @@ void __init iommu_hole_init(void)
/* Fix up the north bridges */ /* Fix up the north bridges */
for (num = 24; num < 32; num++) { for (num = 24; num < 32; num++) {
if (read_pci_config(0, num, 3, 0x00) != NB_ID_3) if (!early_is_k8_nb(read_pci_config(0, num, 3, 0x00)))
continue; continue;
/* Don't enable translation yet. That is done later. /* Don't enable translation yet. That is done later.
......
/*
 * Shared support code for AMD K8 northbridges and derivatives.
* Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
*/
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/k8.h>
/* Number of cached northbridges; 0 until cache_k8_northbridges() runs. */
int num_k8_northbridges;
EXPORT_SYMBOL(num_k8_northbridges);

/* Per-northbridge GART flush words read from PCI config reg 0x9c. */
static u32 *flush_words;

/* PCI IDs of K8 northbridge function 3 devices; 0x1203 is the ID for
   the next AMD northbridge (see commit log). Zero entry terminates. */
struct pci_device_id k8_nb_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },
	{}
};
EXPORT_SYMBOL(k8_nb_ids);

/* NULL-terminated array of the cached northbridge pci_devs. */
struct pci_dev **k8_northbridges;
EXPORT_SYMBOL(k8_northbridges);
static struct pci_dev *next_k8_northbridge(struct pci_dev *dev)
{
do {
dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
if (!dev)
break;
} while (!pci_match_id(&k8_nb_ids[0], dev));
return dev;
}
/*
 * Enumerate all K8 northbridges and cache their pci_devs in
 * k8_northbridges (NULL terminated) plus each bridge's GART flush
 * word (config reg 0x9c) in flush_words.
 *
 * Returns 0 on success (or when already cached), -ENOMEM on
 * allocation failure.
 */
int cache_k8_northbridges(void)
{
	int i;
	struct pci_dev *dev;

	/* Already cached? The set of northbridges is fixed after boot. */
	if (num_k8_northbridges)
		return 0;

	dev = NULL;
	while ((dev = next_k8_northbridge(dev)) != NULL)
		num_k8_northbridges++;

	/* One extra slot for the NULL terminator. */
	k8_northbridges = kmalloc((num_k8_northbridges + 1) * sizeof(void *),
				  GFP_KERNEL);
	if (!k8_northbridges) {
		/* Reset the count so a later retry re-enumerates instead of
		   reporting success with a NULL table. */
		num_k8_northbridges = 0;
		return -ENOMEM;
	}

	flush_words = kmalloc(num_k8_northbridges * sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		kfree(k8_northbridges);
		k8_northbridges = NULL;
		num_k8_northbridges = 0;
		return -ENOMEM;
	}

	dev = NULL;
	i = 0;
	while ((dev = next_k8_northbridge(dev)) != NULL) {
		k8_northbridges[i] = dev;
		/* Read the flush word before advancing the index: the old
		   post-increment form left flush_words[0] uninitialized and
		   wrote one element past the end of flush_words. */
		pci_read_config_dword(dev, 0x9c, &flush_words[i]);
		i++;
	}
	k8_northbridges[i] = NULL;
	return 0;
}
EXPORT_SYMBOL_GPL(cache_k8_northbridges);
/* Ignores subdevice/subvendor but as far as I can figure out
they're useless anyways */
/*
 * Early-boot test whether the vendor/device dword read from PCI config
 * register 0x00 belongs to a K8 northbridge. Subdevice/subvendor are
 * ignored; as far as I can figure out they're useless anyways.
 */
int __init early_is_k8_nb(u32 device)
{
	struct pci_device_id *id = k8_nb_ids;
	u32 vendor = device & 0xffff;

	device >>= 16;
	while (id->vendor) {
		if (id->vendor == vendor && id->device == device)
			return 1;
		id++;
	}
	return 0;
}
/*
 * Flush the GART TLB of every cached northbridge and wait until the
 * hardware reports each flush as complete.
 */
void k8_flush_garts(void)
{
	static DEFINE_SPINLOCK(gart_lock);
	unsigned long flags;
	int i, issued = 0;

	/* Avoid races between AGP and IOMMU. In theory it's not needed
	   but I'm not sure if the hardware won't lose flush requests
	   when another is pending. This whole thing is so expensive anyways
	   that it doesn't matter to serialize more. -AK */
	spin_lock_irqsave(&gart_lock, flags);

	/* Kick off the flush on every bridge by setting bit 0 of 0x9c. */
	for (i = 0; i < num_k8_northbridges; i++) {
		pci_write_config_dword(k8_northbridges[i], 0x9c,
				       flush_words[i] | 1);
		issued++;
	}

	/* Now spin until each bridge clears the busy bit, i.e. the
	   hardware actually executed the flush. */
	for (i = 0; i < num_k8_northbridges; i++) {
		u32 status;

		do {
			pci_read_config_dword(k8_northbridges[i],
					      0x9c, &status);
			if (!(status & 1))
				break;
			cpu_relax();
		} while (1);
	}

	spin_unlock_irqrestore(&gart_lock, flags);
	if (!issued)
		printk("nothing to flush?\n");
}
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <asm/kdebug.h> #include <asm/kdebug.h>
#include <asm/swiotlb.h> #include <asm/swiotlb.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/k8.h>
unsigned long iommu_bus_base; /* GART remapping area (physical) */ unsigned long iommu_bus_base; /* GART remapping area (physical) */
static unsigned long iommu_size; /* size of remapping area bytes */ static unsigned long iommu_size; /* size of remapping area bytes */
...@@ -46,8 +47,6 @@ u32 *iommu_gatt_base; /* Remapping table */ ...@@ -46,8 +47,6 @@ u32 *iommu_gatt_base; /* Remapping table */
also seen with Qlogic at least). */ also seen with Qlogic at least). */
int iommu_fullflush = 1; int iommu_fullflush = 1;
#define MAX_NB 8
/* Allocation bitmap for the remapping area */ /* Allocation bitmap for the remapping area */
static DEFINE_SPINLOCK(iommu_bitmap_lock); static DEFINE_SPINLOCK(iommu_bitmap_lock);
static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */ static unsigned long *iommu_gart_bitmap; /* guarded by iommu_bitmap_lock */
...@@ -63,13 +62,6 @@ static u32 gart_unmapped_entry; ...@@ -63,13 +62,6 @@ static u32 gart_unmapped_entry;
#define to_pages(addr,size) \ #define to_pages(addr,size) \
(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT) (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
#define for_all_nb(dev) \
dev = NULL; \
while ((dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, dev))!=NULL)
static struct pci_dev *northbridges[MAX_NB];
static u32 northbridge_flush_word[MAX_NB];
#define EMERGENCY_PAGES 32 /* = 128KB */ #define EMERGENCY_PAGES 32 /* = 128KB */
#ifdef CONFIG_AGP #ifdef CONFIG_AGP
...@@ -120,44 +112,17 @@ static void free_iommu(unsigned long offset, int size) ...@@ -120,44 +112,17 @@ static void free_iommu(unsigned long offset, int size)
/* /*
* Use global flush state to avoid races with multiple flushers. * Use global flush state to avoid races with multiple flushers.
*/ */
static void flush_gart(struct device *dev) static void flush_gart(void)
{ {
unsigned long flags; unsigned long flags;
int flushed = 0;
int i, max;
spin_lock_irqsave(&iommu_bitmap_lock, flags); spin_lock_irqsave(&iommu_bitmap_lock, flags);
if (need_flush) { if (need_flush) {
max = 0; k8_flush_garts();
for (i = 0; i < MAX_NB; i++) {
if (!northbridges[i])
continue;
pci_write_config_dword(northbridges[i], 0x9c,
northbridge_flush_word[i] | 1);
flushed++;
max = i;
}
for (i = 0; i <= max; i++) {
u32 w;
if (!northbridges[i])
continue;
/* Make sure the hardware actually executed the flush. */
for (;;) {
pci_read_config_dword(northbridges[i], 0x9c, &w);
if (!(w & 1))
break;
cpu_relax();
}
}
if (!flushed)
printk("nothing to flush?\n");
need_flush = 0; need_flush = 0;
} }
spin_unlock_irqrestore(&iommu_bitmap_lock, flags); spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
} }
#ifdef CONFIG_IOMMU_LEAK #ifdef CONFIG_IOMMU_LEAK
#define SET_LEAK(x) if (iommu_leak_tab) \ #define SET_LEAK(x) if (iommu_leak_tab) \
...@@ -266,7 +231,7 @@ static dma_addr_t gart_map_simple(struct device *dev, char *buf, ...@@ -266,7 +231,7 @@ static dma_addr_t gart_map_simple(struct device *dev, char *buf,
size_t size, int dir) size_t size, int dir)
{ {
dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir); dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);
flush_gart(dev); flush_gart();
return map; return map;
} }
...@@ -351,7 +316,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg, ...@@ -351,7 +316,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
s->dma_address = addr; s->dma_address = addr;
s->dma_length = s->length; s->dma_length = s->length;
} }
flush_gart(dev); flush_gart();
return nents; return nents;
} }
...@@ -458,13 +423,13 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir) ...@@ -458,13 +423,13 @@ int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0) if (dma_map_cont(sg, start, i, sg+out, pages, need) < 0)
goto error; goto error;
out++; out++;
flush_gart(dev); flush_gart();
if (out < nents) if (out < nents)
sg[out].dma_length = 0; sg[out].dma_length = 0;
return out; return out;
error: error:
flush_gart(NULL); flush_gart();
gart_unmap_sg(dev, sg, nents, dir); gart_unmap_sg(dev, sg, nents, dir);
/* When it was forced or merged try again in a dumb way */ /* When it was forced or merged try again in a dumb way */
if (force_iommu || iommu_merge) { if (force_iommu || iommu_merge) {
...@@ -532,10 +497,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info) ...@@ -532,10 +497,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
void *gatt; void *gatt;
unsigned aper_base, new_aper_base; unsigned aper_base, new_aper_base;
unsigned aper_size, gatt_size, new_aper_size; unsigned aper_size, gatt_size, new_aper_size;
int i;
printk(KERN_INFO "PCI-DMA: Disabling AGP.\n"); printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
aper_size = aper_base = info->aper_size = 0; aper_size = aper_base = info->aper_size = 0;
for_all_nb(dev) { dev = NULL;
for (i = 0; i < num_k8_northbridges; i++) {
dev = k8_northbridges[i];
new_aper_base = read_aperture(dev, &new_aper_size); new_aper_base = read_aperture(dev, &new_aper_size);
if (!new_aper_base) if (!new_aper_base)
goto nommu; goto nommu;
...@@ -558,11 +526,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info) ...@@ -558,11 +526,12 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
panic("Cannot allocate GATT table"); panic("Cannot allocate GATT table");
memset(gatt, 0, gatt_size); memset(gatt, 0, gatt_size);
agp_gatt_table = gatt; agp_gatt_table = gatt;
for_all_nb(dev) { for (i = 0; i < num_k8_northbridges; i++) {
u32 ctl; u32 ctl;
u32 gatt_reg; u32 gatt_reg;
dev = k8_northbridges[i];
gatt_reg = __pa(gatt) >> 12; gatt_reg = __pa(gatt) >> 12;
gatt_reg <<= 4; gatt_reg <<= 4;
pci_write_config_dword(dev, 0x98, gatt_reg); pci_write_config_dword(dev, 0x98, gatt_reg);
...@@ -573,7 +542,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info) ...@@ -573,7 +542,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
pci_write_config_dword(dev, 0x90, ctl); pci_write_config_dword(dev, 0x90, ctl);
} }
flush_gart(NULL); flush_gart();
printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10); printk("PCI-DMA: aperture base @ %x size %u KB\n",aper_base, aper_size>>10);
return 0; return 0;
...@@ -607,10 +576,14 @@ static int __init pci_iommu_init(void) ...@@ -607,10 +576,14 @@ static int __init pci_iommu_init(void)
struct agp_kern_info info; struct agp_kern_info info;
unsigned long aper_size; unsigned long aper_size;
unsigned long iommu_start; unsigned long iommu_start;
struct pci_dev *dev;
unsigned long scratch; unsigned long scratch;
long i; long i;
if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
return -1;
}
#ifndef CONFIG_AGP_AMD64 #ifndef CONFIG_AGP_AMD64
no_agp = 1; no_agp = 1;
#else #else
...@@ -637,14 +610,6 @@ static int __init pci_iommu_init(void) ...@@ -637,14 +610,6 @@ static int __init pci_iommu_init(void)
return -1; return -1;
} }
i = 0;
for_all_nb(dev)
i++;
if (i > MAX_NB) {
printk(KERN_ERR "PCI-GART: Too many northbridges (%ld). Disabled\n", i);
return -1;
}
printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n"); printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
aper_size = info.aper_size * 1024 * 1024; aper_size = info.aper_size * 1024 * 1024;
iommu_size = check_iommu_size(info.aper_base, aper_size); iommu_size = check_iommu_size(info.aper_base, aper_size);
...@@ -707,20 +672,8 @@ static int __init pci_iommu_init(void) ...@@ -707,20 +672,8 @@ static int __init pci_iommu_init(void)
for (i = EMERGENCY_PAGES; i < iommu_pages; i++) for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
iommu_gatt_base[i] = gart_unmapped_entry; iommu_gatt_base[i] = gart_unmapped_entry;
for_all_nb(dev) { flush_gart();
u32 flag;
int cpu = PCI_SLOT(dev->devfn) - 24;
if (cpu >= MAX_NB)
continue;
northbridges[cpu] = dev;
pci_read_config_dword(dev, 0x9c, &flag); /* cache flush word */
northbridge_flush_word[cpu] = flag;
}
flush_gart(NULL);
dma_ops = &gart_dma_ops; dma_ops = &gart_dma_ops;
return 0; return 0;
} }
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <asm/mpspec.h> #include <asm/mpspec.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <asm/k8.h>
/* /*
* This discovers the pcibus <-> node mapping on AMD K8. * This discovers the pcibus <-> node mapping on AMD K8.
...@@ -18,7 +19,6 @@ ...@@ -18,7 +19,6 @@
#define NR_LDT_BUS_NUMBER_REGISTERS 3 #define NR_LDT_BUS_NUMBER_REGISTERS 3
#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF) #define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF)
#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF) #define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF)
#define PCI_DEVICE_ID_K8HTCONFIG 0x1100
/** /**
* fill_mp_bus_to_cpumask() * fill_mp_bus_to_cpumask()
...@@ -28,8 +28,7 @@ ...@@ -28,8 +28,7 @@
__init static int __init static int
fill_mp_bus_to_cpumask(void) fill_mp_bus_to_cpumask(void)
{ {
struct pci_dev *nb_dev = NULL; int i, j, k;
int i, j;
u32 ldtbus, nid; u32 ldtbus, nid;
static int lbnr[3] = { static int lbnr[3] = {
LDT_BUS_NUMBER_REGISTER_0, LDT_BUS_NUMBER_REGISTER_0,
...@@ -37,8 +36,9 @@ fill_mp_bus_to_cpumask(void) ...@@ -37,8 +36,9 @@ fill_mp_bus_to_cpumask(void)
LDT_BUS_NUMBER_REGISTER_2 LDT_BUS_NUMBER_REGISTER_2
}; };
while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, cache_k8_northbridges();
PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) { for (k = 0; k < num_k8_northbridges; k++) {
struct pci_dev *nb_dev = k8_northbridges[k];
pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid); pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid);
for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) { for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) {
......
...@@ -15,11 +15,9 @@ ...@@ -15,11 +15,9 @@
#include <linux/agp_backend.h> #include <linux/agp_backend.h>
#include <linux/mmzone.h> #include <linux/mmzone.h>
#include <asm/page.h> /* PAGE_SIZE */ #include <asm/page.h> /* PAGE_SIZE */
#include <asm/k8.h>
#include "agp.h" #include "agp.h"
/* Will need to be increased if AMD64 ever goes >8-way. */
#define MAX_HAMMER_GARTS 8
/* PTE bits. */ /* PTE bits. */
#define GPTE_VALID 1 #define GPTE_VALID 1
#define GPTE_COHERENT 2 #define GPTE_COHERENT 2
...@@ -53,28 +51,12 @@ ...@@ -53,28 +51,12 @@
#define ULI_X86_64_HTT_FEA_REG 0x50 #define ULI_X86_64_HTT_FEA_REG 0x50
#define ULI_X86_64_ENU_SCR_REG 0x54 #define ULI_X86_64_ENU_SCR_REG 0x54
static int nr_garts;
static struct pci_dev * hammers[MAX_HAMMER_GARTS];
static struct resource *aperture_resource; static struct resource *aperture_resource;
static int __initdata agp_try_unsupported = 1; static int __initdata agp_try_unsupported = 1;
#define for_each_nb() for(gart_iterator=0;gart_iterator<nr_garts;gart_iterator++)
static void flush_amd64_tlb(struct pci_dev *dev)
{
u32 tmp;
pci_read_config_dword (dev, AMD64_GARTCACHECTL, &tmp);
tmp |= INVGART;
pci_write_config_dword (dev, AMD64_GARTCACHECTL, tmp);
}
static void amd64_tlbflush(struct agp_memory *temp) static void amd64_tlbflush(struct agp_memory *temp)
{ {
int gart_iterator; k8_flush_garts();
for_each_nb()
flush_amd64_tlb(hammers[gart_iterator]);
} }
static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type) static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
...@@ -153,7 +135,7 @@ static int amd64_fetch_size(void) ...@@ -153,7 +135,7 @@ static int amd64_fetch_size(void)
u32 temp; u32 temp;
struct aper_size_info_32 *values; struct aper_size_info_32 *values;
dev = hammers[0]; dev = k8_northbridges[0];
if (dev==NULL) if (dev==NULL)
return 0; return 0;
...@@ -201,9 +183,6 @@ static u64 amd64_configure (struct pci_dev *hammer, u64 gatt_table) ...@@ -201,9 +183,6 @@ static u64 amd64_configure (struct pci_dev *hammer, u64 gatt_table)
tmp &= ~(DISGARTCPU | DISGARTIO); tmp &= ~(DISGARTCPU | DISGARTIO);
pci_write_config_dword(hammer, AMD64_GARTAPERTURECTL, tmp); pci_write_config_dword(hammer, AMD64_GARTAPERTURECTL, tmp);
/* keep CPU's coherent. */
flush_amd64_tlb (hammer);
return aper_base; return aper_base;
} }
...@@ -222,13 +201,14 @@ static struct aper_size_info_32 amd_8151_sizes[7] = ...@@ -222,13 +201,14 @@ static struct aper_size_info_32 amd_8151_sizes[7] =
static int amd_8151_configure(void) static int amd_8151_configure(void)
{ {
unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real); unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real);
int gart_iterator; int i;
/* Configure AGP regs in each x86-64 host bridge. */ /* Configure AGP regs in each x86-64 host bridge. */
for_each_nb() { for (i = 0; i < num_k8_northbridges; i++) {
agp_bridge->gart_bus_addr = agp_bridge->gart_bus_addr =
amd64_configure(hammers[gart_iterator],gatt_bus); amd64_configure(k8_northbridges[i], gatt_bus);
} }
k8_flush_garts();
return 0; return 0;
} }
...@@ -236,12 +216,13 @@ static int amd_8151_configure(void) ...@@ -236,12 +216,13 @@ static int amd_8151_configure(void)
static void amd64_cleanup(void) static void amd64_cleanup(void)
{ {
u32 tmp; u32 tmp;
int gart_iterator; int i;
for_each_nb() { for (i = 0; i < num_k8_northbridges; i++) {
struct pci_dev *dev = k8_northbridges[i];
/* disable gart translation */ /* disable gart translation */
pci_read_config_dword (hammers[gart_iterator], AMD64_GARTAPERTURECTL, &tmp); pci_read_config_dword (dev, AMD64_GARTAPERTURECTL, &tmp);
tmp &= ~AMD64_GARTEN; tmp &= ~AMD64_GARTEN;
pci_write_config_dword (hammers[gart_iterator], AMD64_GARTAPERTURECTL, tmp); pci_write_config_dword (dev, AMD64_GARTAPERTURECTL, tmp);
} }
} }
...@@ -361,17 +342,15 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, ...@@ -361,17 +342,15 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp,
static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr) static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
{ {
struct pci_dev *loop_dev = NULL; int i;
int i = 0;
if (cache_k8_northbridges() < 0)
/* cache pci_devs of northbridges. */ return -ENODEV;
while ((loop_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1103, loop_dev))
!= NULL) { i = 0;
if (i == MAX_HAMMER_GARTS) { for (i = 0; i < num_k8_northbridges; i++) {
printk(KERN_ERR PFX "Too many northbridges for AGP\n"); struct pci_dev *dev = k8_northbridges[i];
return -1; if (fix_northbridge(dev, pdev, cap_ptr) < 0) {
}
if (fix_northbridge(loop_dev, pdev, cap_ptr) < 0) {
printk(KERN_ERR PFX "No usable aperture found.\n"); printk(KERN_ERR PFX "No usable aperture found.\n");
#ifdef __x86_64__ #ifdef __x86_64__
/* should port this to i386 */ /* should port this to i386 */
...@@ -379,10 +358,8 @@ static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr) ...@@ -379,10 +358,8 @@ static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr)
#endif #endif
return -1; return -1;
} }
hammers[i++] = loop_dev;
} }
nr_garts = i; return 0;
return i == 0 ? -1 : 0;
} }
/* Handle AMD 8151 quirks */ /* Handle AMD 8151 quirks */
...@@ -450,7 +427,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) ...@@ -450,7 +427,7 @@ static int __devinit uli_agp_init(struct pci_dev *pdev)
} }
/* shadow x86-64 registers into ULi registers */ /* shadow x86-64 registers into ULi registers */
pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &httfea); pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea);
/* if x86-64 aperture base is beyond 4G, exit here */ /* if x86-64 aperture base is beyond 4G, exit here */
if ((httfea & 0x7fff) >> (32 - 25)) if ((httfea & 0x7fff) >> (32 - 25))
...@@ -513,7 +490,7 @@ static int __devinit nforce3_agp_init(struct pci_dev *pdev) ...@@ -513,7 +490,7 @@ static int __devinit nforce3_agp_init(struct pci_dev *pdev)
pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp); pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp);
/* shadow x86-64 registers into NVIDIA registers */ /* shadow x86-64 registers into NVIDIA registers */
pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &apbase); pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase);
/* if x86-64 aperture base is beyond 4G, exit here */ /* if x86-64 aperture base is beyond 4G, exit here */
if ( (apbase & 0x7fff) >> (32 - 25) ) { if ( (apbase & 0x7fff) >> (32 - 25) ) {
...@@ -754,10 +731,6 @@ static struct pci_driver agp_amd64_pci_driver = { ...@@ -754,10 +731,6 @@ static struct pci_driver agp_amd64_pci_driver = {
int __init agp_amd64_init(void) int __init agp_amd64_init(void)
{ {
int err = 0; int err = 0;
static struct pci_device_id amd64nb[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },
{ },
};
if (agp_off) if (agp_off)
return -EINVAL; return -EINVAL;
...@@ -774,7 +747,7 @@ int __init agp_amd64_init(void) ...@@ -774,7 +747,7 @@ int __init agp_amd64_init(void)
} }
/* First check that we have at least one AMD64 NB */ /* First check that we have at least one AMD64 NB */
if (!pci_dev_present(amd64nb)) if (!pci_dev_present(k8_nb_ids))
return -ENODEV; return -ENODEV;
/* Look for any AGP bridge */ /* Look for any AGP bridge */
......
#include <asm-x86_64/k8.h>
#ifndef _ASM_K8_H
#define _ASM_K8_H 1

/* Shared AMD K8 northbridge access helpers. */

#include <linux/pci.h>

/* PCI ID table of supported K8 northbridges, zero-terminated. */
extern struct pci_device_id k8_nb_ids[];
/* Early-boot check: is a PCI config vendor/device dword a K8 NB? */
extern int early_is_k8_nb(u32 value);
/* NULL-terminated array of cached northbridge pci_devs, and its count. */
extern struct pci_dev **k8_northbridges;
extern int num_k8_northbridges;
/* Populate the caches above; returns 0 on success, negative on error. */
extern int cache_k8_northbridges(void);
/* Flush the GART TLBs of all cached northbridges and wait for completion. */
extern void k8_flush_garts(void);

#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment