Commit 65f0361a authored by Dave Jones, committed by Linus Torvalds

[PATCH] Fix up agpgart.

Don't know what exactly happened, but somehow the ZX1 support ended up
in your tree twice. This patch drops one of the copies, and also adds
Intel i845G support to make up for it 8-)
parent 4147b6ca
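For orientation, here is a minimal sketch of the detection pattern this patch extends: agpgart keeps a table of per-chipset entries (PCI device ID, vendor ID, chipset type, names, setup function) and walks it when probing the host bridge. The code below is a simplified, self-contained userspace stand-in, not the kernel code; only the two PCI IDs (0x2560 host bridge, 0x2562 integrated graphics) come from this diff, and the struct layout and main() are illustrative assumptions.

/* Illustrative only: simplified lookup mirroring the bridge table pattern. */
#include <stdio.h>

#define PCI_VENDOR_ID_INTEL          0x8086
#define PCI_DEVICE_ID_INTEL_845_G_0  0x2560  /* i845G host bridge (added below) */
#define PCI_DEVICE_ID_INTEL_845_G_1  0x2562  /* i845G integrated graphics */

struct bridge_entry {
	unsigned short device_id;
	unsigned short vendor_id;
	const char *vendor_name;
	const char *chipset_name;
};

static const struct bridge_entry bridges[] = {
	{ PCI_DEVICE_ID_INTEL_845_G_0, PCI_VENDOR_ID_INTEL, "Intel", "i845G" },
};

int main(void)
{
	/* Pretend we probed a host bridge with these IDs. */
	unsigned short vendor = PCI_VENDOR_ID_INTEL;
	unsigned short device = PCI_DEVICE_ID_INTEL_845_G_0;
	unsigned int i;

	for (i = 0; i < sizeof(bridges) / sizeof(bridges[0]); i++) {
		if (bridges[i].vendor_id == vendor &&
		    bridges[i].device_id == device) {
			printf("Detected an %s %s chipset\n",
			       bridges[i].vendor_name, bridges[i].chipset_name);
			return 0;
		}
	}
	printf("No supported AGP bridge found\n");
	return 1;
}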
@@ -172,6 +172,12 @@ struct agp_bridge_data {
#ifndef PCI_DEVICE_ID_INTEL_810_0
#define PCI_DEVICE_ID_INTEL_810_0 0x7120
#endif
#ifndef PCI_DEVICE_ID_INTEL_845_G_0
#define PCI_DEVICE_ID_INTEL_845_G_0 0x2560
#endif
#ifndef PCI_DEVICE_ID_INTEL_845_G_1
#define PCI_DEVICE_ID_INTEL_845_G_1 0x2562
#endif
#ifndef PCI_DEVICE_ID_INTEL_830_M_0
#define PCI_DEVICE_ID_INTEL_830_M_0 0x3575
#endif
...
@@ -3273,368 +3273,6 @@ static void serverworks_agp_enable(u32 mode)
 * AGP devices and collect their data.
 */
#ifdef CONFIG_AGP_HP_ZX1
#ifndef log2
#define log2(x) ffz(~(x))
#endif
#define HP_ZX1_IOVA_BASE GB(1UL)
#define HP_ZX1_IOVA_SIZE GB(1UL)
#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2)
#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL
#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> \
hp_private.io_tlb_shift)
static aper_size_info_fixed hp_zx1_sizes[] =
{
{0, 0, 0}, /* filled in by hp_zx1_fetch_size() */
};
static gatt_mask hp_zx1_masks[] =
{
{HP_ZX1_PDIR_VALID_BIT, 0}
};
static struct _hp_private {
struct pci_dev *ioc;
volatile u8 *registers;
u64 *io_pdir; // PDIR for entire IOVA
u64 *gatt; // PDIR just for GART (subset of above)
u64 gatt_entries;
u64 iova_base;
u64 gart_base;
u64 gart_size;
u64 io_pdir_size;
int io_pdir_owner; // do we own it, or share it with sba_iommu?
int io_page_size;
int io_tlb_shift;
int io_tlb_ps; // IOC ps config
int io_pages_per_kpage;
} hp_private;
static int __init hp_zx1_ioc_shared(void)
{
struct _hp_private *hp = &hp_private;
printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");
/*
* IOC already configured by sba_iommu module; just use
* its setup. We assume:
* - IOVA space is 1Gb in size
* - first 512Mb is IOMMU, second 512Mb is GART
*/
hp->io_tlb_ps = INREG64(hp->registers, HP_ZX1_TCNFG);
switch (hp->io_tlb_ps) {
case 0: hp->io_tlb_shift = 12; break;
case 1: hp->io_tlb_shift = 13; break;
case 2: hp->io_tlb_shift = 14; break;
case 3: hp->io_tlb_shift = 16; break;
default:
printk(KERN_ERR PFX "Invalid IOTLB page size "
"configuration 0x%x\n", hp->io_tlb_ps);
hp->gatt = 0;
hp->gatt_entries = 0;
return -ENODEV;
}
hp->io_page_size = 1 << hp->io_tlb_shift;
hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
hp->iova_base = INREG64(hp->registers, HP_ZX1_IBASE) & ~0x1;
hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
hp->gart_size = HP_ZX1_GART_SIZE;
hp->gatt_entries = hp->gart_size / hp->io_page_size;
hp->io_pdir = phys_to_virt(INREG64(hp->registers, HP_ZX1_PDIR_BASE));
hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
hp->gatt = 0;
hp->gatt_entries = 0;
printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
"GART disabled\n");
return -ENODEV;
}
return 0;
}
static int __init hp_zx1_ioc_owner(u8 ioc_rev)
{
struct _hp_private *hp = &hp_private;
printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
/*
* Select an IOV page size no larger than system page size.
*/
if (PAGE_SIZE >= KB(64)) {
hp->io_tlb_shift = 16;
hp->io_tlb_ps = 3;
} else if (PAGE_SIZE >= KB(16)) {
hp->io_tlb_shift = 14;
hp->io_tlb_ps = 2;
} else if (PAGE_SIZE >= KB(8)) {
hp->io_tlb_shift = 13;
hp->io_tlb_ps = 1;
} else {
hp->io_tlb_shift = 12;
hp->io_tlb_ps = 0;
}
hp->io_page_size = 1 << hp->io_tlb_shift;
hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
hp->iova_base = HP_ZX1_IOVA_BASE;
hp->gart_size = HP_ZX1_GART_SIZE;
hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
hp->gatt_entries = hp->gart_size / hp->io_page_size;
hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
return 0;
}
static int __init hp_zx1_ioc_init(void)
{
struct _hp_private *hp = &hp_private;
struct pci_dev *ioc;
int i;
u8 ioc_rev;
ioc = pci_find_device(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_ZX1_IOC, NULL);
if (!ioc) {
printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no IOC\n");
return -ENODEV;
}
hp->ioc = ioc;
pci_read_config_byte(ioc, PCI_REVISION_ID, &ioc_rev);
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
if (pci_resource_flags(ioc, i) == IORESOURCE_MEM) {
hp->registers = (u8 *) ioremap(pci_resource_start(ioc, i),
pci_resource_len(ioc, i));
break;
}
}
if (!hp->registers) {
printk(KERN_ERR PFX "Detected HP ZX1 AGP bridge but no CSRs\n");
return -ENODEV;
}
/*
* If the IOTLB is currently disabled, we can take it over.
* Otherwise, we have to share with sba_iommu.
*/
hp->io_pdir_owner = (INREG64(hp->registers, HP_ZX1_IBASE) & 0x1) == 0;
if (hp->io_pdir_owner)
return hp_zx1_ioc_owner(ioc_rev);
return hp_zx1_ioc_shared();
}
static int hp_zx1_fetch_size(void)
{
int size;
size = hp_private.gart_size / MB(1);
hp_zx1_sizes[0].size = size;
agp_bridge.current_size = (void *) &hp_zx1_sizes[0];
return size;
}
static int hp_zx1_configure(void)
{
struct _hp_private *hp = &hp_private;
agp_bridge.gart_bus_addr = hp->gart_base;
agp_bridge.capndx = pci_find_capability(agp_bridge.dev, PCI_CAP_ID_AGP);
pci_read_config_dword(agp_bridge.dev,
agp_bridge.capndx + PCI_AGP_STATUS, &agp_bridge.mode);
if (hp->io_pdir_owner) {
OUTREG64(hp->registers, HP_ZX1_PDIR_BASE,
virt_to_phys(hp->io_pdir));
OUTREG64(hp->registers, HP_ZX1_TCNFG, hp->io_tlb_ps);
OUTREG64(hp->registers, HP_ZX1_IMASK, ~(HP_ZX1_IOVA_SIZE - 1));
OUTREG64(hp->registers, HP_ZX1_IBASE, hp->iova_base | 0x1);
OUTREG64(hp->registers, HP_ZX1_PCOM,
hp->iova_base | log2(HP_ZX1_IOVA_SIZE));
INREG64(hp->registers, HP_ZX1_PCOM);
}
return 0;
}
static void hp_zx1_cleanup(void)
{
struct _hp_private *hp = &hp_private;
if (hp->io_pdir_owner)
OUTREG64(hp->registers, HP_ZX1_IBASE, 0);
iounmap((void *) hp->registers);
}
static void hp_zx1_tlbflush(agp_memory * mem)
{
struct _hp_private *hp = &hp_private;
OUTREG64(hp->registers, HP_ZX1_PCOM,
hp->gart_base | log2(hp->gart_size));
INREG64(hp->registers, HP_ZX1_PCOM);
}
static int hp_zx1_create_gatt_table(void)
{
struct _hp_private *hp = &hp_private;
int i;
if (hp->io_pdir_owner) {
hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
get_order(hp->io_pdir_size));
if (!hp->io_pdir) {
printk(KERN_ERR PFX "Couldn't allocate contiguous "
"memory for I/O PDIR\n");
hp->gatt = 0;
hp->gatt_entries = 0;
return -ENOMEM;
}
memset(hp->io_pdir, 0, hp->io_pdir_size);
hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
}
for (i = 0; i < hp->gatt_entries; i++) {
hp->gatt[i] = (unsigned long) agp_bridge.scratch_page;
}
return 0;
}
static int hp_zx1_free_gatt_table(void)
{
struct _hp_private *hp = &hp_private;
if (hp->io_pdir_owner)
free_pages((unsigned long) hp->io_pdir,
get_order(hp->io_pdir_size));
else
hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
return 0;
}
static int hp_zx1_insert_memory(agp_memory * mem, off_t pg_start, int type)
{
struct _hp_private *hp = &hp_private;
int i, k;
off_t j, io_pg_start;
int io_pg_count;
if (type != 0 || mem->type != 0) {
return -EINVAL;
}
io_pg_start = hp->io_pages_per_kpage * pg_start;
io_pg_count = hp->io_pages_per_kpage * mem->page_count;
if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
return -EINVAL;
}
j = io_pg_start;
while (j < (io_pg_start + io_pg_count)) {
if (hp->gatt[j]) {
return -EBUSY;
}
j++;
}
if (mem->is_flushed == FALSE) {
CACHE_FLUSH();
mem->is_flushed = TRUE;
}
for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
unsigned long paddr;
paddr = mem->memory[i];
for (k = 0;
k < hp->io_pages_per_kpage;
k++, j++, paddr += hp->io_page_size) {
hp->gatt[j] = agp_bridge.mask_memory(paddr, type);
}
}
agp_bridge.tlb_flush(mem);
return 0;
}
static int hp_zx1_remove_memory(agp_memory * mem, off_t pg_start, int type)
{
struct _hp_private *hp = &hp_private;
int i, io_pg_start, io_pg_count;
if (type != 0 || mem->type != 0) {
return -EINVAL;
}
io_pg_start = hp->io_pages_per_kpage * pg_start;
io_pg_count = hp->io_pages_per_kpage * mem->page_count;
for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
hp->gatt[i] = agp_bridge.scratch_page;
}
agp_bridge.tlb_flush(mem);
return 0;
}
static unsigned long hp_zx1_mask_memory(unsigned long addr, int type)
{
return HP_ZX1_PDIR_VALID_BIT | addr;
}
static unsigned long hp_zx1_unmask_memory(unsigned long addr)
{
return addr & ~(HP_ZX1_PDIR_VALID_BIT);
}
static int __init hp_zx1_setup (struct pci_dev *pdev)
{
agp_bridge.masks = hp_zx1_masks;
agp_bridge.num_of_masks = 1;
agp_bridge.dev_private_data = NULL;
agp_bridge.size_type = FIXED_APER_SIZE;
agp_bridge.needs_scratch_page = FALSE;
agp_bridge.configure = hp_zx1_configure;
agp_bridge.fetch_size = hp_zx1_fetch_size;
agp_bridge.cleanup = hp_zx1_cleanup;
agp_bridge.tlb_flush = hp_zx1_tlbflush;
agp_bridge.mask_memory = hp_zx1_mask_memory;
agp_bridge.unmask_memory = hp_zx1_unmask_memory;
agp_bridge.agp_enable = agp_generic_agp_enable;
agp_bridge.cache_flush = global_cache_flush;
agp_bridge.create_gatt_table = hp_zx1_create_gatt_table;
agp_bridge.free_gatt_table = hp_zx1_free_gatt_table;
agp_bridge.insert_memory = hp_zx1_insert_memory;
agp_bridge.remove_memory = hp_zx1_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
agp_bridge.agp_alloc_page = agp_generic_alloc_page;
agp_bridge.agp_destroy_page = agp_generic_destroy_page;
agp_bridge.cant_use_aperture = 1;
return hp_zx1_ioc_init();
(void) pdev; /* unused */
}
#endif /* CONFIG_AGP_HP_ZX1 */
pci_for_each_dev(device) {
cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
@@ -4296,6 +3934,12 @@ static struct {
"Intel",
"i830M",
intel_830mp_setup },
{ PCI_DEVICE_ID_INTEL_845_G_0,
PCI_VENDOR_ID_INTEL,
INTEL_I845_G,
"Intel",
"i845G",
intel_830mp_setup },
{ PCI_DEVICE_ID_INTEL_840_0,
PCI_VENDOR_ID_INTEL,
INTEL_I840,
@@ -4449,15 +4093,6 @@ intel_850_setup },
via_generic_setup },
#endif /* CONFIG_AGP_VIA */
#ifdef CONFIG_AGP_HP_ZX1
{ PCI_DEVICE_ID_HP_ZX1_LBA,
PCI_VENDOR_ID_HP,
HP_ZX1,
"HP",
"ZX1",
hp_zx1_setup },
#endif
#ifdef CONFIG_AGP_HP_ZX1
{ PCI_DEVICE_ID_HP_ZX1_LBA,
PCI_VENDOR_ID_HP,
@@ -4620,16 +4255,38 @@ static int __init agp_find_supported_device(void)
NULL);
if (i810_dev == NULL) {
printk(KERN_ERR PFX "agpgart: Detected an "
"Intel i815, but could not find the"
" secondary device. Assuming a "
"non-integrated video card.\n");
break;
}
printk(KERN_INFO PFX "agpgart: Detected an Intel i815 "
"Chipset.\n");
agp_bridge.type = INTEL_I810;
return intel_i810_setup(i810_dev);
case PCI_DEVICE_ID_INTEL_845_G_0:
i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_845_G_1, NULL);
if(i810_dev && PCI_FUNC(i810_dev->devfn) != 0) {
i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_845_G_1, i810_dev);
}
if (i810_dev == NULL) {
/*
* We probably have a I845MP chipset
* with an external graphics
* card. It will be initialized later
*/
agp_bridge.type = INTEL_I845_G;
break;
}
printk(KERN_INFO PFX "Detected an Intel "
"845G Chipset.\n");
agp_bridge.type = INTEL_I810;
return intel_i830_setup(i810_dev);
case PCI_DEVICE_ID_INTEL_830_M_0:
i810_dev = pci_find_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_830_M_1,
@@ -4641,8 +4298,8 @@ static int __init agp_find_supported_device(void)
}
if (i810_dev == NULL) {
/* Intel 830MP with external graphic card */
/* It will be initialized later */
agp_bridge.type = INTEL_I830_M;
break;
}
@@ -4691,23 +4348,6 @@ static int __init agp_find_supported_device(void)
#endif /* CONFIG_AGP_SWORKS */
#ifdef CONFIG_AGP_HP_ZX1
if (dev->vendor == PCI_VENDOR_ID_HP) {
do {
/* ZX1 LBAs can be either PCI or AGP bridges */
if (pci_find_capability(dev, PCI_CAP_ID_AGP)) {
printk(KERN_INFO PFX "Detected HP ZX1 AGP "
"chipset at %s\n", dev->slot_name);
agp_bridge.type = HP_ZX1;
agp_bridge.dev = dev;
return hp_zx1_setup(dev);
}
dev = pci_find_class(PCI_CLASS_BRIDGE_HOST << 8, dev);
} while (dev);
return -ENODEV;
}
#endif /* CONFIG_AGP_HP_ZX1 */
#ifdef CONFIG_AGP_HP_ZX1
if (dev->vendor == PCI_VENDOR_ID_HP) {
do {
...
@@ -48,6 +48,7 @@ enum chipset_type {
INTEL_I815,
INTEL_I820,
INTEL_I830_M,
INTEL_I845_G,
INTEL_I840,
INTEL_I845,
INTEL_I850,
...