Commit 07eee78e authored by Keir Fraser, committed by Dave Jones

[PATCH] AGP fix for Xen VMM

When Linux is running on the Xen virtual machine monitor, physical
addresses are virtualised and cannot be directly referenced by the AGP
GART.  This patch fixes the GART driver for Xen by adding a layer of
abstraction between physical addresses and 'GART addresses'.
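
For illustration, a Xen-aware port might define the translation hooks along
these lines (phys_to_machine() and machine_to_phys() are placeholder names for
the port's pseudo-physical/machine translation primitives, not something
introduced by this patch):

	/* Hypothetical Xen-side definitions -- illustration only, not part of this patch. */
	#define phys_to_gart(x)	phys_to_machine(x)	/* pseudo-physical -> machine */
	#define gart_to_phys(x)	machine_to_phys(x)	/* machine -> pseudo-physical */

The GART driver itself only ever converts through these hooks (and the derived
virt_to_gart()/gart_to_virt() helpers), so it never assumes that a kernel
physical address is directly usable as a bus address.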

Architecture-specific functions are also defined for allocating and freeing
the GATT.  Xen requires this to ensure that the table really is contiguous from
the point of view of the GART.
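
Under Xen, the natural implementation of these allocation hooks
(alloc_gatt_pages()/free_gatt_pages()) would allocate the pages and then
exchange the underlying frames for a machine-contiguous run.  The sketch below
assumes a hypothetical xen_create_contiguous_region() helper and is not part
of this patch:

	/* Hypothetical Xen-side GATT allocation -- illustration only. */
	#define alloc_gatt_pages(order)						\
	({									\
		char *_table = (char *)__get_free_pages(GFP_KERNEL, (order));	\
		if (_table)							\
			xen_create_contiguous_region((unsigned long)_table,	\
						     (order));			\
		_table;								\
	})

The matching free_gatt_pages() would undo the exchange before handing the
pages back to the allocator.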

These extra interface functions are defined as 'no-ops' for all existing
architectures that use the GART driver.
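
Concretely, every architecture's <asm/agp.h> grows the same trivial defaults
(see the header hunks at the end of the diff):

	/* Convert a physical address to an address suitable for the GART. */
	#define phys_to_gart(x) (x)
	#define gart_to_phys(x) (x)

	/* GATT allocation. Returns/accepts GATT kernel virtual address. */
	#define alloc_gatt_pages(order) \
		((char *)__get_free_pages(GFP_KERNEL, (order)))
	#define free_gatt_pages(table, order) \
		free_pages((unsigned long)(table), (order))
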
Signed-off-by: Keir Fraser <keir@xensource.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Dave Jones <davej@redhat.com>
parent e29b545c
@@ -278,6 +278,8 @@ void agp3_generic_cleanup(void);
#define AGP_GENERIC_SIZES_ENTRIES 11
extern struct aper_size_info_16 agp3_generic_sizes[];
+#define virt_to_gart(x) (phys_to_gart(virt_to_phys(x)))
+#define gart_to_virt(x) (phys_to_virt(gart_to_phys(x)))
extern int agp_off;
extern int agp_try_unsupported_boot;
@@ -150,7 +150,7 @@ static void *m1541_alloc_page(struct agp_bridge_data *bridge)
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
-virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN ));
+virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN ));
return addr;
}
@@ -174,7 +174,7 @@ static void m1541_destroy_page(void * addr)
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
-virt_to_phys(addr)) | ALI_CACHE_FLUSH_EN));
+virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN));
agp_generic_destroy_page(addr);
}
@@ -43,7 +43,7 @@ static int amd_create_page_map(struct amd_page_map *page_map)
SetPageReserved(virt_to_page(page_map->real));
global_cache_flush();
-page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
+page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL) {
ClearPageReserved(virt_to_page(page_map->real));
@@ -154,7 +154,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
-agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
/* Get the address for the gart region.
* This is a bus address even on the alpha, b/c its
@@ -167,7 +167,7 @@ static int amd_create_gatt_table(struct agp_bridge_data *bridge)
/* Calculate the agp offset */
for (i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
-writel(virt_to_phys(amd_irongate_private.gatt_pages[i]->real) | 1,
+writel(virt_to_gart(amd_irongate_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
@@ -219,7 +219,7 @@ static struct aper_size_info_32 amd_8151_sizes[7] =
static int amd_8151_configure(void)
{
-unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real);
+unsigned long gatt_bus = virt_to_gart(agp_bridge->gatt_table_real);
/* Configure AGP regs in each x86-64 host bridge. */
for_each_nb() {
@@ -591,7 +591,7 @@ static void __devexit agp_amd64_remove(struct pci_dev *pdev)
{
struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
-release_mem_region(virt_to_phys(bridge->gatt_table_real),
+release_mem_region(virt_to_gart(bridge->gatt_table_real),
amd64_aperture_sizes[bridge->aperture_size_idx].size);
agp_remove_bridge(bridge);
agp_put_bridge(bridge);
@@ -61,7 +61,7 @@ static int ati_create_page_map(ati_page_map *page_map)
SetPageReserved(virt_to_page(page_map->real));
err = map_page_into_agp(virt_to_page(page_map->real));
-page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
+page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL || err) {
ClearPageReserved(virt_to_page(page_map->real));
@@ -343,7 +343,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *) page_dir.remapped;
-agp_bridge->gatt_bus_addr = virt_to_bus(page_dir.real);
+agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
/* Write out the size register */
current_size = A_SIZE_LVL2(agp_bridge->current_size);
@@ -373,7 +373,7 @@ static int ati_create_gatt_table(struct agp_bridge_data *bridge)
/* Calculate the agp offset */
for(i = 0; i < value->num_entries / 1024; i++, addr += 0x00400000) {
-writel(virt_to_bus(ati_generic_private.gatt_pages[i]->real) | 1,
+writel(virt_to_gart(ati_generic_private.gatt_pages[i]->real) | 1,
page_dir.remapped+GET_PAGE_DIR_OFF(addr));
readl(page_dir.remapped+GET_PAGE_DIR_OFF(addr)); /* PCI Posting. */
}
@@ -148,7 +148,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
return -ENOMEM;
}
-bridge->scratch_page_real = virt_to_phys(addr);
+bridge->scratch_page_real = virt_to_gart(addr);
bridge->scratch_page =
bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0);
}
@@ -189,7 +189,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
err_out:
if (bridge->driver->needs_scratch_page)
bridge->driver->agp_destroy_page(
-phys_to_virt(bridge->scratch_page_real));
+gart_to_virt(bridge->scratch_page_real));
if (got_gatt)
bridge->driver->free_gatt_table(bridge);
if (got_keylist) {
@@ -214,7 +214,7 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
if (bridge->driver->agp_destroy_page &&
bridge->driver->needs_scratch_page)
bridge->driver->agp_destroy_page(
-phys_to_virt(bridge->scratch_page_real));
+gart_to_virt(bridge->scratch_page_real));
}
/* When we remove the global variable agp_bridge from all drivers
@@ -219,7 +219,7 @@ static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
efficeon_private.l1_table[index] = page;
-value = __pa(page) | pati | present | index;
+value = virt_to_gart(page) | pati | present | index;
pci_write_config_dword(agp_bridge->dev,
EFFICEON_ATTPAGE, value);
@@ -153,7 +153,7 @@ void agp_free_memory(struct agp_memory *curr)
}
if (curr->page_count != 0) {
for (i = 0; i < curr->page_count; i++) {
-curr->bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[i]));
+curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
}
}
agp_free_key(curr->key);
@@ -209,7 +209,7 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
agp_free_memory(new);
return NULL;
}
-new->memory[i] = virt_to_phys(addr);
+new->memory[i] = virt_to_gart(addr);
new->page_count++;
}
new->bridge = bridge;
@@ -806,8 +806,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
break;
}
-table = (char *) __get_free_pages(GFP_KERNEL,
-page_order);
+table = alloc_gatt_pages(page_order);
if (table == NULL) {
i++;
@@ -838,7 +837,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
size = ((struct aper_size_info_fixed *) temp)->size;
page_order = ((struct aper_size_info_fixed *) temp)->page_order;
num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
-table = (char *) __get_free_pages(GFP_KERNEL, page_order);
+table = alloc_gatt_pages(page_order);
}
if (table == NULL)
@@ -853,7 +852,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
agp_gatt_table = (void *)table;
bridge->driver->cache_flush();
-bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
+bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
(PAGE_SIZE * (1 << page_order)));
bridge->driver->cache_flush();
@@ -861,11 +860,11 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
ClearPageReserved(page);
-free_pages((unsigned long) table, page_order);
+free_gatt_pages(table, page_order);
return -ENOMEM;
}
-bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
+bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
/* AK: bogus, should encode addresses > 4GB */
for (i = 0; i < num_entries; i++) {
@@ -919,7 +918,7 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
ClearPageReserved(page);
-free_pages((unsigned long) bridge->gatt_table_real, page_order);
+free_gatt_pages(bridge->gatt_table_real, page_order);
agp_gatt_table = NULL;
bridge->gatt_table = NULL;
@@ -110,7 +110,7 @@ static int __init hp_zx1_ioc_shared(void)
hp->gart_size = HP_ZX1_GART_SIZE;
hp->gatt_entries = hp->gart_size / hp->io_page_size;
-hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
+hp->io_pdir = gart_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
@@ -248,7 +248,7 @@ hp_zx1_configure (void)
agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
if (hp->io_pdir_owner) {
-writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
+writel(virt_to_gart(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
readl(hp->ioc_regs+HP_ZX1_TCNFG);
@@ -372,7 +372,7 @@ static int i460_alloc_large_page (struct lp_desc *lp)
}
memset(lp->alloced_map, 0, map_size);
-lp->paddr = virt_to_phys(lpage);
+lp->paddr = virt_to_gart(lpage);
lp->refcount = 0;
atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
return 0;
@@ -383,7 +383,7 @@ static void i460_free_large_page (struct lp_desc *lp)
kfree(lp->alloced_map);
lp->alloced_map = NULL;
-free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
+free_pages((unsigned long) gart_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
}
@@ -286,7 +286,7 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
if (new == NULL)
return NULL;
-new->memory[0] = virt_to_phys(addr);
+new->memory[0] = virt_to_gart(addr);
if (pg_count == 4) {
/* kludge to get 4 physical pages for ARGB cursor */
new->memory[1] = new->memory[0] + PAGE_SIZE;
@@ -329,10 +329,10 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
agp_free_key(curr->key);
if(curr->type == AGP_PHYS_MEMORY) {
if (curr->page_count == 4)
-i8xx_destroy_pages(phys_to_virt(curr->memory[0]));
+i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
else
agp_bridge->driver->agp_destroy_page(
-phys_to_virt(curr->memory[0]));
+gart_to_virt(curr->memory[0]));
vfree(curr->memory);
}
kfree(curr);
@@ -51,7 +51,7 @@ static int serverworks_create_page_map(struct serverworks_page_map *page_map)
}
SetPageReserved(virt_to_page(page_map->real));
global_cache_flush();
-page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
+page_map->remapped = ioremap_nocache(virt_to_gart(page_map->real),
PAGE_SIZE);
if (page_map->remapped == NULL) {
ClearPageReserved(virt_to_page(page_map->real));
@@ -162,7 +162,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
/* Create a fake scratch directory */
for(i = 0; i < 1024; i++) {
writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
-writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
+writel(virt_to_gart(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
}
retval = serverworks_create_gatt_pages(value->num_entries / 1024);
@@ -174,7 +174,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
agp_bridge->gatt_table_real = (u32 *)page_dir.real;
agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
-agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);
+agp_bridge->gatt_bus_addr = virt_to_gart(page_dir.real);
/* Get the address for the gart region.
* This is a bus address even on the alpha, b/c its
@@ -187,7 +187,7 @@ static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
/* Calculate the agp offset */
for(i = 0; i < value->num_entries / 1024; i++)
-writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
+writel(virt_to_gart(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);
return 0;
}
@@ -407,7 +407,7 @@ static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
bridge->gatt_table_real = (u32 *) table;
bridge->gatt_table = (u32 *)table;
-bridge->gatt_bus_addr = virt_to_phys(table);
+bridge->gatt_bus_addr = virt_to_gart(table);
for (i = 0; i < num_entries; i++)
bridge->gatt_table[i] = 0;
@@ -10,4 +10,14 @@
#define flush_agp_mappings()
#define flush_agp_cache() mb()
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+free_pages((unsigned long)(table), (order))
#endif
@@ -21,4 +21,14 @@ int unmap_page_from_agp(struct page *page);
worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+free_pages((unsigned long)(table), (order))
#endif
@@ -18,4 +18,14 @@
#define flush_agp_mappings() /* nothing */
#define flush_agp_cache() mb()
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+free_pages((unsigned long)(table), (order))
#endif /* _ASM_IA64_AGP_H */
@@ -10,4 +10,14 @@
#define flush_agp_mappings()
#define flush_agp_cache() mb()
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+free_pages((unsigned long)(table), (order))
#endif
@@ -10,4 +10,14 @@
#define flush_agp_mappings()
#define flush_agp_cache() mb()
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+free_pages((unsigned long)(table), (order))
#endif
@@ -8,4 +8,14 @@
#define flush_agp_mappings()
#define flush_agp_cache() mb()
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+free_pages((unsigned long)(table), (order))
#endif
@@ -19,4 +19,14 @@ int unmap_page_from_agp(struct page *page);
worth it. Would need a page for it. */
#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+/* Convert a physical address to an address suitable for the GART. */
+#define phys_to_gart(x) (x)
+#define gart_to_phys(x) (x)
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+free_pages((unsigned long)(table), (order))
#endif