Commit fe2839b3 authored by Linus Torvalds

Merge bk://linux-dj.bkbits.net/agpgart

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents e537d26b c0231af3
......@@ -802,7 +802,6 @@ CONFIG_AGP=y
# CONFIG_AGP_AMD is not set
# CONFIG_AGP_AMD64 is not set
CONFIG_AGP_INTEL=y
CONFIG_AGP_INTEL_MCH=m
# CONFIG_AGP_NVIDIA is not set
# CONFIG_AGP_SIS is not set
# CONFIG_AGP_SWORKS is not set
......
......@@ -672,7 +672,6 @@ CONFIG_RTC=y
#
CONFIG_AGP=y
CONFIG_AGP_AMD64=y
# CONFIG_AGP_INTEL_MCH is not set
# CONFIG_DRM is not set
# CONFIG_MWAVE is not set
CONFIG_RAW_DRIVER=y
......
......@@ -789,7 +789,7 @@ static int __init pci_iommu_init(void)
/* Add other K8 AGP bridge drivers here */
no_agp = no_agp ||
(agp_amd64_init() < 0) ||
(agp_copy_info(&info) < 0);
(agp_copy_info(agp_bridge, &info) < 0);
#endif
if (swiotlb) {
......
......@@ -89,16 +89,6 @@ config AGP_INTEL
use GLX or DRI, or if you have any Intel integrated graphics
chipsets. If unsure, say Y.
config AGP_INTEL_MCH
tristate "Intel i865 chipset support"
depends on AGP && X86
help
This option gives you AGP support for the GLX component of XFree86 4.x
on Intel chipsets that support Intel EM64T processors.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say Y.
config AGP_NVIDIA
tristate "NVIDIA nForce/nForce2 chipset support"
depends on AGP && X86 && !X86_64
......
......@@ -9,7 +9,6 @@ obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o
obj-$(CONFIG_AGP_EFFICEON) += efficeon-agp.o
obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_INTEL_MCH) += intel-mch-agp.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
obj-$(CONFIG_AGP_SIS) += sis-agp.o
......
/*
* AGPGART
* Copyright (C) 2004 Silicon Graphics, Inc.
* Copyright (C) 2002-2004 Dave Jones
* Copyright (C) 1999 Jeff Hartmann
* Copyright (C) 1999 Precision Insight, Inc.
......@@ -100,18 +101,19 @@ struct agp_bridge_driver {
struct gatt_mask *masks;
int (*fetch_size)(void);
int (*configure)(void);
void (*agp_enable)(u32);
void (*agp_enable)(struct agp_bridge_data *, u32);
void (*cleanup)(void);
void (*tlb_flush)(struct agp_memory *);
unsigned long (*mask_memory)(unsigned long, int);
unsigned long (*mask_memory)(struct agp_bridge_data *,
unsigned long, int);
void (*cache_flush)(void);
int (*create_gatt_table)(void);
int (*free_gatt_table)(void);
int (*create_gatt_table)(struct agp_bridge_data *);
int (*free_gatt_table)(struct agp_bridge_data *);
int (*insert_memory)(struct agp_memory *, off_t, int);
int (*remove_memory)(struct agp_memory *, off_t, int);
struct agp_memory *(*alloc_by_type) (size_t, int);
void (*free_by_type)(struct agp_memory *);
void *(*agp_alloc_page)(void);
void *(*agp_alloc_page)(struct agp_bridge_data *);
void (*agp_destroy_page)(void *);
};
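The hunk above threads a struct agp_bridge_data * through the driver hooks (agp_enable, mask_memory, create/free_gatt_table, agp_alloc_page) instead of having them reach for the global agp_bridge. A minimal, abridged sketch of what a chipset driver's hook table looks like under the new signatures; the "example_" names and hook bodies are illustrative, not taken from this patch:

/* Hypothetical chipset driver adapted to the bridge-aware hook signatures. */
static int example_create_gatt_table(struct agp_bridge_data *bridge)
{
        /* drivers supporting a single bridge may still ignore the argument */
        return agp_generic_create_gatt_table(bridge);
}

static unsigned long example_mask_memory(struct agp_bridge_data *bridge,
                                          unsigned long addr, int type)
{
        return addr | bridge->driver->masks[type].mask;
}

static struct agp_bridge_driver example_driver = {
        .owner                  = THIS_MODULE,
        .agp_enable             = agp_generic_enable,           /* now (bridge, mode) */
        .mask_memory            = example_mask_memory,
        .create_gatt_table      = example_create_gatt_table,
        .free_gatt_table        = agp_generic_free_gatt_table,
        .agp_alloc_page         = agp_generic_alloc_page,       /* now takes bridge */
        .agp_destroy_page       = agp_generic_destroy_page,
        /* fetch_size, configure, tlb_flush, etc. omitted for brevity */
};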
......@@ -137,8 +139,10 @@ struct agp_bridge_data {
int max_memory_agp; /* in number of pages */
int aperture_size_idx;
int capndx;
int flags;
char major_version;
char minor_version;
struct list_head list;
};
#define KB(x) ((x) * 1024)
......@@ -243,24 +247,26 @@ int agp_frontend_initialize(void);
void agp_frontend_cleanup(void);
/* Generic routines. */
void agp_generic_enable(u32 mode);
int agp_generic_create_gatt_table(void);
int agp_generic_free_gatt_table(void);
void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge);
int agp_generic_free_gatt_table(struct agp_bridge_data *bridge);
struct agp_memory *agp_create_memory(int scratch_pages);
int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type);
int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
void agp_generic_free_by_type(struct agp_memory *curr);
void *agp_generic_alloc_page(void);
void *agp_generic_alloc_page(struct agp_bridge_data *bridge);
void agp_generic_destroy_page(void *addr);
void agp_free_key(int key);
int agp_num_entries(void);
u32 agp_collect_device_status(u32 mode, u32 command);
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 mode, u32 command);
void agp_device_command(u32 command, int agp_v3);
int agp_3_5_enable(struct agp_bridge_data *bridge);
void global_cache_flush(void);
void get_agp_version(struct agp_bridge_data *bridge);
unsigned long agp_generic_mask_memory(unsigned long addr, int type);
unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
unsigned long addr, int type);
struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
/* generic routines for agp>=3 */
int agp3_generic_fetch_size(void);
......@@ -315,4 +321,11 @@ extern int agp_try_unsupported_boot;
#define AGPCTRL_APERENB (1<<8)
#define AGPCTRL_GTLBEN (1<<7)
#define AGP2_RESERVED_MASK 0x00fffcc8
#define AGP3_RESERVED_MASK 0x00ff00cc
#define AGP_ERRATA_FASTWRITES 1<<0
#define AGP_ERRATA_SBA 1<<1
#define AGP_ERRATA_1X 1<<2
#endif /* _AGP_BACKEND_PRIV_H */
......@@ -139,9 +139,9 @@ static void m1541_cache_flush(void)
}
}
static void *m1541_alloc_page(void)
static void *m1541_alloc_page(struct agp_bridge_data *bridge)
{
void *addr = agp_generic_alloc_page();
void *addr = agp_generic_alloc_page(agp_bridge);
u32 temp;
if (!addr)
......@@ -398,7 +398,7 @@ static int __init agp_ali_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_ali_pci_driver);
return pci_register_driver(&agp_ali_pci_driver);
}
static void __exit agp_ali_cleanup(void)
......
......@@ -76,11 +76,12 @@ static void alpha_core_agp_tlbflush(struct agp_memory *mem)
alpha_mv.mv_pci_tbi(agp->hose, 0, -1);
}
static void alpha_core_agp_enable(u32 mode)
static void alpha_core_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
alpha_agp_info *agp = bridge->dev_private_data;
agp->mode.lw = agp_collect_device_status(mode, agp->capability.lw);
agp->mode.lw = agp_collect_device_status(bridge, mode,
agp->capability.lw);
agp->mode.bits.enable = 1;
agp->ops->configure(agp);
......
......@@ -132,7 +132,7 @@ static int amd_create_gatt_pages(int nr_tables)
#define GET_GATT(addr) (amd_irongate_private.gatt_pages[\
GET_PAGE_DIR_IDX(addr)]->remapped)
static int amd_create_gatt_table(void)
static int amd_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct amd_page_map page_dir;
......@@ -175,7 +175,7 @@ static int amd_create_gatt_table(void)
return 0;
}
static int amd_free_gatt_table(void)
static int amd_free_gatt_table(struct agp_bridge_data *bridge)
{
struct amd_page_map page_dir;
......@@ -314,7 +314,8 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_generic_mask_memory(mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
writel(agp_generic_mask_memory(agp_bridge,
mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
amd_irongate_tlbflush(mem);
......@@ -421,6 +422,53 @@ static int __devinit agp_amdk7_probe(struct pci_dev *pdev,
bridge->dev = pdev;
bridge->capndx = cap_ptr;
/* 751 Errata (22564_B-1.PDF)
erratum 20: strobe glitch with Nvidia NV10 GeForce cards.
system controller may experience noise due to strong drive strengths
*/
if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_7006) {
u8 cap_ptr=0;
struct pci_dev *gfxcard=NULL;
while (!cap_ptr) {
gfxcard = pci_get_class(PCI_CLASS_DISPLAY_VGA<<8, gfxcard);
if (!gfxcard) {
printk (KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
return -ENODEV;
}
cap_ptr = pci_find_capability(gfxcard, PCI_CAP_ID_AGP);
if (!cap_ptr) {
pci_dev_put(gfxcard);
continue;
}
}
/* With so many variants of NVidia cards, it's simpler just
to blacklist them all, and then whitelist them as needed
(if necessary at all). */
if (gfxcard->vendor == PCI_VENDOR_ID_NVIDIA) {
agp_bridge->flags |= AGP_ERRATA_1X;
printk (KERN_INFO PFX "AMD 751 chipset with NVidia GeForce detected. Forcing to 1X due to errata.\n");
}
pci_dev_put(gfxcard);
}
/* 761 Errata (23613_F.pdf)
* Revisions B0/B1 were a disaster.
* erratum 44: SYSCLK/AGPCLK skew causes 2X failures -- Force mode to 1X
* erratum 45: Timing problem prevents fast writes -- Disable fast write.
* erratum 46: Setup violation on AGP SBA pins - Disable side band addressing.
* With this lot disabled, we should prevent lockups. */
if (agp_bridge->dev->device == PCI_DEVICE_ID_AMD_FE_GATE_700E) {
u8 revision=0;
pci_read_config_byte(pdev, PCI_REVISION_ID, &revision);
if (revision == 0x10 || revision == 0x11) {
agp_bridge->flags = AGP_ERRATA_FASTWRITES;
agp_bridge->flags |= AGP_ERRATA_SBA;
agp_bridge->flags |= AGP_ERRATA_1X;
printk (KERN_INFO PFX "AMD 761 chipset with errata detected - disabling AGP fast writes & SBA and forcing to 1X.\n");
}
}
/* Fill in the mode register */
pci_read_config_dword(pdev,
bridge->capndx+PCI_AGP_STATUS,
......@@ -480,7 +528,7 @@ static int __init agp_amdk7_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_amdk7_pci_driver);
return pci_register_driver(&agp_amdk7_pci_driver);
}
static void __exit agp_amdk7_cleanup(void)
......
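The AMD 751/761 errata paths above only record AGP_ERRATA_1X, AGP_ERRATA_FASTWRITES and AGP_ERRATA_SBA in bridge->flags; the actual downgrade has to happen later, when the bridge and device status registers are negotiated into an AGP command word. That consuming side is not part of this hunk, so the following is only a sketch of the kind of check the generic mode-negotiation path would apply, assuming the usual AGPSTAT_* bit names:

/* Sketch: strip capabilities the errata flags forbid before issuing the
 * AGP command (assumed to live in the generic mode negotiation code). */
static u32 apply_errata_flags(struct agp_bridge_data *bridge, u32 command)
{
        if (bridge->flags & AGP_ERRATA_FASTWRITES)
                command &= ~AGPSTAT_FW;         /* erratum 45: no fast writes */
        if (bridge->flags & AGP_ERRATA_SBA)
                command &= ~AGPSTAT_SBA;        /* erratum 46: no side band addressing */
        if (bridge->flags & AGP_ERRATA_1X) {
                command &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
                command |= AGPSTAT2_1X;         /* errata 20/44: force 1X transfers */
        }
        return command;
}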
......@@ -106,7 +106,8 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
tmp = agp_bridge->driver->mask_memory(mem->memory[i], mem->type);
tmp = agp_bridge->driver->mask_memory(agp_bridge,
mem->memory[i], mem->type);
BUG_ON(tmp & 0xffffff0000000ffcULL);
pte = (tmp & 0x000000ff00000000ULL) >> 28;
......@@ -709,7 +710,7 @@ int __init agp_amd64_init(void)
if (agp_off)
return -EINVAL;
if (pci_module_init(&agp_amd64_pci_driver) > 0) {
if (pci_register_driver(&agp_amd64_pci_driver) > 0) {
struct pci_dev *dev;
if (!agp_try_unsupported && !agp_try_unsupported_boot) {
printk(KERN_INFO PFX "No supported AGP bridge found.\n");
......
......@@ -291,7 +291,8 @@ static int ati_insert_memory(struct agp_memory * mem,
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_bridge->driver->mask_memory(mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
writel(agp_bridge->driver->mask_memory(agp_bridge,
mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
agp_bridge->driver->tlb_flush(mem);
......@@ -319,7 +320,7 @@ static int ati_remove_memory(struct agp_memory * mem, off_t pg_start,
return 0;
}
static int ati_create_gatt_table(void)
static int ati_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
ati_page_map page_dir;
......@@ -380,7 +381,7 @@ static int ati_create_gatt_table(void)
return 0;
}
static int ati_free_gatt_table(void)
static int ati_free_gatt_table(struct agp_bridge_data *bridge)
{
ati_page_map page_dir;
......@@ -531,7 +532,7 @@ static int __init agp_ati_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_ati_pci_driver);
return pci_register_driver(&agp_ati_pci_driver);
}
static void __exit agp_ati_cleanup(void)
......
/*
* AGPGART driver backend routines.
* Copyright (C) 2004 Silicon Graphics, Inc.
* Copyright (C) 2002-2003 Dave Jones.
* Copyright (C) 1999 Jeff Hartmann.
* Copyright (C) 1999 Precision Insight, Inc.
......@@ -18,12 +19,12 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* TODO:
* TODO:
* - Allocate more than order 0 pages to avoid too much linear map splitting.
*/
#include <linux/module.h>
......@@ -42,34 +43,38 @@
* fix some real stupidity. It's only by chance we can bump
* past 0.99 at all due to some boolean logic error. */
#define AGPGART_VERSION_MAJOR 0
#define AGPGART_VERSION_MINOR 100
#define AGPGART_VERSION_MINOR 101
static struct agp_version agp_current_version =
{
.major = AGPGART_VERSION_MAJOR,
.minor = AGPGART_VERSION_MINOR,
};
static int agp_count=0;
struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) =
&agp_generic_find_bridge;
struct agp_bridge_data agp_bridge_dummy = { .type = NOT_SUPPORTED };
struct agp_bridge_data *agp_bridge = &agp_bridge_dummy;
struct agp_bridge_data *agp_bridge;
LIST_HEAD(agp_bridges);
EXPORT_SYMBOL(agp_bridge);
EXPORT_SYMBOL(agp_bridges);
/**
* agp_backend_acquire - attempt to acquire the agp backend.
* agp_backend_acquire - attempt to acquire an agp backend.
*
* returns -EBUSY if agp is in use,
* returns 0 if the caller owns the agp backend
*/
int agp_backend_acquire(void)
struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev)
{
if (agp_bridge->type == NOT_SUPPORTED)
return -EINVAL;
if (atomic_read(&agp_bridge->agp_in_use))
return -EBUSY;
atomic_inc(&agp_bridge->agp_in_use);
return 0;
struct agp_bridge_data *bridge;
bridge = agp_find_bridge(pdev);
if (!bridge)
return NULL;
if (atomic_read(&bridge->agp_in_use))
return NULL;
atomic_inc(&bridge->agp_in_use);
return bridge;
}
EXPORT_SYMBOL(agp_backend_acquire);
......@@ -82,10 +87,11 @@ EXPORT_SYMBOL(agp_backend_acquire);
*
* (Ensure that all memory it bound is unbound.)
*/
void agp_backend_release(void)
void agp_backend_release(struct agp_bridge_data *bridge)
{
if (agp_bridge->type != NOT_SUPPORTED)
atomic_dec(&agp_bridge->agp_in_use);
if (bridge)
atomic_dec(&bridge->agp_in_use);
}
EXPORT_SYMBOL(agp_backend_release);
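Note the changed contract: agp_backend_acquire() no longer returns 0/-EBUSY against a single global bridge, it looks the bridge up by PCI device and hands back the handle (or NULL), and agp_backend_release() takes that handle. A minimal caller sketch matching the fb/DRM conversions later in this diff; the surrounding function is hypothetical:

static int example_acquire(struct pci_dev *pdev)
{
        struct agp_bridge_data *bridge;

        bridge = agp_backend_acquire(pdev);     /* NULL: no such bridge, or already in use */
        if (!bridge)
                return -ENODEV;

        /* ... talk to the GART through 'bridge' ... */

        agp_backend_release(bridge);
        return 0;
}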
......@@ -121,7 +127,6 @@ static int agp_find_max(void)
(maxes_table[index].agp - maxes_table[index - 1].agp)) /
(maxes_table[index].mem - maxes_table[index - 1].mem);
printk(KERN_INFO PFX "Maximum main memory to use for agp memory: %ldM\n", result);
result = result << (20 - PAGE_SHIFT);
return result;
}
......@@ -135,7 +140,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
bridge->version = &agp_current_version;
if (bridge->driver->needs_scratch_page) {
void *addr = bridge->driver->agp_alloc_page();
void *addr = bridge->driver->agp_alloc_page(bridge);
if (!addr) {
printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
......@@ -144,7 +149,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
bridge->scratch_page_real = virt_to_phys(addr);
bridge->scratch_page =
bridge->driver->mask_memory(bridge->scratch_page_real, 0);
bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0);
}
size_value = bridge->driver->fetch_size();
......@@ -153,14 +158,14 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
rc = -EINVAL;
goto err_out;
}
if (bridge->driver->create_gatt_table()) {
if (bridge->driver->create_gatt_table(bridge)) {
printk(KERN_ERR PFX
"unable to get memory for graphics translation table.\n");
rc = -ENOMEM;
goto err_out;
}
got_gatt = 1;
bridge->key_list = vmalloc(PAGE_SIZE * 4);
if (bridge->key_list == NULL) {
printk(KERN_ERR PFX "error allocating memory for key lists.\n");
......@@ -168,7 +173,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
goto err_out;
}
got_keylist = 1;
/* FIXME vmalloc'd memory not guaranteed contiguous */
memset(bridge->key_list, 0, PAGE_SIZE * 4);
......@@ -178,9 +183,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
goto err_out;
}
printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
size_value, bridge->gart_bus_addr);
return 0;
err_out:
......@@ -188,7 +190,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
bridge->driver->agp_destroy_page(
phys_to_virt(bridge->scratch_page_real));
if (got_gatt)
bridge->driver->free_gatt_table();
bridge->driver->free_gatt_table(bridge);
if (got_keylist) {
vfree(bridge->key_list);
bridge->key_list = NULL;
......@@ -202,7 +204,7 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
if (bridge->driver->cleanup)
bridge->driver->cleanup();
if (bridge->driver->free_gatt_table)
bridge->driver->free_gatt_table();
bridge->driver->free_gatt_table(bridge);
if (bridge->key_list) {
vfree(bridge->key_list);
bridge->key_list = NULL;
......@@ -214,16 +216,35 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
phys_to_virt(bridge->scratch_page_real));
}
/* XXX Kludge alert: agpgart isn't ready for multiple bridges yet */
/* When we remove the global variable agp_bridge from all drivers
* then agp_alloc_bridge and agp_generic_find_bridge need to be updated
*/
struct agp_bridge_data *agp_alloc_bridge(void)
{
return agp_bridge;
struct agp_bridge_data *bridge = kmalloc(sizeof(*bridge), GFP_KERNEL);
if (!bridge)
return NULL;
memset(bridge, 0, sizeof(*bridge));
atomic_set(&bridge->agp_in_use, 0);
atomic_set(&bridge->current_memory_agp, 0);
if (list_empty(&agp_bridges))
agp_bridge = bridge;
return bridge;
}
EXPORT_SYMBOL(agp_alloc_bridge);
void agp_put_bridge(struct agp_bridge_data *bridge)
{
kfree(bridge);
if (list_empty(&agp_bridges))
agp_bridge = NULL;
}
EXPORT_SYMBOL(agp_put_bridge);
......@@ -240,40 +261,38 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
return -EINVAL;
}
if (agp_count) {
printk (KERN_INFO PFX
"Only one agpgart device currently supported.\n");
return -ENODEV;
}
/* Grab reference on the chipset driver. */
if (!try_module_get(bridge->driver->owner)) {
printk (KERN_INFO PFX "Couldn't lock chipset driver.\n");
return -EINVAL;
}
bridge->type = SUPPORTED;
error = agp_backend_initialize(agp_bridge);
error = agp_backend_initialize(bridge);
if (error) {
printk (KERN_INFO PFX "agp_backend_initialize() failed.\n");
goto err_out;
}
error = agp_frontend_initialize();
if (error) {
printk (KERN_INFO PFX "agp_frontend_initialize() failed.\n");
goto frontend_err;
if (list_empty(&agp_bridges)) {
error = agp_frontend_initialize();
if (error) {
printk (KERN_INFO PFX "agp_frontend_initialize() failed.\n");
goto frontend_err;
}
printk(KERN_INFO PFX "AGP aperture is %dM @ 0x%lx\n",
bridge->driver->fetch_size(), bridge->gart_bus_addr);
}
agp_count++;
list_add(&bridge->list, &agp_bridges);
return 0;
frontend_err:
agp_backend_cleanup(agp_bridge);
agp_backend_cleanup(bridge);
err_out:
bridge->type = NOT_SUPPORTED;
module_put(bridge->driver->owner);
agp_put_bridge(bridge);
return error;
}
EXPORT_SYMBOL_GPL(agp_add_bridge);
......@@ -281,10 +300,10 @@ EXPORT_SYMBOL_GPL(agp_add_bridge);
void agp_remove_bridge(struct agp_bridge_data *bridge)
{
bridge->type = NOT_SUPPORTED;
agp_frontend_cleanup();
agp_backend_cleanup(bridge);
agp_count--;
list_del(&bridge->list);
if (list_empty(&agp_bridges))
agp_frontend_cleanup();
module_put(bridge->driver->owner);
}
EXPORT_SYMBOL_GPL(agp_remove_bridge);
......
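With the agp_bridge_dummy gone, bridges are now heap-allocated and kept on the agp_bridges list, and the frontend is only brought up for the first one. The registration flow a chipset driver follows is scattered across the agp_alloc_bridge / agp_add_bridge / agp_remove_bridge / agp_put_bridge pieces above, so here is a condensed sketch; the probe/remove names and example_driver are placeholders:

static int __devinit example_agp_probe(struct pci_dev *pdev,
                                       const struct pci_device_id *ent)
{
        struct agp_bridge_data *bridge;

        bridge = agp_alloc_bridge();            /* zeroed, refcounts reset */
        if (!bridge)
                return -ENOMEM;

        bridge->driver = &example_driver;
        bridge->dev = pdev;
        bridge->capndx = pci_find_capability(pdev, PCI_CAP_ID_AGP);

        pci_set_drvdata(pdev, bridge);
        return agp_add_bridge(bridge);          /* puts the bridge itself on failure */
}

static void __devexit example_agp_remove(struct pci_dev *pdev)
{
        struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

        agp_remove_bridge(bridge);              /* backend (and last-user frontend) teardown */
        agp_put_bridge(bridge);                 /* kfree + clear global if list is empty */
}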
......@@ -148,7 +148,7 @@ static int efficeon_configure(void)
return 0;
}
static int efficeon_free_gatt_table(void)
static int efficeon_free_gatt_table(struct agp_bridge_data *bridge)
{
int index, freed = 0;
......@@ -183,7 +183,7 @@ static int efficeon_free_gatt_table(void)
#define GET_GATT(addr) (efficeon_private.gatt_pages[\
GET_PAGE_DIR_IDX(addr)]->remapped)
static int efficeon_create_gatt_table(void)
static int efficeon_create_gatt_table(struct agp_bridge_data *bridge)
{
int index;
const int pati = EFFICEON_PATI;
......@@ -209,7 +209,7 @@ static int efficeon_create_gatt_table(void)
page = get_zeroed_page(GFP_KERNEL);
if (!page) {
efficeon_free_gatt_table();
efficeon_free_gatt_table(agp_bridge);
return -ENOMEM;
}
SetPageReserved(virt_to_page((char *)page));
......@@ -448,7 +448,7 @@ static int __init agp_efficeon_init(void)
return 0;
agp_initialised=1;
return pci_module_init(&agp_efficeon_pci_driver);
return pci_register_driver(&agp_efficeon_pci_driver);
}
static void __exit agp_efficeon_cleanup(void)
......
/*
* AGPGART driver frontend
* Copyright (C) 2004 Silicon Graphics, Inc.
* Copyright (C) 2002-2003 Dave Jones
* Copyright (C) 1999 Jeff Hartmann
* Copyright (C) 1999 Precision Insight, Inc.
......@@ -18,9 +19,9 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
......@@ -152,8 +153,8 @@ static void agp_add_seg_to_client(struct agp_client *client,
/* Originally taken from linux/mm/mmap.c from the array
* protection_map.
* The original really should be exported to modules, or
* some routine which does the conversion for you
* The original really should be exported to modules, or
* some routine which does the conversion for you
*/
static const pgprot_t my_protect_map[16] =
......@@ -285,8 +286,8 @@ void agp_remove_file_private(struct agp_file_private * priv)
/* End - File flag list routines */
/*
* Wrappers for agp_free_memory & agp_allocate_memory
/*
* Wrappers for agp_free_memory & agp_allocate_memory
* These make sure that internal lists are kept updated.
*/
static void agp_free_memory_wrap(struct agp_memory *memory)
......@@ -299,7 +300,7 @@ static struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
{
struct agp_memory *memory;
memory = agp_allocate_memory(pg_count, type);
memory = agp_allocate_memory(agp_bridge, pg_count, type);
if (memory == NULL)
return NULL;
......@@ -420,7 +421,7 @@ static int agp_remove_controller(struct agp_controller *controller)
if (agp_fe.current_controller == controller) {
agp_fe.current_controller = NULL;
agp_fe.backend_acquired = FALSE;
agp_backend_release();
agp_backend_release(agp_bridge);
}
kfree(controller);
return 0;
......@@ -468,10 +469,10 @@ static void agp_controller_release_current(struct agp_controller *controller,
agp_fe.current_controller = NULL;
agp_fe.used_by_controller = FALSE;
agp_backend_release();
agp_backend_release(agp_bridge);
}
/*
/*
* Routines for managing client lists -
* These routines are for managing the list of auth'ed clients.
*/
......@@ -605,7 +606,7 @@ static int agp_mmap(struct file *file, struct vm_area_struct *vma)
if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
goto out_eperm;
agp_copy_info(&kerninfo);
agp_copy_info(agp_bridge, &kerninfo);
size = vma->vm_end - vma->vm_start;
current_size = kerninfo.aper_size;
current_size = current_size * 0x100000;
......@@ -757,7 +758,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
struct agp_info userinfo;
struct agp_kern_info kerninfo;
agp_copy_info(&kerninfo);
agp_copy_info(agp_bridge, &kerninfo);
userinfo.version.major = kerninfo.version.major;
userinfo.version.minor = kerninfo.version.minor;
......@@ -777,7 +778,6 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
static int agpioc_acquire_wrap(struct agp_file_private *priv)
{
int ret;
struct agp_controller *controller;
DBG("");
......@@ -788,11 +788,15 @@ static int agpioc_acquire_wrap(struct agp_file_private *priv)
if (agp_fe.current_controller != NULL)
return -EBUSY;
ret = agp_backend_acquire();
if (ret == 0)
agp_fe.backend_acquired = TRUE;
else
return ret;
if(!agp_bridge)
return -ENODEV;
if (atomic_read(&agp_bridge->agp_in_use))
return -EBUSY;
atomic_inc(&agp_bridge->agp_in_use);
agp_fe.backend_acquired = TRUE;
controller = agp_find_controller_by_pid(priv->my_pid);
......@@ -803,7 +807,7 @@ static int agpioc_acquire_wrap(struct agp_file_private *priv)
if (controller == NULL) {
agp_fe.backend_acquired = FALSE;
agp_backend_release();
agp_backend_release(agp_bridge);
return -ENOMEM;
}
agp_insert_controller(controller);
......@@ -830,7 +834,7 @@ static int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
if (copy_from_user(&mode, arg, sizeof(struct agp_setup)))
return -EFAULT;
agp_enable(mode.agp_mode);
agp_enable(agp_bridge, mode.agp_mode);
return 0;
}
......@@ -993,24 +997,24 @@ static int agp_ioctl(struct inode *inode, struct file *file,
if ((agp_fe.current_controller == NULL) &&
(cmd != AGPIOC_ACQUIRE)) {
ret_val = -EINVAL;
goto ioctl_out;
goto ioctl_out;
}
if ((agp_fe.backend_acquired != TRUE) &&
(cmd != AGPIOC_ACQUIRE)) {
ret_val = -EBUSY;
goto ioctl_out;
goto ioctl_out;
}
if (cmd != AGPIOC_ACQUIRE) {
if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
ret_val = -EPERM;
goto ioctl_out;
goto ioctl_out;
}
/* Use the original pid of the controller,
* in case it's threaded */
if (agp_fe.current_controller->pid != curr_priv->my_pid) {
ret_val = -EBUSY;
goto ioctl_out;
goto ioctl_out;
}
}
......@@ -1022,35 +1026,35 @@ static int agp_ioctl(struct inode *inode, struct file *file,
case AGPIOC_ACQUIRE:
ret_val = agpioc_acquire_wrap(curr_priv);
break;
case AGPIOC_RELEASE:
ret_val = agpioc_release_wrap(curr_priv);
break;
case AGPIOC_SETUP:
ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_RESERVE:
ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_PROTECT:
ret_val = agpioc_protect_wrap(curr_priv);
break;
case AGPIOC_ALLOCATE:
ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_DEALLOCATE:
ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
break;
case AGPIOC_BIND:
ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_UNBIND:
ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
break;
......
......@@ -289,7 +289,7 @@ hp_zx1_tlbflush (struct agp_memory *mem)
}
static int
hp_zx1_create_gatt_table (void)
hp_zx1_create_gatt_table (struct agp_bridge_data *bridge)
{
struct _hp_private *hp = &hp_private;
int i;
......@@ -317,7 +317,7 @@ hp_zx1_create_gatt_table (void)
}
static int
hp_zx1_free_gatt_table (void)
hp_zx1_free_gatt_table (struct agp_bridge_data *bridge)
{
struct _hp_private *hp = &hp_private;
......@@ -367,7 +367,9 @@ hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
for (k = 0;
k < hp->io_pages_per_kpage;
k++, j++, paddr += hp->io_page_size) {
hp->gatt[j] = agp_bridge->driver->mask_memory(paddr, type);
hp->gatt[j] =
agp_bridge->driver->mask_memory(agp_bridge,
paddr, type);
}
}
......@@ -396,19 +398,20 @@ hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
}
static unsigned long
hp_zx1_mask_memory (unsigned long addr, int type)
hp_zx1_mask_memory (struct agp_bridge_data *bridge,
unsigned long addr, int type)
{
return HP_ZX1_PDIR_VALID_BIT | addr;
}
static void
hp_zx1_enable (u32 mode)
hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode)
{
struct _hp_private *hp = &hp_private;
u32 command;
command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
command = agp_collect_device_status(mode, command);
command = agp_collect_device_status(bridge, mode, command);
command |= 0x00000100;
writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);
......
......@@ -233,7 +233,7 @@ static int i460_configure (void)
return 0;
}
static int i460_create_gatt_table (void)
static int i460_create_gatt_table (struct agp_bridge_data *bridge)
{
int page_order, num_entries, i;
void *temp;
......@@ -258,7 +258,7 @@ static int i460_create_gatt_table (void)
return 0;
}
static int i460_free_gatt_table (void)
static int i460_free_gatt_table (struct agp_bridge_data *bridge)
{
int num_entries, i;
void *temp;
......@@ -314,7 +314,8 @@ static int i460_insert_memory_small_io_page (struct agp_memory *mem,
for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
paddr = mem->memory[i];
for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
WR_GATT(j, agp_bridge->driver->mask_memory(paddr, mem->type));
WR_GATT(j, agp_bridge->driver->mask_memory(agp_bridge,
paddr, mem->type));
}
WR_FLUSH_GATT(j - 1);
return 0;
......@@ -427,7 +428,8 @@ static int i460_insert_memory_large_io_page (struct agp_memory *mem,
if (i460_alloc_large_page(lp) < 0)
return -ENOMEM;
pg = lp - i460.lp_desc;
WR_GATT(pg, agp_bridge->driver->mask_memory(lp->paddr, 0));
WR_GATT(pg, agp_bridge->driver->mask_memory(agp_bridge,
lp->paddr, 0));
WR_FLUSH_GATT(pg);
}
......@@ -508,12 +510,12 @@ static int i460_remove_memory (struct agp_memory *mem,
* Let's just hope nobody counts on the allocated AGP memory being there before bind time
* (I don't think current drivers do)...
*/
static void *i460_alloc_page (void)
static void *i460_alloc_page (struct agp_bridge_data *bridge)
{
void *page;
if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
page = agp_generic_alloc_page();
page = agp_generic_alloc_page(agp_bridge);
else
/* Returning NULL would cause problems */
/* AK: really dubious code. */
......@@ -529,10 +531,11 @@ static void i460_destroy_page (void *page)
#endif /* I460_LARGE_IO_PAGES */
static unsigned long i460_mask_memory (unsigned long addr, int type)
static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
unsigned long addr, int type)
{
/* Make sure the returned address is a valid GATT entry */
return agp_bridge->driver->masks[0].mask
return bridge->driver->masks[0].mask
| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xffffff000) >> 12);
}
......@@ -624,7 +627,7 @@ static int __init agp_intel_i460_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_intel_i460_pci_driver);
return pci_register_driver(&agp_intel_i460_pci_driver);
}
static void __exit agp_intel_i460_cleanup(void)
......
......@@ -150,7 +150,7 @@ static void intel_i810_tlbflush(struct agp_memory *mem)
return;
}
static void intel_i810_agp_enable(u32 mode)
static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
return;
}
......@@ -229,8 +229,9 @@ static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
insert:
global_cache_flush();
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(mem->memory[i], mem->type),
intel_i810_private.registers+I810_PTE_BASE+(j*4));
writel(agp_bridge->driver->mask_memory(agp_bridge,
mem->memory[i], mem->type),
intel_i810_private.registers+I810_PTE_BASE+(j*4));
readl(intel_i810_private.registers+I810_PTE_BASE+(j*4)); /* PCI Posting. */
}
global_cache_flush();
......@@ -268,7 +269,7 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
return NULL;
switch (pg_count) {
case 1: addr = agp_bridge->driver->agp_alloc_page();
case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
break;
case 4:
/* kludge to get 4 physical pages for ARGB cursor */
......@@ -337,10 +338,11 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
kfree(curr);
}
static unsigned long intel_i810_mask_memory(unsigned long addr, int type)
static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
unsigned long addr, int type)
{
/* Type checking must be done elsewhere */
return addr | agp_bridge->driver->masks[type].mask;
return addr | bridge->driver->masks[type].mask;
}
static struct aper_size_info_fixed intel_i830_sizes[] =
......@@ -447,7 +449,7 @@ static void intel_i830_init_gtt_entries(void)
/* The intel i830 automatically initializes the agp aperture during POST.
* Use the memory already set aside for in the GTT.
*/
static int intel_i830_create_gatt_table(void)
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
int page_order;
struct aper_size_info_fixed *size;
......@@ -482,7 +484,7 @@ static int intel_i830_create_gatt_table(void)
/* Return the gatt table to a sane state. Use the top of stolen
* memory for the GTT.
*/
static int intel_i830_free_gatt_table(void)
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
return 0;
}
......@@ -582,8 +584,9 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int
global_cache_flush(); /* FIXME: Necessary ?*/
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(mem->memory[i], mem->type),
intel_i830_private.registers+I810_PTE_BASE+(j*4));
writel(agp_bridge->driver->mask_memory(agp_bridge,
mem->memory[i], mem->type),
intel_i830_private.registers+I810_PTE_BASE+(j*4));
readl(intel_i830_private.registers+I810_PTE_BASE+(j*4)); /* PCI Posting. */
}
......@@ -691,7 +694,8 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
global_cache_flush();
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(mem->memory[i], mem->type), intel_i830_private.gtt+j);
writel(agp_bridge->driver->mask_memory(agp_bridge,
mem->memory[i], mem->type), intel_i830_private.gtt+j);
readl(intel_i830_private.gtt+j); /* PCI Posting. */
}
......@@ -743,7 +747,7 @@ static int intel_i915_fetch_size(void)
/* The intel i915 automatically initializes the agp aperture during POST.
* Use the memory already set aside for in the GTT.
*/
static int intel_i915_create_gatt_table(void)
static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
{
int page_order;
struct aper_size_info_fixed *size;
......@@ -1758,6 +1762,8 @@ static int agp_intel_resume(struct pci_dev *pdev)
intel_i915_configure();
else if (bridge->driver == &intel_830_driver)
intel_i830_configure();
else if (bridge->driver == &intel_810_driver)
intel_i810_configure();
return 0;
}
......@@ -1810,7 +1816,9 @@ static struct pci_driver agp_intel_pci_driver = {
static int __init agp_intel_init(void)
{
return pci_module_init(&agp_intel_pci_driver);
if (agp_off)
return -EINVAL;
return pci_register_driver(&agp_intel_pci_driver);
}
static void __exit agp_intel_cleanup(void)
......
......@@ -215,7 +215,8 @@ static int nvidia_insert_memory(struct agp_memory *mem, off_t pg_start, int type
mem->is_flushed = TRUE;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(mem->memory[i], mem->type),
writel(agp_bridge->driver->mask_memory(agp_bridge,
mem->memory[i], mem->type),
agp_bridge->gatt_table+nvidia_private.pg_offset+j);
readl(agp_bridge->gatt_table+nvidia_private.pg_offset+j); /* PCI Posting. */
}
......@@ -407,7 +408,7 @@ static int __init agp_nvidia_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_nvidia_pci_driver);
return pci_register_driver(&agp_nvidia_pci_driver);
}
static void __exit agp_nvidia_cleanup(void)
......
/*
* SiS AGPGART routines.
* SiS AGPGART routines.
*/
#include <linux/module.h>
......@@ -70,7 +70,7 @@ static void sis_cleanup(void)
(previous_size->size_value & ~(0x03)));
}
static void sis_delayed_enable(u32 mode)
static void sis_delayed_enable(struct agp_bridge_data *bridge, u32 mode)
{
struct pci_dev *device = NULL;
u32 command;
......@@ -82,7 +82,7 @@ static void sis_delayed_enable(u32 mode)
pci_name(agp_bridge->dev));
pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx + PCI_AGP_STATUS, &command);
command = agp_collect_device_status(mode, command);
command = agp_collect_device_status(bridge, mode, command);
command |= AGPSTAT_AGP_ENABLE;
rate = (command & 0x7) << 2;
......@@ -99,9 +99,9 @@ static void sis_delayed_enable(u32 mode)
/*
* Weird: on some sis chipsets any rate change in the target
* command register triggers a 5ms screwup during which the master
* cannot be configured
* cannot be configured
*/
if (device->device == agp_bridge->dev->device) {
if (device->device == bridge->dev->device) {
printk(KERN_INFO PFX "SiS delay workaround: giving bridge time to recover.\n");
msleep(10);
}
......@@ -342,7 +342,7 @@ static int __init agp_sis_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_sis_pci_driver);
return pci_register_driver(&agp_sis_pci_driver);
}
static void __exit agp_sis_cleanup(void)
......
......@@ -141,7 +141,7 @@ static int serverworks_create_gatt_pages(int nr_tables)
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif
static int serverworks_create_gatt_table(void)
static int serverworks_create_gatt_table(struct agp_bridge_data *bridge)
{
struct aper_size_info_lvl2 *value;
struct serverworks_page_map page_dir;
......@@ -192,7 +192,7 @@ static int serverworks_create_gatt_table(void)
return 0;
}
static int serverworks_free_gatt_table(void)
static int serverworks_free_gatt_table(struct agp_bridge_data *bridge)
{
struct serverworks_page_map page_dir;
......@@ -341,7 +341,7 @@ static int serverworks_insert_memory(struct agp_memory *mem,
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = SVRWRKS_GET_GATT(addr);
writel(agp_bridge->driver->mask_memory(mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
writel(agp_bridge->driver->mask_memory(agp_bridge, mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
}
serverworks_tlbflush(mem);
return 0;
......@@ -387,15 +387,15 @@ static struct aper_size_info_lvl2 serverworks_sizes[7] =
{32, 8192, 0xfe000000}
};
static void serverworks_agp_enable(u32 mode)
static void serverworks_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
u32 command;
pci_read_config_dword(serverworks_private.svrwrks_dev,
agp_bridge->capndx + PCI_AGP_STATUS,
bridge->capndx + PCI_AGP_STATUS,
&command);
command = agp_collect_device_status(mode, command);
command = agp_collect_device_status(bridge, mode, command);
command &= ~0x10; /* disable FW */
command &= ~0x08;
......@@ -403,7 +403,7 @@ static void serverworks_agp_enable(u32 mode)
command |= 0x100;
pci_write_config_dword(serverworks_private.svrwrks_dev,
agp_bridge->capndx + PCI_AGP_COMMAND,
bridge->capndx + PCI_AGP_COMMAND,
command);
agp_device_command(command, 0);
......@@ -541,7 +541,7 @@ static int __init agp_serverworks_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_serverworks_pci_driver);
return pci_register_driver(&agp_serverworks_pci_driver);
}
static void __exit agp_serverworks_cleanup(void)
......
......@@ -124,27 +124,27 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start,
return 0;
}
static void uninorth_agp_enable(u32 mode)
static void uninorth_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
u32 command, scratch;
int timeout;
pci_read_config_dword(agp_bridge->dev,
agp_bridge->capndx + PCI_AGP_STATUS,
pci_read_config_dword(bridge->dev,
bridge->capndx + PCI_AGP_STATUS,
&command);
command = agp_collect_device_status(mode, command);
command = agp_collect_device_status(bridge, mode, command);
command |= 0x100;
uninorth_tlbflush(NULL);
timeout = 0;
do {
pci_write_config_dword(agp_bridge->dev,
agp_bridge->capndx + PCI_AGP_COMMAND,
pci_write_config_dword(bridge->dev,
bridge->capndx + PCI_AGP_COMMAND,
command);
pci_read_config_dword(agp_bridge->dev,
agp_bridge->capndx + PCI_AGP_COMMAND,
pci_read_config_dword(bridge->dev,
bridge->capndx + PCI_AGP_COMMAND,
&scratch);
} while ((scratch & 0x100) == 0 && ++timeout < 1000);
if ((scratch & 0x100) == 0)
......@@ -155,7 +155,7 @@ static void uninorth_agp_enable(u32 mode)
uninorth_tlbflush(NULL);
}
static int uninorth_create_gatt_table(void)
static int uninorth_create_gatt_table(struct agp_bridge_data *bridge)
{
char *table;
char *table_end;
......@@ -212,7 +212,7 @@ static int uninorth_create_gatt_table(void)
return 0;
}
static int uninorth_free_gatt_table(void)
static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
{
int page_order;
char *table, *table_end;
......@@ -375,7 +375,7 @@ static int __init agp_uninorth_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_uninorth_pci_driver);
return pci_register_driver(&agp_uninorth_pci_driver);
}
static void __exit agp_uninorth_cleanup(void)
......
/*
* VIA AGPGART routines.
* VIA AGPGART routines.
*/
#include <linux/types.h>
......@@ -36,6 +36,7 @@ static int via_fetch_size(void)
return values[i].size;
}
}
printk(KERN_ERR PFX "Unknown aperture size from AGP bridge (0x%x)\n", temp);
return 0;
}
......@@ -78,12 +79,19 @@ static void via_cleanup(void)
static void via_tlbflush(struct agp_memory *mem)
{
pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000008f);
pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, 0x0000000f);
u32 temp;
pci_read_config_dword(agp_bridge->dev, VIA_GARTCTRL, &temp);
temp |= (1<<7);
temp &= ~0x7f;
pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
temp &= ~(1<<7);
temp &= ~0x7f;
pci_write_config_dword(agp_bridge->dev, VIA_GARTCTRL, temp);
}
static struct aper_size_info_8 via_generic_sizes[7] =
static struct aper_size_info_8 via_generic_sizes[9] =
{
{256, 65536, 6, 0},
{128, 32768, 5, 128},
......@@ -91,7 +99,9 @@ static struct aper_size_info_8 via_generic_sizes[7] =
{32, 8192, 3, 224},
{16, 4096, 2, 240},
{8, 2048, 1, 248},
{4, 1024, 0, 252}
{4, 1024, 0, 252},
{2, 512, 0, 254},
{1, 256, 0, 255}
};
......@@ -121,7 +131,7 @@ static int via_configure_agp3(void)
{
u32 temp;
struct aper_size_info_16 *current_size;
current_size = A_SIZE_16(agp_bridge->current_size);
/* address to map too */
......@@ -132,13 +142,13 @@ static int via_configure_agp3(void)
pci_write_config_dword(agp_bridge->dev, VIA_AGP3_ATTBASE,
agp_bridge->gatt_bus_addr & 0xfffff000);
/* 1. Enable GTLB in RX90<7>, all AGP aperture access needs to fetch
/* 1. Enable GTLB in RX90<7>, all AGP aperture access needs to fetch
* translation table first.
* 2. Enable AGP aperture in RX91<0>. This bit controls the enabling of the
* graphics AGP aperture for the AGP3.0 port.
*/
pci_read_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, &temp);
pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp | (3<<7));
pci_write_config_dword(agp_bridge->dev, VIA_AGP3_GARTCTRL, temp | (3<<7));
return 0;
}
......@@ -189,7 +199,7 @@ struct agp_bridge_driver via_driver = {
.owner = THIS_MODULE,
.aperture_sizes = via_generic_sizes,
.size_type = U8_APER_SIZE,
.num_aperture_sizes = 7,
.num_aperture_sizes = 9,
.configure = via_configure,
.fetch_size = via_fetch_size,
.cleanup = via_cleanup,
......@@ -525,7 +535,7 @@ static int __init agp_via_init(void)
{
if (agp_off)
return -EINVAL;
return pci_module_init(&agp_via_pci_driver);
return pci_register_driver(&agp_via_pci_driver);
}
static void __exit agp_via_cleanup(void)
......
......@@ -497,6 +497,7 @@ typedef struct drm_agp_head {
DRM_AGP_KERN agp_info; /**< AGP device information */
drm_agp_mem_t *memory; /**< memory entries */
unsigned long mode; /**< AGP mode */
struct agp_bridge_data *bridge;
int enabled; /**< whether the AGP bus as been enabled */
int acquired; /**< whether the AGP device has been acquired */
unsigned long base;
......@@ -807,7 +808,7 @@ extern void *drm_ioremap_nocache(unsigned long offset, unsigned long size,
drm_device_t *dev);
extern void drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev);
extern DRM_AGP_MEM *drm_alloc_agp(int pages, u32 type);
extern DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type);
extern int drm_free_agp(DRM_AGP_MEM *handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start);
extern int drm_unbind_agp(DRM_AGP_MEM *handle);
......@@ -930,10 +931,10 @@ extern int drm_vblank_wait(drm_device_t *dev, unsigned int *vbl_seq);
extern void drm_vbl_send_signals( drm_device_t *dev );
/* AGP/GART support (drm_agpsupport.h) */
extern drm_agp_head_t *drm_agp_init(void);
extern drm_agp_head_t *drm_agp_init(drm_device_t *dev);
extern int drm_agp_acquire(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern void drm_agp_do_release(void);
extern void drm_agp_do_release(drm_device_t *dev);
extern int drm_agp_release(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_agp_enable(struct inode *inode, struct file *filp,
......@@ -948,7 +949,7 @@ extern int drm_agp_unbind(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_agp_bind(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type);
extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type);
extern int drm_agp_free_memory(DRM_AGP_MEM *handle);
extern int drm_agp_bind_memory(DRM_AGP_MEM *handle, off_t start);
extern int drm_agp_unbind_memory(DRM_AGP_MEM *handle);
......
......@@ -92,14 +92,13 @@ int drm_agp_acquire(struct inode *inode, struct file *filp,
{
drm_file_t *priv = filp->private_data;
drm_device_t *dev = priv->dev;
int retcode;
if (!dev->agp)
return -ENODEV;
if (dev->agp->acquired)
return -EBUSY;
if ((retcode = agp_backend_acquire()))
return retcode;
if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
return -ENODEV;
dev->agp->acquired = 1;
return 0;
}
......@@ -123,7 +122,7 @@ int drm_agp_release(struct inode *inode, struct file *filp,
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
agp_backend_release();
agp_backend_release(dev->agp->bridge);
dev->agp->acquired = 0;
return 0;
......@@ -134,9 +133,9 @@ int drm_agp_release(struct inode *inode, struct file *filp,
*
* Calls agp_backend_release().
*/
void drm_agp_do_release(void)
void drm_agp_do_release(drm_device_t *dev)
{
agp_backend_release();
agp_backend_release(dev->agp->bridge);
}
/**
......@@ -165,7 +164,7 @@ int drm_agp_enable(struct inode *inode, struct file *filp,
return -EFAULT;
dev->agp->mode = mode.mode;
agp_enable(mode.mode);
agp_enable(dev->agp->bridge, mode.mode);
dev->agp->base = dev->agp->agp_info.aper_base;
dev->agp->enabled = 1;
return 0;
......@@ -207,7 +206,7 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
type = (u32) request.type;
if (!(memory = drm_alloc_agp(pages, type))) {
if (!(memory = drm_alloc_agp(dev->agp->bridge, pages, type))) {
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
return -ENOMEM;
}
......@@ -381,14 +380,19 @@ int drm_agp_free(struct inode *inode, struct file *filp,
* \return pointer to a drm_agp_head structure.
*
*/
drm_agp_head_t *drm_agp_init(void)
drm_agp_head_t *drm_agp_init(drm_device_t *dev)
{
drm_agp_head_t *head = NULL;
if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
return NULL;
memset((void *)head, 0, sizeof(*head));
agp_copy_info(&head->agp_info);
if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
return NULL;
}
agp_copy_info(head->bridge, &head->agp_info);
agp_backend_release(head->bridge);
if (head->agp_info.chipset == NOT_SUPPORTED) {
drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
return NULL;
......@@ -406,9 +410,9 @@ drm_agp_head_t *drm_agp_init(void)
}
/** Calls agp_allocate_memory() */
DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type)
DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type)
{
return agp_allocate_memory(pages, type);
return agp_allocate_memory(bridge, pages, type);
}
/** Calls agp_free_memory() */
......
......@@ -185,7 +185,7 @@ int drm_takedown( drm_device_t *dev )
}
dev->agp->memory = NULL;
if ( dev->agp->acquired ) drm_agp_do_release();
if ( dev->agp->acquired ) drm_agp_do_release(dev);
dev->agp->acquired = 0;
dev->agp->enabled = 0;
......
......@@ -155,9 +155,9 @@ void drm_free_pages(unsigned long address, int order, int area)
#if __OS_HAS_AGP
/** Wrapper around agp_allocate_memory() */
DRM_AGP_MEM *drm_alloc_agp(int pages, u32 type)
DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type)
{
return drm_agp_allocate_memory(pages, type);
return drm_agp_allocate_memory(bridge, pages, type);
}
/** Wrapper around agp_free_memory() */
......
......@@ -91,7 +91,7 @@ static int drm_fill_in_dev(drm_device_t *dev, struct pci_dev *pdev, const struct
goto error_out_unreg;
if (drm_core_has_AGP(dev)) {
dev->agp = drm_agp_init();
dev->agp = drm_agp_init(dev);
if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
DRM_ERROR( "Cannot initialize the agpgart module.\n" );
retcode = -EINVAL;
......
......@@ -1591,40 +1591,41 @@ static int __devinit i810_alloc_agp_mem(struct fb_info *info)
{
struct i810fb_par *par = (struct i810fb_par *) info->par;
int size;
struct agp_bridge_data *bridge;
i810_fix_offsets(par);
size = par->fb.size + par->iring.size;
if (agp_backend_acquire()) {
if (!(bridge = agp_backend_acquire(par->dev))) {
printk("i810fb_alloc_fbmem: cannot acquire agpgart\n");
return -ENODEV;
}
if (!(par->i810_gtt.i810_fb_memory =
agp_allocate_memory(size >> 12, AGP_NORMAL_MEMORY))) {
agp_allocate_memory(bridge, size >> 12, AGP_NORMAL_MEMORY))) {
printk("i810fb_alloc_fbmem: can't allocate framebuffer "
"memory\n");
agp_backend_release();
agp_backend_release(bridge);
return -ENOMEM;
}
if (agp_bind_memory(par->i810_gtt.i810_fb_memory,
par->fb.offset)) {
printk("i810fb_alloc_fbmem: can't bind framebuffer memory\n");
agp_backend_release();
agp_backend_release(bridge);
return -EBUSY;
}
if (!(par->i810_gtt.i810_cursor_memory =
agp_allocate_memory(par->cursor_heap.size >> 12,
agp_allocate_memory(bridge, par->cursor_heap.size >> 12,
AGP_PHYSICAL_MEMORY))) {
printk("i810fb_alloc_cursormem: can't allocate"
"cursor memory\n");
agp_backend_release();
agp_backend_release(bridge);
return -ENOMEM;
}
if (agp_bind_memory(par->i810_gtt.i810_cursor_memory,
par->cursor_heap.offset)) {
printk("i810fb_alloc_cursormem: cannot bind cursor memory\n");
agp_backend_release();
agp_backend_release(bridge);
return -EBUSY;
}
......@@ -1632,7 +1633,7 @@ static int __devinit i810_alloc_agp_mem(struct fb_info *info)
i810_fix_pointers(par);
agp_backend_release();
agp_backend_release(bridge);
return 0;
}
......
......@@ -470,6 +470,7 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
struct agp_kern_info gtt_info;
int agp_memtype;
const char *s;
struct agp_bridge_data *bridge;
DBG_MSG("intelfb_pci_register\n");
......@@ -605,16 +606,16 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Use agpgart to manage the GATT */
if (agp_backend_acquire()) {
if (!(bridge = agp_backend_acquire(pdev))) {
ERR_MSG("cannot acquire agp\n");
cleanup(dinfo);
return -ENODEV;
}
/* get the current gatt info */
if (agp_copy_info(&gtt_info)) {
if (agp_copy_info(bridge, &gtt_info)) {
ERR_MSG("cannot get agp info\n");
agp_backend_release();
agp_backend_release(bridge);
cleanup(dinfo);
return -ENODEV;
}
......@@ -637,17 +638,17 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Allocate memories (which aren't stolen) */
if (dinfo->accel) {
if (!(dinfo->gtt_ring_mem =
agp_allocate_memory(dinfo->ring.size >> 12,
agp_allocate_memory(bridge, dinfo->ring.size >> 12,
AGP_NORMAL_MEMORY))) {
ERR_MSG("cannot allocate ring buffer memory\n");
agp_backend_release();
agp_backend_release(bridge);
cleanup(dinfo);
return -ENOMEM;
}
if (agp_bind_memory(dinfo->gtt_ring_mem,
dinfo->ring.offset)) {
ERR_MSG("cannot bind ring buffer memory\n");
agp_backend_release();
agp_backend_release(bridge);
cleanup(dinfo);
return -EBUSY;
}
......@@ -661,17 +662,17 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
agp_memtype = dinfo->mobile ? AGP_PHYSICAL_MEMORY
: AGP_NORMAL_MEMORY;
if (!(dinfo->gtt_cursor_mem =
agp_allocate_memory(dinfo->cursor.size >> 12,
agp_allocate_memory(bridge, dinfo->cursor.size >> 12,
agp_memtype))) {
ERR_MSG("cannot allocate cursor memory\n");
agp_backend_release();
agp_backend_release(bridge);
cleanup(dinfo);
return -ENOMEM;
}
if (agp_bind_memory(dinfo->gtt_cursor_mem,
dinfo->cursor.offset)) {
ERR_MSG("cannot bind cursor memory\n");
agp_backend_release();
agp_backend_release(bridge);
cleanup(dinfo);
return -EBUSY;
}
......@@ -686,7 +687,7 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (dinfo->fbmem_gart) {
if (!(dinfo->gtt_fb_mem =
agp_allocate_memory(dinfo->fb.size >> 12,
agp_allocate_memory(bridge, dinfo->fb.size >> 12,
AGP_NORMAL_MEMORY))) {
WRN_MSG("cannot allocate framebuffer memory - use "
"the stolen one\n");
......@@ -709,7 +710,7 @@ intelfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent)
dinfo->fb_start = dinfo->fb.offset << 12;
/* release agpgart */
agp_backend_release();
agp_backend_release(bridge);
if (mtrr)
set_mtrr(dinfo);
......
/*
* AGPGART backend specific includes. Not for userspace consumption.
*
* Copyright (C) 2004 Silicon Graphics, Inc.
* Copyright (C) 2002-2003 Dave Jones
* Copyright (C) 1999 Jeff Hartmann
* Copyright (C) 1999 Precision Insight, Inc.
......@@ -63,21 +64,24 @@ struct agp_kern_info {
struct vm_operations_struct *vm_ops;
};
/*
/*
* The agp_memory structure has information about the block of agp memory
* allocated. A caller may manipulate the next and prev pointers to link
* each allocated item into a list. These pointers are ignored by the backend.
* Everything else should never be written to, but the caller may read any of
* the items to detrimine the status of this block of agp memory.
* the items to determine the status of this block of agp memory.
*/
struct agp_bridge_data;
struct agp_memory {
int key;
struct agp_memory *next;
struct agp_memory *prev;
struct agp_bridge_data *bridge;
unsigned long *memory;
size_t page_count;
int key;
int num_scratch_pages;
unsigned long *memory;
off_t pg_start;
u32 type;
u32 physical;
......@@ -87,14 +91,19 @@ struct agp_memory {
#define AGP_NORMAL_MEMORY 0
extern struct agp_bridge_data *agp_bridge;
extern struct list_head agp_bridges;
extern struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *);
extern void agp_free_memory(struct agp_memory *);
extern struct agp_memory *agp_allocate_memory(size_t, u32);
extern int agp_copy_info(struct agp_kern_info *);
extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t, u32);
extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *);
extern int agp_bind_memory(struct agp_memory *, off_t);
extern int agp_unbind_memory(struct agp_memory *);
extern void agp_enable(u32);
extern int agp_backend_acquire(void);
extern void agp_backend_release(void);
extern void agp_enable(struct agp_bridge_data *, u32);
extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *);
extern void agp_backend_release(struct agp_bridge_data *);
#endif /* __KERNEL__ */
#endif /* _AGP_BACKEND_H */
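Taken together, the header now exposes a fully per-bridge kernel API: agp_allocate_memory(), agp_copy_info() and agp_enable() all take the handle returned by agp_backend_acquire(). A short sketch of the memory path under the new signatures, in the same shape as the i810fb/intelfb call sites earlier in this diff; the function name and error handling are abbreviated and illustrative:

/* Assumes 'bridge' was obtained via agp_backend_acquire(pdev), as sketched above. */
static struct agp_memory *example_map_pages(struct agp_bridge_data *bridge,
                                            size_t pages, off_t offset, u32 mode)
{
        struct agp_memory *mem;

        mem = agp_allocate_memory(bridge, pages, AGP_NORMAL_MEMORY);
        if (!mem)
                return NULL;

        if (agp_bind_memory(mem, offset)) {
                agp_free_memory(mem);
                return NULL;
        }

        agp_enable(bridge, mode);       /* program bridge and device command registers */
        return mem;
}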