Commit f8abea8f authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/agpgart

* master.kernel.org:/pub/scm/linux/kernel/git/davej/agpgart:
  [AGPGART] allow drm populated agp memory types cleanups
  [AGPGART] intel-agp: Use ARRAY_SIZE macro when appropriate
  [AGPGART] Add agp-type-to-mask-type method missing from some drivers.
  [AGPGART] Don't try to remap i810 registers on resume.
  [AGPGART] Allow drm-populated agp memory types
  [AGPGART] compat ioctl
parents ef294986 1c14cfbb
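
For context, an illustrative sketch (not part of the commit) of how an in-kernel consumer such as a DRM driver might exercise the user-populated memory types this series enables. The helper name example_bind_user_pages is made up; agp_allocate_memory(), agp_bind_memory(), AGP_USER_CACHED_MEMORY and the agp_type_to_mask_type hook are the interfaces touched below. The use of page_to_phys() is an assumption; real callers may need the arch's phys_to_gart() wrapper.

/* Hypothetical example, illustration only: allocate an agp_memory of a
 * user type, populate the page array ourselves, then bind it.  The
 * bridge's agp_type_to_mask_type hook then picks the PTE flags, e.g.
 * cached PTEs on i830-class hardware. */
#include <linux/agp_backend.h>
#include <linux/mm.h>
#include <asm/io.h>

static int example_bind_user_pages(struct agp_bridge_data *bridge,
				   struct page **pages, size_t count,
				   off_t pg_start)
{
	struct agp_memory *mem;
	size_t i;

	mem = agp_allocate_memory(bridge, count, AGP_USER_CACHED_MEMORY);
	if (mem == NULL)
		return -ENOMEM;

	/* "User-populated": the caller provides the backing pages. */
	for (i = 0; i < count; i++)
		mem->memory[i] = page_to_phys(pages[i]);
	mem->page_count = count;

	return agp_bind_memory(mem, pg_start);
}
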
agpgart-y := backend.o frontend.o generic.o isoch.o
obj-$(CONFIG_AGP) += agpgart.o
obj-$(CONFIG_COMPAT) += compat_ioctl.o
obj-$(CONFIG_AGP_ALI) += ali-agp.o
obj-$(CONFIG_AGP_ATI) += ati-agp.o
obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o
......
......@@ -114,6 +114,7 @@ struct agp_bridge_driver {
void (*free_by_type)(struct agp_memory *);
void *(*agp_alloc_page)(struct agp_bridge_data *);
void (*agp_destroy_page)(void *);
int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
};
struct agp_bridge_data {
......@@ -218,6 +219,7 @@ struct agp_bridge_data {
#define I810_PTE_MAIN_UNCACHED 0x00000000
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
#define I810_GFX_MEM_WIN_32M 0x00010000
......@@ -270,8 +272,16 @@ void global_cache_flush(void);
void get_agp_version(struct agp_bridge_data *bridge);
unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
unsigned long addr, int type);
int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
int type);
struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
/* generic functions for user-populated AGP memory types */
struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
void agp_alloc_page_array(size_t size, struct agp_memory *mem);
void agp_free_page_array(struct agp_memory *mem);
/* generic routines for agp>=3 */
int agp3_generic_fetch_size(void);
void agp3_generic_tlbflush(struct agp_memory *mem);
......@@ -288,6 +298,8 @@ extern struct aper_size_info_16 agp3_generic_sizes[];
extern int agp_off;
extern int agp_try_unsupported_boot;
long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
/* Chipset independent registers (from AGP Spec) */
#define AGP_APBASE 0x10
......
......@@ -214,6 +214,7 @@ static struct agp_bridge_driver ali_generic_bridge = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = ali_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver ali_m1541_bridge = {
......@@ -237,6 +238,7 @@ static struct agp_bridge_driver ali_m1541_bridge = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = m1541_alloc_page,
.agp_destroy_page = m1541_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
......
......@@ -91,6 +91,9 @@ static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start,
int num_entries, status;
void *temp;
if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
return -EINVAL;
temp = agp_bridge->current_size;
num_entries = A_SIZE_FIX(temp)->num_entries;
if ((pg_start + mem->page_count) > num_entries)
......@@ -142,6 +145,7 @@ struct agp_bridge_driver alpha_core_agp_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
struct agp_bridge_data *alpha_bridge;
......
......@@ -381,6 +381,7 @@ static struct agp_bridge_driver amd_irongate_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_device_ids amd_agp_device_ids[] __devinitdata =
......
......@@ -62,12 +62,18 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
int i, j, num_entries;
long long tmp;
int mask_type;
struct agp_bridge_data *bridge = mem->bridge;
u32 pte;
num_entries = agp_num_entries();
if (type != 0 || mem->type != 0)
if (type != mem->type)
return -EINVAL;
mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
if (mask_type != 0)
return -EINVAL;
/* Make sure we can fit the range in the gatt table. */
/* FIXME: could wrap */
......@@ -90,7 +96,7 @@ static int amd64_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
tmp = agp_bridge->driver->mask_memory(agp_bridge,
mem->memory[i], mem->type);
mem->memory[i], mask_type);
BUG_ON(tmp & 0xffffff0000000ffcULL);
pte = (tmp & 0x000000ff00000000ULL) >> 28;
......@@ -247,6 +253,7 @@ static struct agp_bridge_driver amd_8151_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
/* Some basic sanity checks for the aperture. */
......
......@@ -431,6 +431,7 @@ static struct agp_bridge_driver ati_generic_bridge = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
......
......@@ -43,7 +43,7 @@
* fix some real stupidity. It's only by chance we can bump
* past 0.99 at all due to some boolean logic error. */
#define AGPGART_VERSION_MAJOR 0
#define AGPGART_VERSION_MINOR 101
#define AGPGART_VERSION_MINOR 102
static const struct agp_version agp_current_version =
{
.major = AGPGART_VERSION_MAJOR,
......
/*
* AGPGART driver frontend compatibility ioctls
* Copyright (C) 2004 Silicon Graphics, Inc.
* Copyright (C) 2002-2003 Dave Jones
* Copyright (C) 1999 Jeff Hartmann
* Copyright (C) 1999 Precision Insight, Inc.
* Copyright (C) 1999 Xi Graphics, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/agpgart.h>
#include <asm/uaccess.h>
#include "agp.h"
#include "compat_ioctl.h"
static int compat_agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_info32 userinfo;
struct agp_kern_info kerninfo;
agp_copy_info(agp_bridge, &kerninfo);
userinfo.version.major = kerninfo.version.major;
userinfo.version.minor = kerninfo.version.minor;
userinfo.bridge_id = kerninfo.device->vendor |
(kerninfo.device->device << 16);
userinfo.agp_mode = kerninfo.mode;
userinfo.aper_base = (compat_long_t)kerninfo.aper_base;
userinfo.aper_size = kerninfo.aper_size;
userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
userinfo.pg_used = kerninfo.current_memory;
if (copy_to_user(arg, &userinfo, sizeof(userinfo)))
return -EFAULT;
return 0;
}
static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_region32 ureserve;
struct agp_region kreserve;
struct agp_client *client;
struct agp_file_private *client_priv;
DBG("");
if (copy_from_user(&ureserve, arg, sizeof(ureserve)))
return -EFAULT;
if ((unsigned) ureserve.seg_count >= ~0U/sizeof(struct agp_segment32))
return -EFAULT;
kreserve.pid = ureserve.pid;
kreserve.seg_count = ureserve.seg_count;
client = agp_find_client_by_pid(kreserve.pid);
if (kreserve.seg_count == 0) {
/* remove a client */
client_priv = agp_find_private(kreserve.pid);
if (client_priv != NULL) {
set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
}
if (client == NULL) {
/* client is already removed */
return 0;
}
return agp_remove_client(kreserve.pid);
} else {
struct agp_segment32 *usegment;
struct agp_segment *ksegment;
int seg;
if (ureserve.seg_count >= 16384)
return -EINVAL;
usegment = kmalloc(sizeof(*usegment) * ureserve.seg_count, GFP_KERNEL);
if (!usegment)
return -ENOMEM;
ksegment = kmalloc(sizeof(*ksegment) * kreserve.seg_count, GFP_KERNEL);
if (!ksegment) {
kfree(usegment);
return -ENOMEM;
}
if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
sizeof(*usegment) * ureserve.seg_count)) {
kfree(usegment);
kfree(ksegment);
return -EFAULT;
}
for (seg = 0; seg < ureserve.seg_count; seg++) {
ksegment[seg].pg_start = usegment[seg].pg_start;
ksegment[seg].pg_count = usegment[seg].pg_count;
ksegment[seg].prot = usegment[seg].prot;
}
kfree(usegment);
kreserve.seg_list = ksegment;
if (client == NULL) {
/* Create the client and add the segment */
client = agp_create_client(kreserve.pid);
if (client == NULL) {
kfree(ksegment);
return -ENOMEM;
}
client_priv = agp_find_private(kreserve.pid);
if (client_priv != NULL) {
set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
}
}
return agp_create_segment(client, &kreserve);
}
/* Will never really happen */
return -EINVAL;
}
static int compat_agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_memory *memory;
struct agp_allocate32 alloc;
DBG("");
if (copy_from_user(&alloc, arg, sizeof(alloc)))
return -EFAULT;
memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
if (memory == NULL)
return -ENOMEM;
alloc.key = memory->key;
alloc.physical = memory->physical;
if (copy_to_user(arg, &alloc, sizeof(alloc))) {
agp_free_memory_wrap(memory);
return -EFAULT;
}
return 0;
}
static int compat_agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_bind32 bind_info;
struct agp_memory *memory;
DBG("");
if (copy_from_user(&bind_info, arg, sizeof(bind_info)))
return -EFAULT;
memory = agp_find_mem_by_key(bind_info.key);
if (memory == NULL)
return -EINVAL;
return agp_bind_memory(memory, bind_info.pg_start);
}
static int compat_agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_memory *memory;
struct agp_unbind32 unbind;
DBG("");
if (copy_from_user(&unbind, arg, sizeof(unbind)))
return -EFAULT;
memory = agp_find_mem_by_key(unbind.key);
if (memory == NULL)
return -EINVAL;
return agp_unbind_memory(memory);
}
long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct agp_file_private *curr_priv = file->private_data;
int ret_val = -ENOTTY;
mutex_lock(&(agp_fe.agp_mutex));
if ((agp_fe.current_controller == NULL) &&
(cmd != AGPIOC_ACQUIRE32)) {
ret_val = -EINVAL;
goto ioctl_out;
}
if ((agp_fe.backend_acquired != TRUE) &&
(cmd != AGPIOC_ACQUIRE32)) {
ret_val = -EBUSY;
goto ioctl_out;
}
if (cmd != AGPIOC_ACQUIRE32) {
if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
ret_val = -EPERM;
goto ioctl_out;
}
/* Use the original pid of the controller,
* in case it's threaded */
if (agp_fe.current_controller->pid != curr_priv->my_pid) {
ret_val = -EBUSY;
goto ioctl_out;
}
}
switch (cmd) {
case AGPIOC_INFO32:
ret_val = compat_agpioc_info_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_ACQUIRE32:
ret_val = agpioc_acquire_wrap(curr_priv);
break;
case AGPIOC_RELEASE32:
ret_val = agpioc_release_wrap(curr_priv);
break;
case AGPIOC_SETUP32:
ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_RESERVE32:
ret_val = compat_agpioc_reserve_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_PROTECT32:
ret_val = agpioc_protect_wrap(curr_priv);
break;
case AGPIOC_ALLOCATE32:
ret_val = compat_agpioc_allocate_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_DEALLOCATE32:
ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
break;
case AGPIOC_BIND32:
ret_val = compat_agpioc_bind_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_UNBIND32:
ret_val = compat_agpioc_unbind_wrap(curr_priv, (void __user *) arg);
break;
}
ioctl_out:
DBG("ioctl returns %d\n", ret_val);
mutex_unlock(&(agp_fe.agp_mutex));
return ret_val;
}
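
For orientation, a hedged userspace-side sketch (not in the commit) of what this file enables: a 32-bit process issuing the standard agpgart ioctls on a 64-bit kernel is now routed through compat_agp_ioctl() above instead of failing with -ENOTTY. The program below is hypothetical and assumes the existing userspace ABI from <linux/agpgart.h>; aperture size is reported in megabytes by agp_copy_info().

/* Hypothetical 32-bit test program; compile with -m32 on a 64-bit box. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/agpgart.h>

int main(void)
{
	agp_info info;
	int fd = open("/dev/agpgart", O_RDWR);

	if (fd < 0)
		return 1;
	/* ACQUIRE makes this process the controller; INFO is then handled
	 * by compat_agpioc_info_wrap() on a 64-bit kernel. */
	if (ioctl(fd, AGPIOC_ACQUIRE) == 0 &&
	    ioctl(fd, AGPIOC_INFO, &info) == 0)
		printf("AGP mode 0x%x, aperture %zu MB\n",
		       info.agp_mode, info.aper_size);
	ioctl(fd, AGPIOC_RELEASE);
	close(fd);
	return 0;
}
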
/*
* Copyright (C) 1999 Jeff Hartmann
* Copyright (C) 1999 Precision Insight, Inc.
* Copyright (C) 1999 Xi Graphics, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _AGP_COMPAT_IOCTL_H
#define _AGP_COMPAT_IOCTL_H
#include <linux/compat.h>
#include <linux/agpgart.h>
#define AGPIOC_INFO32 _IOR (AGPIOC_BASE, 0, compat_uptr_t)
#define AGPIOC_ACQUIRE32 _IO (AGPIOC_BASE, 1)
#define AGPIOC_RELEASE32 _IO (AGPIOC_BASE, 2)
#define AGPIOC_SETUP32 _IOW (AGPIOC_BASE, 3, compat_uptr_t)
#define AGPIOC_RESERVE32 _IOW (AGPIOC_BASE, 4, compat_uptr_t)
#define AGPIOC_PROTECT32 _IOW (AGPIOC_BASE, 5, compat_uptr_t)
#define AGPIOC_ALLOCATE32 _IOWR(AGPIOC_BASE, 6, compat_uptr_t)
#define AGPIOC_DEALLOCATE32 _IOW (AGPIOC_BASE, 7, compat_int_t)
#define AGPIOC_BIND32 _IOW (AGPIOC_BASE, 8, compat_uptr_t)
#define AGPIOC_UNBIND32 _IOW (AGPIOC_BASE, 9, compat_uptr_t)
struct agp_info32 {
struct agp_version version; /* version of the driver */
u32 bridge_id; /* bridge vendor/device */
u32 agp_mode; /* mode info of bridge */
compat_long_t aper_base; /* base of aperture */
compat_size_t aper_size; /* size of aperture */
compat_size_t pg_total; /* max pages (swap + system) */
compat_size_t pg_system; /* max pages (system) */
compat_size_t pg_used; /* current pages used */
};
/*
* The "prot" down below needs still a "sleep" flag somehow ...
*/
struct agp_segment32 {
compat_off_t pg_start; /* starting page to populate */
compat_size_t pg_count; /* number of pages */
compat_int_t prot; /* prot flags for mmap */
};
struct agp_region32 {
compat_pid_t pid; /* pid of process */
compat_size_t seg_count; /* number of segments */
struct agp_segment32 *seg_list;
};
struct agp_allocate32 {
compat_int_t key; /* tag of allocation */
compat_size_t pg_count; /* number of pages */
u32 type; /* 0 == normal, other devspec */
u32 physical; /* device specific (some devices
* need a phys address of the
* actual page behind the gatt
* table) */
};
struct agp_bind32 {
compat_int_t key; /* tag of allocation */
compat_off_t pg_start; /* starting page to populate */
};
struct agp_unbind32 {
compat_int_t key; /* tag of allocation */
u32 priority; /* priority for paging out */
};
extern struct agp_front_data agp_fe;
int agpioc_acquire_wrap(struct agp_file_private *priv);
int agpioc_release_wrap(struct agp_file_private *priv);
int agpioc_protect_wrap(struct agp_file_private *priv);
int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg);
int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg);
struct agp_file_private *agp_find_private(pid_t pid);
struct agp_client *agp_create_client(pid_t id);
int agp_remove_client(pid_t id);
int agp_create_segment(struct agp_client *client, struct agp_region *region);
void agp_free_memory_wrap(struct agp_memory *memory);
struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
struct agp_memory *agp_find_mem_by_key(int key);
struct agp_client *agp_find_client_by_pid(pid_t id);
#endif /* _AGP_COMPAT_H */
......@@ -335,6 +335,7 @@ static struct agp_bridge_driver efficeon_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static int __devinit agp_efficeon_probe(struct pci_dev *pdev,
......
......@@ -41,9 +41,9 @@
#include <asm/pgtable.h>
#include "agp.h"
static struct agp_front_data agp_fe;
struct agp_front_data agp_fe;
static struct agp_memory *agp_find_mem_by_key(int key)
struct agp_memory *agp_find_mem_by_key(int key)
{
struct agp_memory *curr;
......@@ -159,7 +159,7 @@ static pgprot_t agp_convert_mmap_flags(int prot)
return vm_get_page_prot(prot_bits);
}
static int agp_create_segment(struct agp_client *client, struct agp_region *region)
int agp_create_segment(struct agp_client *client, struct agp_region *region)
{
struct agp_segment_priv **ret_seg;
struct agp_segment_priv *seg;
......@@ -211,7 +211,7 @@ static void agp_insert_into_pool(struct agp_memory * temp)
/* File private list routines */
static struct agp_file_private *agp_find_private(pid_t pid)
struct agp_file_private *agp_find_private(pid_t pid)
{
struct agp_file_private *curr;
......@@ -266,13 +266,13 @@ static void agp_remove_file_private(struct agp_file_private * priv)
* Wrappers for agp_free_memory & agp_allocate_memory
* These make sure that internal lists are kept updated.
*/
static void agp_free_memory_wrap(struct agp_memory *memory)
void agp_free_memory_wrap(struct agp_memory *memory)
{
agp_remove_from_pool(memory);
agp_free_memory(memory);
}
static struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
{
struct agp_memory *memory;
......@@ -484,7 +484,7 @@ static struct agp_controller *agp_find_controller_for_client(pid_t id)
return NULL;
}
static struct agp_client *agp_find_client_by_pid(pid_t id)
struct agp_client *agp_find_client_by_pid(pid_t id)
{
struct agp_client *temp;
......@@ -509,7 +509,7 @@ static void agp_insert_client(struct agp_client *client)
agp_fe.current_controller->num_clients++;
}
static struct agp_client *agp_create_client(pid_t id)
struct agp_client *agp_create_client(pid_t id)
{
struct agp_client *new_client;
......@@ -522,7 +522,7 @@ static struct agp_client *agp_create_client(pid_t id)
return new_client;
}
static int agp_remove_client(pid_t id)
int agp_remove_client(pid_t id)
{
struct agp_client *client;
struct agp_client *prev_client;
......@@ -746,7 +746,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
return 0;
}
static int agpioc_acquire_wrap(struct agp_file_private *priv)
int agpioc_acquire_wrap(struct agp_file_private *priv)
{
struct agp_controller *controller;
......@@ -789,14 +789,14 @@ static int agpioc_acquire_wrap(struct agp_file_private *priv)
return 0;
}
static int agpioc_release_wrap(struct agp_file_private *priv)
int agpioc_release_wrap(struct agp_file_private *priv)
{
DBG("");
agp_controller_release_current(agp_fe.current_controller, priv);
return 0;
}
static int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_setup mode;
......@@ -876,7 +876,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
return -EINVAL;
}
static int agpioc_protect_wrap(struct agp_file_private *priv)
int agpioc_protect_wrap(struct agp_file_private *priv)
{
DBG("");
/* This function is not currently implemented */
......@@ -892,6 +892,9 @@ static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
return -EFAULT;
if (alloc.type >= AGP_USER_TYPES)
return -EINVAL;
memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
if (memory == NULL)
......@@ -907,7 +910,7 @@ static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
return 0;
}
static int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
{
struct agp_memory *memory;
......@@ -1043,6 +1046,9 @@ static const struct file_operations agp_fops =
.read = agp_read,
.write = agp_write,
.ioctl = agp_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_agp_ioctl,
#endif
.mmap = agp_mmap,
.open = agp_open,
.release = agp_release,
......
......@@ -101,6 +101,63 @@ static int agp_get_key(void)
return -1;
}
/*
* Use kmalloc if possible for the page list. Otherwise fall back to
* vmalloc. This speeds things up and also saves memory for small AGP
* regions.
*/
void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
mem->memory = NULL;
mem->vmalloc_flag = 0;
if (size <= 2*PAGE_SIZE)
mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
if (mem->memory == NULL) {
mem->memory = vmalloc(size);
mem->vmalloc_flag = 1;
}
}
EXPORT_SYMBOL(agp_alloc_page_array);
void agp_free_page_array(struct agp_memory *mem)
{
if (mem->vmalloc_flag) {
vfree(mem->memory);
} else {
kfree(mem->memory);
}
}
EXPORT_SYMBOL(agp_free_page_array);
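
A brief hedged sketch of the calling convention the two exports above establish: the array is sized in bytes, small requests come from kmalloc with __GFP_NORETRY, larger ones fall back to vmalloc, and vmalloc_flag records which allocator was used so the matching free is chosen automatically. The helper below is hypothetical, for illustration only.

/* Hypothetical round-trip; would live in a file that includes
 * drivers/char/agp/agp.h for the prototypes. */
#include <linux/string.h>
#include "agp.h"

static int example_page_array_roundtrip(size_t nr_entries)
{
	struct agp_memory tmp;

	memset(&tmp, 0, sizeof(tmp));
	agp_alloc_page_array(nr_entries * sizeof(*tmp.memory), &tmp);
	if (tmp.memory == NULL)
		return -ENOMEM;

	/* ... fill tmp.memory[0..nr_entries-1] with GART addresses ... */

	agp_free_page_array(&tmp);	/* kfree or vfree, as recorded */
	return 0;
}
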
static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
struct agp_memory *new;
unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
if (new == NULL)
return NULL;
new->key = agp_get_key();
if (new->key < 0) {
kfree(new);
return NULL;
}
agp_alloc_page_array(alloc_size, new);
if (new->memory == NULL) {
agp_free_key(new->key);
kfree(new);
return NULL;
}
new->num_scratch_pages = 0;
return new;
}
struct agp_memory *agp_create_memory(int scratch_pages)
{
......@@ -116,7 +173,8 @@ struct agp_memory *agp_create_memory(int scratch_pages)
kfree(new);
return NULL;
}
new->memory = vmalloc(PAGE_SIZE * scratch_pages);
agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
if (new->memory == NULL) {
agp_free_key(new->key);
......@@ -124,6 +182,7 @@ struct agp_memory *agp_create_memory(int scratch_pages)
return NULL;
}
new->num_scratch_pages = scratch_pages;
new->type = AGP_NORMAL_MEMORY;
return new;
}
EXPORT_SYMBOL(agp_create_memory);
......@@ -146,6 +205,11 @@ void agp_free_memory(struct agp_memory *curr)
if (curr->is_bound == TRUE)
agp_unbind_memory(curr);
if (curr->type >= AGP_USER_TYPES) {
agp_generic_free_by_type(curr);
return;
}
if (curr->type != 0) {
curr->bridge->driver->free_by_type(curr);
return;
......@@ -157,7 +221,7 @@ void agp_free_memory(struct agp_memory *curr)
flush_agp_mappings();
}
agp_free_key(curr->key);
vfree(curr->memory);
agp_free_page_array(curr);
kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);
......@@ -188,6 +252,13 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
return NULL;
if (type >= AGP_USER_TYPES) {
new = agp_generic_alloc_user(page_count, type);
if (new)
new->bridge = bridge;
return new;
}
if (type != 0) {
new = bridge->driver->alloc_by_type(page_count, type);
if (new)
......@@ -960,6 +1031,7 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
off_t j;
void *temp;
struct agp_bridge_data *bridge;
int mask_type;
bridge = mem->bridge;
if (!bridge)
......@@ -995,7 +1067,11 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
num_entries -= agp_memory_reserved/PAGE_SIZE;
if (num_entries < 0) num_entries = 0;
if (type != 0 || mem->type != 0) {
if (type != mem->type)
return -EINVAL;
mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
if (mask_type != 0) {
/* The generic routines know nothing of memory types */
return -EINVAL;
}
......@@ -1018,7 +1094,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(bridge->driver->mask_memory(bridge, mem->memory[i], mem->type), bridge->gatt_table+j);
writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type),
bridge->gatt_table+j);
}
readl(bridge->gatt_table+j-1); /* PCI Posting. */
......@@ -1032,6 +1109,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
size_t i;
struct agp_bridge_data *bridge;
int mask_type;
bridge = mem->bridge;
if (!bridge)
......@@ -1040,7 +1118,11 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
if (mem->page_count == 0)
return 0;
if (type != 0 || mem->type != 0) {
if (type != mem->type)
return -EINVAL;
mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
if (mask_type != 0) {
/* The generic routines know nothing of memory types */
return -EINVAL;
}
......@@ -1056,22 +1138,40 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
}
EXPORT_SYMBOL(agp_generic_remove_memory);
struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);
void agp_generic_free_by_type(struct agp_memory *curr)
{
vfree(curr->memory);
agp_free_page_array(curr);
agp_free_key(curr->key);
kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);
struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
struct agp_memory *new;
int i;
int pages;
pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
new = agp_create_user_memory(page_count);
if (new == NULL)
return NULL;
for (i = 0; i < page_count; i++)
new->memory[i] = 0;
new->page_count = 0;
new->type = type;
new->num_scratch_pages = pages;
return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);
/*
* Basic Page Allocation Routines -
......@@ -1165,6 +1265,15 @@ unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
}
EXPORT_SYMBOL(agp_generic_mask_memory);
int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
int type)
{
if (type >= AGP_USER_TYPES)
return 0;
return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);
/*
* These functions are implemented according to the AGPv3 spec,
* which covers implementation details that had previously been
......
......@@ -438,6 +438,7 @@ struct agp_bridge_driver hp_zx1_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = 1,
};
......
......@@ -293,6 +293,9 @@ static int i460_insert_memory_small_io_page (struct agp_memory *mem,
pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
mem, pg_start, type, mem->memory[0]);
if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
return -EINVAL;
io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
temp = agp_bridge->current_size;
......@@ -396,6 +399,9 @@ static int i460_insert_memory_large_io_page (struct agp_memory *mem,
struct lp_desc *start, *end, *lp;
void *temp;
if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
return -EINVAL;
temp = agp_bridge->current_size;
num_entries = A_SIZE_8(temp)->num_entries;
......@@ -572,6 +578,7 @@ struct agp_bridge_driver intel_i460_driver = {
#endif
.alloc_by_type = agp_generic_alloc_by_type,
.free_by_type = agp_generic_free_by_type,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = 1,
};
......
......@@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include "agp.h"
......@@ -24,6 +25,9 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB)
extern int agp_memory_reserved;
/* Intel 815 register */
#define INTEL_815_APCONT 0x51
#define INTEL_815_ATTBASE_MASK ~0x1FFFFFFF
......@@ -68,12 +72,15 @@ static struct aper_size_info_fixed intel_i810_sizes[] =
#define AGP_DCACHE_MEMORY 1
#define AGP_PHYS_MEMORY 2
#define INTEL_AGP_CACHED_MEMORY 3
static struct gatt_mask intel_i810_masks[] =
{
{.mask = I810_PTE_VALID, .type = 0},
{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
{.mask = I810_PTE_VALID, .type = 0}
{.mask = I810_PTE_VALID, .type = 0},
{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
.type = INTEL_AGP_CACHED_MEMORY}
};
static struct _intel_i810_private {
......@@ -117,6 +124,7 @@ static int intel_i810_configure(void)
current_size = A_SIZE_FIX(agp_bridge->current_size);
if (!intel_i810_private.registers) {
pci_read_config_dword(intel_i810_private.i810_dev, I810_MMADDR, &temp);
temp &= 0xfff80000;
......@@ -125,6 +133,7 @@ static int intel_i810_configure(void)
printk(KERN_ERR PFX "Unable to remap memory.\n");
return -ENOMEM;
}
}
if ((readl(intel_i810_private.registers+I810_DRAM_CTL)
& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
......@@ -201,62 +210,79 @@ static void i8xx_destroy_pages(void *addr)
atomic_dec(&agp_bridge->current_memory_agp);
}
static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
int type)
{
if (type < AGP_USER_TYPES)
return type;
else if (type == AGP_USER_CACHED_MEMORY)
return INTEL_AGP_CACHED_MEMORY;
else
return 0;
}
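
A hedged illustration of what the hook above buys on i830-class hardware, assuming (as elsewhere in this driver) that the intel mask_memory() helper indexes the driver's masks[] table with the returned value: AGP_USER_CACHED_MEMORY selects the new intel_i810_masks[3] entry, so the written PTE carries I810_PTE_VALID | I830_PTE_SYSTEM_CACHED (0x7) rather than the plain valid bit. The function name below is hypothetical; the symbols it uses are introduced or extended by this patch.

/* Illustration only -- not commit code. */
static unsigned long example_i830_user_cached_pte(unsigned long paddr)
{
	int idx = intel_i830_type_to_mask_type(agp_bridge,
					       AGP_USER_CACHED_MEMORY);

	/* idx == INTEL_AGP_CACHED_MEMORY (3); masks[3].mask is
	 * I810_PTE_VALID | I830_PTE_SYSTEM_CACHED == 0x00000007 */
	return paddr | intel_i810_masks[idx].mask;
}
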
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
int i, j, num_entries;
void *temp;
int ret = -EINVAL;
int mask_type;
if (mem->page_count == 0)
return 0;
goto out;
temp = agp_bridge->current_size;
num_entries = A_SIZE_FIX(temp)->num_entries;
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
goto out_err;
for (j = pg_start; j < (pg_start + mem->page_count); j++) {
if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j)))
return -EBUSY;
if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
ret = -EBUSY;
goto out_err;
}
if (type != 0 || mem->type != 0) {
if ((type == AGP_DCACHE_MEMORY) && (mem->type == AGP_DCACHE_MEMORY)) {
/* special insert */
if (!mem->is_flushed) {
global_cache_flush();
mem->is_flushed = TRUE;
}
for (i = pg_start; i < (pg_start + mem->page_count); i++) {
writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID, intel_i810_private.registers+I810_PTE_BASE+(i*4));
}
readl(intel_i810_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
if (type != mem->type)
goto out_err;
agp_bridge->driver->tlb_flush(mem);
return 0;
}
if ((type == AGP_PHYS_MEMORY) && (mem->type == AGP_PHYS_MEMORY))
goto insert;
return -EINVAL;
}
mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
insert:
if (!mem->is_flushed) {
switch (mask_type) {
case AGP_DCACHE_MEMORY:
if (!mem->is_flushed)
global_cache_flush();
mem->is_flushed = TRUE;
for (i = pg_start; i < (pg_start + mem->page_count); i++) {
writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
intel_i810_private.registers+I810_PTE_BASE+(i*4));
}
readl(intel_i810_private.registers+I810_PTE_BASE+((i-1)*4));
break;
case AGP_PHYS_MEMORY:
case AGP_NORMAL_MEMORY:
if (!mem->is_flushed)
global_cache_flush();
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
mem->memory[i], mem->type),
mem->memory[i],
mask_type),
intel_i810_private.registers+I810_PTE_BASE+(j*4));
}
readl(intel_i810_private.registers+I810_PTE_BASE+((j-1)*4)); /* PCI Posting. */
readl(intel_i810_private.registers+I810_PTE_BASE+((j-1)*4));
break;
default:
goto out_err;
}
agp_bridge->driver->tlb_flush(mem);
return 0;
out:
ret = 0;
out_err:
mem->is_flushed = 1;
return ret;
}
static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
......@@ -337,12 +363,11 @@ static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
new->type = AGP_DCACHE_MEMORY;
new->page_count = pg_count;
new->num_scratch_pages = 0;
vfree(new->memory);
agp_free_page_array(new);
return new;
}
if (type == AGP_PHYS_MEMORY)
return alloc_agpphysmem_i8xx(pg_count, type);
return NULL;
}
......@@ -357,7 +382,7 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
gart_to_virt(curr->memory[0]));
global_flush_tlb();
}
vfree(curr->memory);
agp_free_page_array(curr);
}
kfree(curr);
}
......@@ -619,9 +644,11 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int
{
int i,j,num_entries;
void *temp;
int ret = -EINVAL;
int mask_type;
if (mem->page_count == 0)
return 0;
goto out;
temp = agp_bridge->current_size;
num_entries = A_SIZE_FIX(temp)->num_entries;
......@@ -631,34 +658,41 @@ static int intel_i830_insert_entries(struct agp_memory *mem,off_t pg_start, int
pg_start,intel_i830_private.gtt_entries);
printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
return -EINVAL;
goto out_err;
}
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
goto out_err;
/* The i830 can't check the GTT for entries since its read only,
* depend on the caller to make the correct offset decisions.
*/
if ((type != 0 && type != AGP_PHYS_MEMORY) ||
(mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
return -EINVAL;
if (type != mem->type)
goto out_err;
mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
mask_type != INTEL_AGP_CACHED_MEMORY)
goto out_err;
if (!mem->is_flushed) {
if (!mem->is_flushed)
global_cache_flush();
mem->is_flushed = TRUE;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
mem->memory[i], mem->type),
mem->memory[i], mask_type),
intel_i830_private.registers+I810_PTE_BASE+(j*4));
}
readl(intel_i830_private.registers+I810_PTE_BASE+((j-1)*4));
agp_bridge->driver->tlb_flush(mem);
return 0;
out:
ret = 0;
out_err:
mem->is_flushed = 1;
return ret;
}
static int intel_i830_remove_entries(struct agp_memory *mem,off_t pg_start,
......@@ -687,7 +721,6 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count,int type)
{
if (type == AGP_PHYS_MEMORY)
return alloc_agpphysmem_i8xx(pg_count, type);
/* always return NULL for other allocation types for now */
return NULL;
}
......@@ -734,9 +767,11 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
{
int i,j,num_entries;
void *temp;
int ret = -EINVAL;
int mask_type;
if (mem->page_count == 0)
return 0;
goto out;
temp = agp_bridge->current_size;
num_entries = A_SIZE_FIX(temp)->num_entries;
......@@ -746,33 +781,41 @@ static int intel_i915_insert_entries(struct agp_memory *mem,off_t pg_start,
pg_start,intel_i830_private.gtt_entries);
printk (KERN_INFO PFX "Trying to insert into local/stolen memory\n");
return -EINVAL;
goto out_err;
}
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
goto out_err;
/* The i830 can't check the GTT for entries since its read only,
/* The i915 can't check the GTT for entries since its read only,
* depend on the caller to make the correct offset decisions.
*/
if ((type != 0 && type != AGP_PHYS_MEMORY) ||
(mem->type != 0 && mem->type != AGP_PHYS_MEMORY))
return -EINVAL;
if (type != mem->type)
goto out_err;
if (!mem->is_flushed) {
mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
mask_type != INTEL_AGP_CACHED_MEMORY)
goto out_err;
if (!mem->is_flushed)
global_cache_flush();
mem->is_flushed = TRUE;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
mem->memory[i], mem->type), intel_i830_private.gtt+j);
mem->memory[i], mask_type), intel_i830_private.gtt+j);
}
readl(intel_i830_private.gtt+j-1);
readl(intel_i830_private.gtt+j-1);
agp_bridge->driver->tlb_flush(mem);
return 0;
out:
ret = 0;
out_err:
mem->is_flushed = 1;
return ret;
}
static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
......@@ -803,7 +846,7 @@ static int intel_i915_remove_entries(struct agp_memory *mem,off_t pg_start,
*/
static int intel_i9xx_fetch_size(void)
{
int num_sizes = sizeof(intel_i830_sizes) / sizeof(*intel_i830_sizes);
int num_sizes = ARRAY_SIZE(intel_i830_sizes);
int aper_size; /* size in megabytes */
int i;
......@@ -1384,6 +1427,7 @@ static struct agp_bridge_driver intel_generic_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_810_driver = {
......@@ -1408,6 +1452,7 @@ static struct agp_bridge_driver intel_810_driver = {
.free_by_type = intel_i810_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_815_driver = {
......@@ -1431,6 +1476,7 @@ static struct agp_bridge_driver intel_815_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_830_driver = {
......@@ -1455,6 +1501,7 @@ static struct agp_bridge_driver intel_830_driver = {
.free_by_type = intel_i810_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
};
static struct agp_bridge_driver intel_820_driver = {
......@@ -1478,6 +1525,7 @@ static struct agp_bridge_driver intel_820_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_830mp_driver = {
......@@ -1501,6 +1549,7 @@ static struct agp_bridge_driver intel_830mp_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_840_driver = {
......@@ -1524,6 +1573,7 @@ static struct agp_bridge_driver intel_840_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_845_driver = {
......@@ -1547,6 +1597,7 @@ static struct agp_bridge_driver intel_845_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_850_driver = {
......@@ -1570,6 +1621,7 @@ static struct agp_bridge_driver intel_850_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_860_driver = {
......@@ -1593,6 +1645,7 @@ static struct agp_bridge_driver intel_860_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver intel_915_driver = {
......@@ -1617,6 +1670,7 @@ static struct agp_bridge_driver intel_915_driver = {
.free_by_type = intel_i810_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
};
static struct agp_bridge_driver intel_i965_driver = {
......@@ -1641,6 +1695,7 @@ static struct agp_bridge_driver intel_i965_driver = {
.free_by_type = intel_i810_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = intel_i830_type_to_mask_type,
};
static struct agp_bridge_driver intel_7505_driver = {
......@@ -1664,6 +1719,7 @@ static struct agp_bridge_driver intel_7505_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static int find_i810(u16 device)
......
......@@ -310,6 +310,7 @@ static struct agp_bridge_driver nvidia_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static int __devinit agp_nvidia_probe(struct pci_dev *pdev,
......
......@@ -228,6 +228,7 @@ struct agp_bridge_driver parisc_agp_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = 1,
};
......
......@@ -265,6 +265,7 @@ struct agp_bridge_driver sgi_tioca_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = sgi_tioca_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = 1,
.needs_scratch_page = 0,
.num_aperture_sizes = 1,
......
......@@ -140,6 +140,7 @@ static struct agp_bridge_driver sis_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_device_ids sis_agp_device_ids[] __devinitdata =
......
......@@ -444,6 +444,7 @@ static struct agp_bridge_driver sworks_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
......
......@@ -510,6 +510,7 @@ struct agp_bridge_driver uninorth_agp_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = 1,
};
......@@ -534,6 +535,7 @@ struct agp_bridge_driver u3_agp_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
.cant_use_aperture = 1,
.needs_scratch_page = 1,
};
......
......@@ -191,6 +191,7 @@ static struct agp_bridge_driver via_agp3_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_bridge_driver via_driver = {
......@@ -214,6 +215,7 @@ static struct agp_bridge_driver via_driver = {
.free_by_type = agp_generic_free_by_type,
.agp_alloc_page = agp_generic_alloc_page,
.agp_destroy_page = agp_generic_destroy_page,
.agp_type_to_mask_type = agp_generic_type_to_mask_type,
};
static struct agp_device_ids via_agp_device_ids[] __devinitdata =
......
......@@ -87,10 +87,15 @@ struct agp_memory {
u32 physical;
u8 is_bound;
u8 is_flushed;
u8 vmalloc_flag;
};
#define AGP_NORMAL_MEMORY 0
#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
extern struct agp_bridge_data *agp_bridge;
extern struct list_head agp_bridges;
......
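
Finally, a small hedged note on the type-space split added to agp_backend.h above: values below 1<<16 remain driver-private types and mask indices, while AGP_USER_TYPES and up are the DRM-visible types that agp_type_to_mask_type() translates, and which the frontend rejects from userspace (see the alloc.type check in frontend.c). A trivial, hypothetical helper makes the boundary explicit:

/* Hypothetical helper, illustration only. */
#include <linux/agp_backend.h>

static int agp_type_is_user(int type)
{
	/* 0 .. AGP_USER_TYPES-1 : driver-private types / mask indices
	 * AGP_USER_TYPES and up : AGP_USER_MEMORY, AGP_USER_CACHED_MEMORY */
	return type >= AGP_USER_TYPES;
}
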