Commit c198ff87 authored by James Simmons's avatar James Simmons

Merge maxwell.earthlink.net:/usr/src/linus-2.5

into maxwell.earthlink.net:/usr/src/fbdev-2.5
parents d07f16f6 e2a6cbc0
......@@ -29,6 +29,7 @@ obj-$(CONFIG_X86_NUMAQ) += numaq.o
obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_EDD) += edd.o
obj-$(CONFIG_MODULES) += module.o
obj-y += sysenter.o
EXTRA_AFLAGS := -traditional
......
......@@ -94,7 +94,7 @@ VM_MASK = 0x00020000
movl %edx, %ds; \
movl %edx, %es;
#define RESTORE_ALL \
#define RESTORE_REGS \
popl %ebx; \
popl %ecx; \
popl %edx; \
......@@ -104,14 +104,25 @@ VM_MASK = 0x00020000
popl %eax; \
1: popl %ds; \
2: popl %es; \
addl $4, %esp; \
3: iret; \
.section .fixup,"ax"; \
4: movl $0,(%esp); \
3: movl $0,(%esp); \
jmp 1b; \
5: movl $0,(%esp); \
4: movl $0,(%esp); \
jmp 2b; \
6: pushl %ss; \
.previous; \
.section __ex_table,"a";\
.align 4; \
.long 1b,3b; \
.long 2b,4b; \
.previous
#define RESTORE_ALL \
RESTORE_REGS \
addl $4, %esp; \
1: iret; \
.section .fixup,"ax"; \
2: pushl %ss; \
popl %ds; \
pushl %ss; \
popl %es; \
......@@ -120,11 +131,11 @@ VM_MASK = 0x00020000
.previous; \
.section __ex_table,"a";\
.align 4; \
.long 1b,4b; \
.long 2b,5b; \
.long 3b,6b; \
.long 1b,2b; \
.previous
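For context (not part of this commit): the .section __ex_table pairs emitted above are (faulting-instruction, fixup) address pairs. When one of the tagged instructions faults, the trap handler searches these tables and, on a hit, resumes at the fixup address instead of oopsing. A minimal sketch of that lookup, modeled on the kernel's search_one_table():

#include <stdio.h>

/* Illustrative sketch of the __ex_table lookup: entries are sorted by
 * faulting address; a hit redirects the trap handler to the fixup. */
struct exception_table_entry {
	unsigned long insn;	/* address of the faulting instruction */
	unsigned long fixup;	/* address to resume at after the fault */
};

static unsigned long
search_one_table(const struct exception_table_entry *first,
		 const struct exception_table_entry *last,
		 unsigned long value)
{
	while (first <= last) {
		const struct exception_table_entry *mid =
			first + (last - first) / 2;
		if (mid->insn == value)
			return mid->fixup;	/* found: jump here instead */
		if (mid->insn < value)
			first = mid + 1;
		else
			last = mid - 1;
	}
	return 0;			/* no fixup: the fault is fatal */
}

int main(void)
{
	struct exception_table_entry tab[] = {	/* made-up addresses */
		{ 0x1000, 0x9000 },
		{ 0x1004, 0x9008 },
	};
	printf("fixup for 0x1004 = %#lx\n",
	       search_one_table(tab, tab + 1, 0x1004));
	return 0;
}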
ENTRY(lcall7)
pushfl # We get a different stack layout with call
# gates, which has to be cleaned up later..
......@@ -220,6 +231,40 @@ need_resched:
jmp need_resched
#endif
/* Points to after the "sysenter" instruction in the vsyscall page */
#define SYSENTER_RETURN 0xfffff007
# sysenter call handler stub
ALIGN
ENTRY(sysenter_entry)
sti
pushl $(__USER_DS)
pushl %ebp
pushfl
pushl $(__USER_CS)
pushl $SYSENTER_RETURN
pushl %eax
SAVE_ALL
GET_THREAD_INFO(%ebx)
cmpl $(NR_syscalls), %eax
jae syscall_badsys
testb $_TIF_SYSCALL_TRACE,TI_FLAGS(%ebx)
jnz syscall_trace_entry
call *sys_call_table(,%eax,4)
movl %eax,EAX(%esp)
cli
movl TI_FLAGS(%ebx), %ecx
testw $_TIF_ALLWORK_MASK, %cx
jne syscall_exit_work
RESTORE_REGS
movl 4(%esp),%edx
movl 16(%esp),%ecx
sti
sysexit
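A note on the five pushes at the top of sysenter_entry (illustrative, not part of the commit): they fabricate the same frame an int $0x80 trap would have built, so the common exit paths (signal delivery, tracing, the iret slow path) still work unchanged. A sketch of the layout, lowest address first, with field names following the tail of the i386 struct pt_regs:

#include <stdio.h>
#include <stddef.h>

/* Frame faked by sysenter_entry; the last push lands lowest.
 * SYSENTER_RETURN plays the role of the user EIP. */
struct fake_iret_frame {
	long eip;	/* pushl $SYSENTER_RETURN */
	long xcs;	/* pushl $(__USER_CS) */
	long eflags;	/* pushfl */
	long esp;	/* pushl %ebp: user stack pointer lives in %ebp */
	long xss;	/* pushl $(__USER_DS) */
};

int main(void)
{
	/* matches the "movl 4(%esp),%edx; movl 16(%esp),%ecx" exit path
	 * above, once the orig_eax slot below this frame is counted in */
	printf("eip at +%zu, esp at +%zu\n",
	       offsetof(struct fake_iret_frame, eip),
	       offsetof(struct fake_iret_frame, esp));
	return 0;
}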
# system call handler stub
ALIGN
ENTRY(system_call)
......
......@@ -414,8 +414,8 @@ ENTRY(cpu_gdt_table)
.quad 0x0000000000000000 /* 0x0b reserved */
.quad 0x0000000000000000 /* 0x13 reserved */
.quad 0x0000000000000000 /* 0x1b reserved */
.quad 0x00cffa000000ffff /* 0x23 user 4GB code at 0x00000000 */
.quad 0x00cff2000000ffff /* 0x2b user 4GB data at 0x00000000 */
.quad 0x0000000000000000 /* 0x20 unused */
.quad 0x0000000000000000 /* 0x28 unused */
.quad 0x0000000000000000 /* 0x33 TLS entry 1 */
.quad 0x0000000000000000 /* 0x3b TLS entry 2 */
.quad 0x0000000000000000 /* 0x43 TLS entry 3 */
......@@ -425,22 +425,25 @@ ENTRY(cpu_gdt_table)
.quad 0x00cf9a000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
.quad 0x00cf92000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
.quad 0x0000000000000000 /* 0x70 TSS descriptor */
.quad 0x0000000000000000 /* 0x78 LDT descriptor */
.quad 0x00cffa000000ffff /* 0x73 user 4GB code at 0x00000000 */
.quad 0x00cff2000000ffff /* 0x7b user 4GB data at 0x00000000 */
.quad 0x0000000000000000 /* 0x80 TSS descriptor */
.quad 0x0000000000000000 /* 0x88 LDT descriptor */
/* Segments used for calling PnP BIOS */
.quad 0x00c09a0000000000 /* 0x80 32-bit code */
.quad 0x00809a0000000000 /* 0x88 16-bit code */
.quad 0x0080920000000000 /* 0x90 16-bit data */
.quad 0x0080920000000000 /* 0x98 16-bit data */
.quad 0x00c09a0000000000 /* 0x90 32-bit code */
.quad 0x00809a0000000000 /* 0x98 16-bit code */
.quad 0x0080920000000000 /* 0xa0 16-bit data */
.quad 0x0080920000000000 /* 0xa8 16-bit data */
.quad 0x0080920000000000 /* 0xb0 16-bit data */
/*
* The APM segments have byte granularity and their bases
* and limits are set at run time.
*/
.quad 0x00409a0000000000 /* 0xa8 APM CS code */
.quad 0x00009a0000000000 /* 0xb0 APM CS 16 code (16 bit) */
.quad 0x0040920000000000 /* 0xb8 APM DS data */
.quad 0x00409a0000000000 /* 0xb8 APM CS code */
.quad 0x00009a0000000000 /* 0xc0 APM CS 16 code (16 bit) */
.quad 0x0040920000000000 /* 0xc8 APM DS data */
#if CONFIG_SMP
.fill (NR_CPUS-1)*GDT_ENTRIES,8,0 /* other CPU's GDT */
......
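An aside on reading the descriptor quads above (illustrative, not part of the commit): comments like "user 4GB code at 0x00000000" fall straight out of the segment-descriptor bit layout. A userspace sketch decoding two of the entries:

#include <stdio.h>
#include <stdint.h>

static void decode_desc(uint64_t d)
{
	uint32_t base   = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
	uint32_t limit  = (d & 0xffff) | (((d >> 48) & 0xf) << 16);
	uint8_t  access = (d >> 40) & 0xff;	/* P, DPL, S, type */
	uint8_t  flags  = (d >> 52) & 0xf;	/* G, D/B, L, AVL */

	if (flags & 0x8)			/* G=1: 4KB granularity */
		limit = (limit << 12) | 0xfff;
	printf("base=%#x limit=%#x dpl=%d %s\n",
	       base, limit, (access >> 5) & 3,
	       (access & 0x8) ? "code" : "data");
}

int main(void)
{
	decode_desc(0x00cffa000000ffffULL);	/* user 4GB code: dpl=3 */
	decode_desc(0x00cf92000000ffffULL);	/* kernel 4GB data: dpl=0 */
	return 0;
}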
/*
* linux/arch/i386/kernel/sysenter.c
*
* (C) Copyright 2002 Linus Torvalds
*
* This file contains the needed initializations to support sysenter.
*/
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
extern asmlinkage void sysenter_entry(void);
static void __init enable_sep_cpu(void *info)
{
unsigned long page = __get_free_page(GFP_ATOMIC);
int cpu = get_cpu();
unsigned long *esp0_ptr = &(init_tss + cpu)->esp0;
unsigned long rel32;
rel32 = (unsigned long) sysenter_entry - (page+11);
*(short *) (page+0) = 0x258b; /* movl xxxxx,%esp */
*(long **) (page+2) = esp0_ptr;
*(char *) (page+6) = 0xe9; /* jmp rel32 */
*(long *) (page+7) = rel32;
wrmsr(0x174, __KERNEL_CS, 0); /* SYSENTER_CS_MSR */
wrmsr(0x175, page+PAGE_SIZE, 0); /* SYSENTER_ESP_MSR */
wrmsr(0x176, page, 0); /* SYSENTER_EIP_MSR */
printk("Enabling SEP on CPU %d\n", cpu);
put_cpu();
}
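For reference (not part of the commit): the bytes enable_sep_cpu() pokes into the per-CPU page form an 11-byte trampoline that loads the kernel stack from init_tss[cpu].esp0 and then jumps, relative, to sysenter_entry. A userspace sketch with made-up addresses that reproduces the encoding:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	unsigned char page[11];
	uint32_t esp0_ptr = 0xc02f1234;	/* assumed &init_tss[cpu].esp0 */
	uint32_t entry    = 0xc010a000;	/* assumed sysenter_entry */
	uint32_t base     = 0xc5600000;	/* assumed trampoline page */
	int32_t  rel32    = (int32_t)(entry - (base + 11));

	page[0] = 0x8b; page[1] = 0x25;	/* movl esp0_ptr, %esp */
	memcpy(page + 2, &esp0_ptr, 4);
	page[6] = 0xe9;			/* jmp rel32 (rel. to next insn) */
	memcpy(page + 7, &rel32, 4);

	for (int i = 0; i < 11; i++)
		printf("%02x ", page[i]);
	printf("\n");
	return 0;
}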
static int __init sysenter_setup(void)
{
static const char int80[] = {
0xcd, 0x80, /* int $0x80 */
0xc3 /* ret */
};
static const char sysent[] = {
0x55, /* push %ebp */
0x51, /* push %ecx */
0x52, /* push %edx */
0x89, 0xe5, /* movl %esp,%ebp */
0x0f, 0x34, /* sysenter */
0x5a, /* pop %edx */
0x59, /* pop %ecx */
0x5d, /* pop %ebp */
0xc3 /* ret */
};
unsigned long page = get_zeroed_page(GFP_ATOMIC);
__set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY);
memcpy((void *) page, int80, sizeof(int80));
if (!boot_cpu_has(X86_FEATURE_SEP))
return 0;
memcpy((void *) page, sysent, sizeof(sysent));
enable_sep_cpu(NULL);
smp_call_function(enable_sep_cpu, NULL, 1, 1);
return 0;
}
__initcall(sysenter_setup);
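Usage sketch (illustrative, not part of this commit): user code never needs to care whether the fixmap page holds the int $0x80 stub or the sysenter stub; it just calls the page. Assuming the FIX_VSYSCALL page sits at 0xfffff000 (consistent with SYSENTER_RETURN = 0xfffff007 above), a hypothetical three-argument wrapper could look like:

#include <stdio.h>

/* Assumed address of the FIX_VSYSCALL page mapped read-only into
 * every process by sysenter_setup() above. */
#define VSYSCALL_ENTRY 0xfffff000UL

/* Hypothetical wrapper: nr in %eax, args in %ebx/%ecx/%edx, exactly
 * as with int $0x80. The sysent stub preserves %ebp/%ecx/%edx. */
static inline long vsyscall3(long nr, long a1, long a2, long a3)
{
	long ret;
	asm volatile("call *%1"
		     : "=a" (ret)
		     : "r" (VSYSCALL_ENTRY), "0" (nr),
		       "b" (a1), "c" (a2), "d" (a3)
		     : "memory");
	return ret;
}

int main(void)
{
	printf("getpid() = %ld\n", vsyscall3(20 /* __NR_getpid */, 0, 0, 0));
	return 0;
}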
......@@ -72,7 +72,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
if (page_table != pte_offset_kernel(pmd, 0))
BUG();
......
......@@ -29,8 +29,11 @@ config AGP_GART
bool "/dev/agpgart (AGP Support)"
depends on GART_IOMMU
config AGP3
bool "AGP 3.0 compliance (EXPERIMENTAL)"
config AGP_INTEL
bool "Intel 440LX/BX/GX and I815/I820/I830M/I830MP/I840/I845/I850/I860 support"
tristate "Intel 440LX/BX/GX and I815/I820/I830M/I830MP/I840/I845/I850/I860 support"
depends on AGP
help
This option gives you AGP support for the GLX component of the
......@@ -40,7 +43,7 @@ config AGP_INTEL
use GLX or DRI. If unsure, say N.
#config AGP_I810
# bool "Intel I810/I815/I830M (on-board) support"
# tristate "Intel I810/I815/I830M (on-board) support"
# depends on AGP
# help
# This option gives you AGP support for the Xserver on the Intel 810
......@@ -48,7 +51,7 @@ config AGP_INTEL
# is required to do any useful video modes with these boards.
config AGP_VIA
bool "VIA chipset support"
tristate "VIA chipset support"
depends on AGP
help
This option gives you AGP support for the GLX component of the
......@@ -58,7 +61,7 @@ config AGP_VIA
use GLX or DRI. If unsure, say N.
config AGP_AMD
bool "AMD Irongate, 761, and 762 support"
tristate "AMD Irongate, 761, and 762 support"
depends on AGP
help
This option gives you AGP support for the GLX component of the
......@@ -68,7 +71,7 @@ config AGP_AMD
use GLX or DRI. If unsure, say N.
config AGP_SIS
bool "Generic SiS support"
tristate "Generic SiS support"
depends on AGP
help
This option gives you AGP support for the GLX component of the "soon
......@@ -81,7 +84,7 @@ config AGP_SIS
use GLX or DRI. If unsure, say N.
config AGP_ALI
bool "ALI chipset support"
tristate "ALI chipset support"
depends on AGP
---help---
This option gives you AGP support for the GLX component of the
......@@ -99,14 +102,14 @@ config AGP_ALI
use GLX or DRI. If unsure, say N.
config AGP_SWORKS
bool "Serverworks LE/HE support"
tristate "Serverworks LE/HE support"
depends on AGP
help
Say Y here to support the Serverworks AGP card. See
<http://www.serverworks.com/> for product descriptions and images.
config AGP_AMD_8151
bool "AMD 8151 support"
tristate "AMD 8151 support"
depends on AGP
default GART_IOMMU
help
......@@ -114,16 +117,28 @@ config AGP_AMD_8151
GART on the AMD Athlon64/Opteron ("Hammer") CPUs.
config AGP_I460
bool "Intel 460GX support"
tristate "Intel 460GX support"
depends on AGP && IA64
help
This option gives you AGP GART support for the Intel 460GX chipset
for IA64 processors.
config AGP_HP_ZX1
bool "HP ZX1 AGP support"
tristate "HP ZX1 AGP support"
depends on AGP && IA64
help
This option gives you AGP GART support for the HP ZX1 chipset
for IA64 processors.
# Put AGP 3.0 entries below here.
config AGP_I7505
tristate "Intel 7205/7505 support (AGP 3.0)"
depends on AGP3
help
This option gives you AGP support for the GLX component of the
XFree86 4.x on Intel I7505 chipsets.
You should say Y here if you use XFree86 3.3.6 or 4.x and want to
use GLX or DRI. If unsure, say N.
......@@ -8,6 +8,7 @@ export-objs := backend.o
agpgart-y := backend.o frontend.o generic.o
agpgart-objs := $(agpgart-y)
obj-$(CONFIG_AGP) += agpgart.o
obj-$(CONFIG_AGP3) += generic-3.0.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
obj-$(CONFIG_AGP_VIA) += via-agp.o
......@@ -19,3 +20,5 @@ obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
obj-$(CONFIG_AGP_AMD_8151) += amd-k8-agp.o
obj-$(CONFIG_AGP_I7x05) += i7x05-agp.o
......@@ -46,28 +46,6 @@ int agp_generic_suspend(void);
void agp_generic_resume(void);
void agp_free_key(int key);
/* chipset specific init routines. */
/*
int __init ali_generic_setup (struct pci_dev *pdev);
int __init amd_irongate_setup (struct pci_dev *pdev);
int __init amd_8151_setup (struct pci_dev *pdev);
int __init hp_zx1_setup (struct pci_dev *pdev);
int __init intel_i460_setup (struct pci_dev *pdev);
int __init intel_generic_setup (struct pci_dev *pdev);
int __init intel_i810_setup(struct pci_dev *i810_dev);
int __init intel_815_setup(struct pci_dev *pdev);
int __init intel_i830_setup(struct pci_dev *i830_dev);
int __init intel_820_setup (struct pci_dev *pdev);
int __init intel_830mp_setup (struct pci_dev *pdev);
int __init intel_840_setup (struct pci_dev *pdev);
int __init intel_845_setup (struct pci_dev *pdev);
int __init intel_850_setup (struct pci_dev *pdev);
int __init intel_860_setup (struct pci_dev *pdev);
int __init serverworks_setup (struct pci_dev *pdev);
int __init sis_generic_setup (struct pci_dev *pdev);
int __init via_generic_setup (struct pci_dev *pdev);
*/
#define PFX "agpgart: "
int agp_register_driver (struct pci_dev *dev);
......
......@@ -138,8 +138,8 @@ static int amd_create_gatt_table(void)
return retval;
}
agp_bridge.gatt_table_real = (u32 *)page_dir.real;
agp_bridge.gatt_table = (u32 *)page_dir.remapped;
agp_bridge.gatt_table_real = (unsigned long *)page_dir.real;
agp_bridge.gatt_table = (unsigned long *)page_dir.remapped;
agp_bridge.gatt_bus_addr = virt_to_phys(page_dir.real);
/* Get the address for the gart region.
......@@ -165,8 +165,8 @@ static int amd_free_gatt_table(void)
{
struct amd_page_map page_dir;
page_dir.real = (u32 *)agp_bridge.gatt_table_real;
page_dir.remapped = (u32 *)agp_bridge.gatt_table;
page_dir.real = (unsigned long *)agp_bridge.gatt_table_real;
page_dir.remapped = (unsigned long *)agp_bridge.gatt_table;
amd_free_gatt_pages();
amd_free_page_map(&page_dir);
......
......@@ -151,7 +151,7 @@ static int amd_x86_64_fetch_size(void)
}
static void inline flush_x86_64_tlb(struct pci_dev *dev)
static void flush_x86_64_tlb(struct pci_dev *dev)
{
u32 tmp;
......
......@@ -36,8 +36,11 @@
#include <linux/agp_backend.h>
#include "agp.h"
#define AGPGART_VERSION_MAJOR 1
#define AGPGART_VERSION_MINOR 0
/* Due to XFree86 brain-damage, we can't go to 1.0 until they
* fix some real stupidity. It's only by chance we can bump
* past 0.99 at all due to some boolean logic error. */
#define AGPGART_VERSION_MAJOR 0
#define AGPGART_VERSION_MINOR 100
struct agp_bridge_data agp_bridge = { .type = NOT_SUPPORTED };
......@@ -258,7 +261,7 @@ int agp_register_driver (struct pci_dev *dev)
return 0;
}
int __exit agp_unregister_driver(void)
int agp_unregister_driver(void)
{
agp_bridge.type = NOT_SUPPORTED;
pm_unregister_all(agp_power);
......@@ -269,8 +272,23 @@ int __exit agp_unregister_driver(void)
return 0;
}
int __exit agp_exit(void)
{
if (agp_count==0)
return -EBUSY;
return 0;
}
int __init agp_init(void)
{
static int already_initialised=0;
if (already_initialised!=0)
return 0;
already_initialised = 1;
memset(&agp_bridge, 0, sizeof(struct agp_bridge_data));
agp_bridge.type = NOT_SUPPORTED;
......@@ -281,11 +299,13 @@ int __init agp_init(void)
#ifndef CONFIG_GART_IOMMU
module_init(agp_init);
module_exit(agp_exit);
#endif
EXPORT_SYMBOL(agp_backend_acquire);
EXPORT_SYMBOL(agp_backend_release);
EXPORT_SYMBOL_GPL(agp_register_driver);
EXPORT_SYMBOL_GPL(agp_unregister_driver);
MODULE_AUTHOR("Dave Jones <davej@codemonkey.org.uk>");
MODULE_LICENSE("GPL and additional rights");
......@@ -1062,9 +1062,9 @@ static struct file_operations agp_fops =
static struct miscdevice agp_miscdev =
{
AGPGART_MINOR,
"agpgart",
&agp_fops
.minor = AGPGART_MINOR,
.name = "agpgart",
.fops = &agp_fops
};
int __init agp_frontend_initialize(void)
......@@ -1079,7 +1079,7 @@ int __init agp_frontend_initialize(void)
return 0;
}
void __exit agp_frontend_cleanup(void)
void agp_frontend_cleanup(void)
{
misc_deregister(&agp_miscdev);
}
......
#include <linux/list.h>
#include <linux/pci.h>
//#include <linux/pagemap.h>
//#include <linux/miscdevice.h>
//#include <linux/pm.h>
#include <linux/agp_backend.h>
#include "agp.h"
/* Generic AGP 3.0 enabling routines */
struct agp_3_0_dev {
struct list_head list;
u8 capndx;
u32 maxbw;
struct pci_dev *dev;
};
static int agp_3_0_dev_list_insert(struct list_head *head, struct list_head *new)
{
struct agp_3_0_dev *cur, *n = list_entry(new, struct agp_3_0_dev, list);
struct list_head *pos;
list_for_each(pos, head) {
cur = list_entry(pos, struct agp_3_0_dev, list);
if(cur->maxbw > n->maxbw)
break;
}
list_add_tail(new, pos);
return 0;
}
static int agp_3_0_dev_list_sort(struct agp_3_0_dev *list, unsigned int ndevs)
{
struct agp_3_0_dev *cur;
struct pci_dev *dev;
struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
u32 nistat;
INIT_LIST_HEAD(head);
for(pos = start; pos != head;) {
cur = list_entry(pos, struct agp_3_0_dev, list);
dev = cur->dev;
pci_read_config_dword(dev, cur->capndx + 0x0c, &nistat);
cur->maxbw = (nistat >> 16) & 0xff;
tmp = pos;
pos = pos->next;
agp_3_0_dev_list_insert(head, tmp);
}
return 0;
}
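The two helpers above are a straightforward insertion sort keyed on each device's NISTAT maxbw field. A userspace sketch of the same ascending-order insertion, using a plain singly-linked list in place of list_head:

#include <stdio.h>

struct dev { unsigned maxbw; struct dev *next; };

/* insert n before the first node with a larger maxbw, i.e. after any
 * nodes of equal maxbw, matching the list_add_tail() logic above */
static void insert_sorted(struct dev **head, struct dev *n)
{
	struct dev **pos = head;
	while (*pos && (*pos)->maxbw <= n->maxbw)
		pos = &(*pos)->next;
	n->next = *pos;
	*pos = n;
}

int main(void)
{
	struct dev d[3] = { {6}, {2}, {4} };
	struct dev *head = NULL;
	for (int i = 0; i < 3; i++)
		insert_sorted(&head, &d[i]);
	for (struct dev *p = head; p; p = p->next)
		printf("%u ", p->maxbw);	/* prints: 2 4 6 */
	printf("\n");
	return 0;
}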
/*
* Initialize all isochronous transfer parameters for an AGP 3.0
* node (i.e. a host bridge in combination with the adapters
* lying behind it...)
*/
static int agp_3_0_isochronous_node_enable(struct agp_3_0_dev *dev_list, unsigned int ndevs)
{
/*
* Convenience structure to make the calculations clearer
* here. The field names come straight from the AGP 3.0 spec.
*/
struct isoch_data {
u32 maxbw;
u32 n;
u32 y;
u32 l;
u32 rq;
struct agp_3_0_dev *dev;
};
struct pci_dev *td = agp_bridge.dev, *dev;
struct list_head *head = &dev_list->list, *pos;
struct agp_3_0_dev *cur;
struct isoch_data *master, target;
unsigned int cdev = 0;
u32 mnistat, tnistat, tstatus, mcmd;
u16 tnicmd, mnicmd;
u8 mcapndx;
u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
u32 step, rem, rem_isoch, rem_async;
int ret = 0;
/*
* We'll work with an array of isoch_data's (one for each
* device in dev_list) throughout this function.
*/
if((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) {
ret = -ENOMEM;
goto get_out;
}
/*
* Sort the device list by maxbw. We need to do this because the
* spec suggests that the devices with the smallest requirements
* have their resources allocated first, with all remaining resources
* falling to the device with the largest requirement.
*
* We don't exactly do this, we divide target resources by ndevs
* and split them amongst the AGP 3.0 devices. The remainder of such
* division operations are dropped on the last device, sort of like
* the spec mentions it should be done.
*
* We can't do this sort when we initially construct the dev_list
* because we don't know until this function whether isochronous
* transfers are enabled and consequently whether maxbw will mean
* anything.
*/
if((ret = agp_3_0_dev_list_sort(dev_list, ndevs)) != 0)
goto free_and_exit;
pci_read_config_dword(td, agp_bridge.capndx + 0x0c, &tnistat);
pci_read_config_dword(td, agp_bridge.capndx + 0x04, &tstatus);
/* Extract power-on defaults from the target */
target.maxbw = (tnistat >> 16) & 0xff;
target.n = (tnistat >> 8) & 0xff;
target.y = (tnistat >> 6) & 0x3;
target.l = (tnistat >> 3) & 0x7;
target.rq = (tstatus >> 24) & 0xff;
y_max = target.y;
/*
* Extract power-on defaults for each device in dev_list. Along
* the way, calculate the total isochronous bandwidth required
* by these devices and the largest requested payload size.
*/
list_for_each(pos, head) {
cur = list_entry(pos, struct agp_3_0_dev, list);
dev = cur->dev;
mcapndx = cur->capndx;
pci_read_config_dword(dev, cur->capndx + 0x0c, &mnistat);
master[cdev].maxbw = (mnistat >> 16) & 0xff;
master[cdev].n = (mnistat >> 8) & 0xff;
master[cdev].y = (mnistat >> 6) & 0x3;
master[cdev].dev = cur;
tot_bw += master[cdev].maxbw;
y_max = max(y_max, master[cdev].y);
cdev++;
}
/* Check if this configuration has any chance of working */
if(tot_bw > target.maxbw) {
printk(KERN_ERR PFX "isochronous bandwidth required "
"by AGP 3.0 devices exceeds that which is supported by "
"the AGP 3.0 bridge!\n");
ret = -ENODEV;
goto free_and_exit;
}
target.y = y_max;
/*
* Write the calculated payload size into the target's NICMD
* register. Doing this directly effects the ISOCH_N value
* in the target's NISTAT register, so we need to do this now
* to get an accurate value for ISOCH_N later.
*/
pci_read_config_word(td, agp_bridge.capndx + 0x20, &tnicmd);
tnicmd &= ~(0x3 << 6);
tnicmd |= target.y << 6;
pci_write_config_word(td, agp_bridge.capndx + 0x20, tnicmd);
/* Reread the target's ISOCH_N */
pci_read_config_dword(td, agp_bridge.capndx + 0x0c, &tnistat);
target.n = (tnistat >> 8) & 0xff;
/* Calculate the minimum ISOCH_N needed by each master */
for(cdev = 0; cdev < ndevs; cdev++) {
master[cdev].y = target.y;
master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);
tot_n += master[cdev].n;
}
/* Exit if the minimal ISOCH_N allocation among the masters is more
* than the target can handle. */
if(tot_n > target.n) {
printk(KERN_ERR PFX "number of isochronous "
"transactions per period required by AGP 3.0 devices "
"exceeds that which is supported by the AGP 3.0 "
"bridge!\n");
ret = -ENODEV;
goto free_and_exit;
}
/* Calculate left over ISOCH_N capability in the target. We'll give
* this to the hungriest device (as per the spec) */
rem = target.n - tot_n;
/*
* Calculate the minimum isochronous RQ depth needed by each master.
* Along the way, distribute the extra ISOCH_N capability calculated
* above.
*/
for(cdev = 0; cdev < ndevs; cdev++) {
/*
* This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y
* byte isochronous writes will be broken into 64B pieces.
* This means we need to budget more RQ depth to account for
* these kind of writes (each isochronous write is actually
* many writes on the AGP bus).
*/
master[cdev].rq = master[cdev].n;
if(master[cdev].y > 0x1) {
master[cdev].rq *= (1 << (master[cdev].y - 1));
}
tot_rq += master[cdev].rq;
if(cdev == ndevs - 1)
master[cdev].n += rem;
}
/* Figure the number of isochronous and asynchronous RQ slots the
* target is providing. */
rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
rq_async = target.rq - rq_isoch;
/* Exit if the minimal RQ needs of the masters exceeds what the target
* can provide. */
if(tot_rq > rq_isoch) {
printk(KERN_ERR PFX "number of request queue slots "
"required by the isochronous bandwidth requested by "
"AGP 3.0 devices exceeds the number provided by the "
"AGP 3.0 bridge!\n");
ret = -ENODEV;
goto free_and_exit;
}
/* Calculate asynchronous RQ capability in the target (per master) as
* well as the total number of leftover isochronous RQ slots. */
step = rq_async / ndevs;
rem_async = step + (rq_async % ndevs);
rem_isoch = rq_isoch - tot_rq;
/* Distribute the extra RQ slots calculated above and write our
* isochronous settings out to the actual devices. */
for(cdev = 0; cdev < ndevs; cdev++) {
cur = master[cdev].dev;
dev = cur->dev;
mcapndx = cur->capndx;
master[cdev].rq += (cdev == ndevs - 1)
? (rem_async + rem_isoch) : step;
pci_read_config_word(dev, cur->capndx + 0x20, &mnicmd);
pci_read_config_dword(dev, cur->capndx + 0x08, &mcmd);
mnicmd &= ~(0xff << 8);
mnicmd &= ~(0x3 << 6);
mcmd &= ~(0xff << 24);
mnicmd |= master[cdev].n << 8;
mnicmd |= master[cdev].y << 6;
mcmd |= master[cdev].rq << 24;
pci_write_config_dword(dev, cur->capndx + 0x08, mcmd);
pci_write_config_word(dev, cur->capndx + 0x20, mnicmd);
}
free_and_exit:
kfree(master);
get_out:
return ret;
}
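A worked example of the allocation arithmetic above (made-up numbers, purely illustrative): two masters with maxbw 4 and 8 behind a target advertising ISOCH_N = 9 and RQ = 32, with a negotiated ISOCH_Y of 1 so the y > 1 RQ multiplier never applies:

#include <stdio.h>

int main(void)
{
	unsigned target_n = 9, target_rq = 32, y = 1, ndevs = 2;
	unsigned maxbw[2] = { 4, 8 }, n[2], rq[2];
	unsigned tot_n = 0, tot_rq = 0;

	for (unsigned i = 0; i < ndevs; i++) {
		n[i] = maxbw[i] / (y + 1);	/* minimum ISOCH_N: 2 and 4 */
		tot_n += n[i];
	}
	unsigned rem = target_n - tot_n;	/* leftover ISOCH_N: 9 - 6 = 3 */

	for (unsigned i = 0; i < ndevs; i++) {
		rq[i] = n[i];			/* y <= 1: one RQ slot per txn */
		tot_rq += rq[i];
		if (i == ndevs - 1)
			n[i] += rem;		/* hungriest master gets rem */
	}

	unsigned rq_isoch = target_n;			/* y <= 1: no 64B splitting */
	unsigned rq_async = target_rq - rq_isoch;	/* 32 - 9 = 23 */
	unsigned step = rq_async / ndevs;		/* 11 per master... */
	unsigned rem_async = step + rq_async % ndevs;	/* ...12 for the last */
	unsigned rem_isoch = rq_isoch - tot_rq;		/* 9 - 6 = 3 left over */

	for (unsigned i = 0; i < ndevs; i++) {
		rq[i] += (i == ndevs - 1) ? rem_async + rem_isoch : step;
		printf("master %u: ISOCH_N=%u RQ=%u\n", i, n[i], rq[i]);
	}
	return 0;	/* master 0: N=2 RQ=13; master 1: N=7 RQ=19 */
}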
/*
* This function basically allocates request queue slots among the
* AGP 3.0 systems in nonisochronous nodes. The algorithm is
* pretty stupid, divide the total number of RQ slots provided by the
* target by ndevs. Distribute this many slots to each AGP 3.0 device,
* giving any left over slots to the last device in dev_list.
*/
static int agp_3_0_nonisochronous_node_enable(struct agp_3_0_dev *dev_list, unsigned int ndevs)
{
struct agp_3_0_dev *cur;
struct list_head *head = &dev_list->list, *pos;
u32 tstatus, mcmd;
u32 trq, mrq, rem;
unsigned int cdev = 0;
pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 0x04, &tstatus);
trq = (tstatus >> 24) & 0xff;
mrq = trq / ndevs;
rem = mrq + (trq % ndevs);
for(pos = head->next; cdev < ndevs; cdev++, pos = pos->next) {
cur = list_entry(pos, struct agp_3_0_dev, list);
pci_read_config_dword(cur->dev, cur->capndx + 0x08, &mcmd);
mcmd &= ~(0xff << 24);
mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
pci_write_config_dword(cur->dev, cur->capndx + 0x08, mcmd);
}
return 0;
}
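Minimal sketch of the nonisochronous split above (illustrative, assumed numbers: trq = 32 slots, ndevs = 3 masters): the target's RQ slots are divided evenly, with the remainder folded into the last device's share:

#include <stdio.h>

int main(void)
{
	unsigned trq = 32, ndevs = 3;
	unsigned mrq = trq / ndevs;		/* 10 slots each ... */
	unsigned rem = mrq + trq % ndevs;	/* ... last gets 10 + 2 = 12 */

	for (unsigned cdev = 0; cdev < ndevs; cdev++)
		printf("device %u: %u RQ slots\n", cdev,
		       (cdev == ndevs - 1) ? rem : mrq);
	return 0;
}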
/*
* Fully configure and enable an AGP 3.0 host bridge and all the devices
* lying behind it.
*/
static int agp_3_0_node_enable(u32 mode, u32 minor)
{
struct pci_dev *td = agp_bridge.dev, *dev;
u8 bus_num, mcapndx;
u32 isoch, arqsz, cal_cycle, tmp, rate;
u32 tstatus, tcmd, mcmd, mstatus, ncapid;
u32 mmajor, mminor;
u16 mpstat;
struct agp_3_0_dev *dev_list, *cur;
struct list_head *head, *pos;
unsigned int ndevs = 0;
int ret = 0;
/*
* Allocate a head for our AGP 3.0 device list (multiple AGP 3.0
* devices are allowed behind a single bridge).
*/
if((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
ret = -ENOMEM;
goto get_out;
}
head = &dev_list->list;
INIT_LIST_HEAD(head);
/*
* Find all the devices on this bridge's secondary bus and add them
* to dev_list.
*/
pci_read_config_byte(td, PCI_SECONDARY_BUS, &bus_num);
pci_for_each_dev(dev) {
if(dev->bus->number == bus_num) {
if((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
ret = -ENOMEM;
goto free_and_exit;
}
cur->dev = dev;
pos = &cur->list;
list_add(pos, head);
ndevs++;
}
}
/* Extract some power-on defaults from the target */
pci_read_config_dword(td, agp_bridge.capndx + 0x04, &tstatus);
isoch = (tstatus >> 17) & 0x1;
arqsz = (tstatus >> 13) & 0x7;
cal_cycle = (tstatus >> 10) & 0x7;
rate = tstatus & 0x7;
/*
* Take an initial pass through the devices lying behind our host
* bridge. Make sure each one is actually an AGP 3.0 device, otherwise
* exit with an error message. Along the way store the AGP 3.0
* cap_ptr for each device, the minimum supported cal_cycle, and the
* minimum supported data rate.
*/
list_for_each(pos, head) {
cur = list_entry(pos, struct agp_3_0_dev, list);
dev = cur->dev;
pci_read_config_word(dev, PCI_STATUS, &mpstat);
if((mpstat & PCI_STATUS_CAP_LIST) == 0)
continue;
pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
if (mcapndx != 0x00) {
do {
pci_read_config_dword(dev, mcapndx, &ncapid);
if ((ncapid & 0xff) != 0x02)
mcapndx = (ncapid >> 8) & 0xff;
}
while (((ncapid & 0xff) != 0x02) && (mcapndx != 0x00));
}
if(mcapndx == 0) {
printk(KERN_ERR PFX "woah! Non-AGP device "
"found on the secondary bus of an AGP 3.0 "
"bridge!\n");
ret = -ENODEV;
goto free_and_exit;
}
mmajor = (ncapid >> 20) & 0xf;
mminor = (ncapid >> 16) & 0xf;
if(mmajor < 3) {
printk(KERN_ERR PFX "woah! AGP 2.0 device "
"found on the secondary bus of an AGP 3.0 "
"bridge operating with AGP 3.0 electricals!\n");
ret = -ENODEV;
goto free_and_exit;
}
cur->capndx = mcapndx;
pci_read_config_dword(dev, cur->capndx + 0x04, &mstatus);
if(((mstatus >> 3) & 0x1) == 0) {
printk(KERN_ERR PFX "woah! AGP 3.0 device "
"not operating in AGP 3.0 mode found on the "
"secondary bus of an AGP 3.0 bridge operating "
"with AGP 3.0 electricals!\n");
ret = -ENODEV;
goto free_and_exit;
}
tmp = (mstatus >> 10) & 0x7;
cal_cycle = min(cal_cycle, tmp);
/* figure the lesser rate */
tmp = mstatus & 0x7;
if(tmp < rate)
rate = tmp;
}
/* Turn rate into something we can actually write out to AGPCMD */
switch(rate) {
case 0x1:
case 0x2:
break;
case 0x3:
rate = 0x2;
break;
default:
printk(KERN_ERR PFX "woah! Bogus AGP rate "
"value found advertised behind an AGP 3.0 "
"bridge!\n");
ret = -ENODEV;
goto free_and_exit;
}
/*
* Call functions to divide target resources amongst the AGP 3.0
* masters. This process is dramatically different depending on
* whether isochronous transfers are supported.
*/
if(isoch != 0) {
if((ret = agp_3_0_isochronous_node_enable(dev_list,
ndevs)) != 0)
goto free_and_exit;
} else {
if((ret = agp_3_0_nonisochronous_node_enable(dev_list,
ndevs)) != 0)
goto free_and_exit;
}
/*
* Set the calculated minimum supported cal_cycle and minimum
* supported transfer rate in the target's AGPCMD register.
* Also set the AGP_ENABLE bit, effectively 'turning on' the
* target (this has to be done _before_ turning on the masters).
*/
pci_read_config_dword(td, agp_bridge.capndx + 0x08, &tcmd);
tcmd &= ~(0x7 << 10);
tcmd &= ~0x7;
tcmd |= cal_cycle << 10;
tcmd |= 0x1 << 8;
tcmd |= rate;
pci_write_config_dword(td, agp_bridge.capndx + 0x08, tcmd);
/*
* Set the target's advertised arqsz value, the minimum supported
* transfer rate, and the AGP_ENABLE bit in each master's AGPCMD
* register.
*/
list_for_each(pos, head) {
cur = list_entry(pos, struct agp_3_0_dev, list);
dev = cur->dev;
mcapndx = cur->capndx;
pci_read_config_dword(dev, cur->capndx + 0x08, &mcmd);
mcmd &= ~(0x7 << 13);
mcmd &= ~0x7;
mcmd |= arqsz << 13;
mcmd |= 0x1 << 8;
mcmd |= rate;
pci_write_config_dword(dev, cur->capndx + 0x08, mcmd);
}
free_and_exit:
/* Be sure to free the dev_list */
for(pos = head->next; pos != head;) {
cur = list_entry(pos, struct agp_3_0_dev, list);
pos = pos->next;
kfree(cur);
}
kfree(dev_list);
get_out:
return ret;
}
/*
* Entry point to AGP 3.0 host bridge init. Check to see if we
* have an AGP 3.0 device operating in 3.0 mode; call
* agp_3_0_node_enable if so, or agp_generic_agp_enable if we don't
* (AGP 3.0 devices are required to operate as AGP 2.0 devices
* when not using 3.0 electricals).
*/
void agp_generic_agp_3_0_enable(u32 mode)
{
u32 ncapid, major, minor, agp_3_0;
pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx, &ncapid);
major = (ncapid >> 20) & 0xf;
minor = (ncapid >> 16) & 0xf;
printk(KERN_INFO PFX "Found an AGP %d.%d compliant device.\n",
major, minor);
if(major >= 3) {
pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 0x4, &agp_3_0);
/*
* Check to see if we are operating in 3.0 mode
*/
if((agp_3_0 >> 3) & 0x1) {
agp_3_0_node_enable(mode, minor);
return;
}
}
agp_generic_agp_enable(mode);
}
......@@ -469,7 +469,7 @@ int agp_generic_create_gatt_table(void)
for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
SetPageReserved(page);
agp_bridge.gatt_table_real = (u32 *) table;
agp_bridge.gatt_table_real = (unsigned long *) table;
agp_gatt_table = (void *)table;
CACHE_FLUSH();
agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
......@@ -693,7 +693,23 @@ void agp_enable(u32 mode)
EXPORT_SYMBOL(agp_free_memory);
EXPORT_SYMBOL(agp_allocate_memory);
EXPORT_SYMBOL(agp_copy_info);
EXPORT_SYMBOL(agp_create_memory);
EXPORT_SYMBOL(agp_bind_memory);
EXPORT_SYMBOL(agp_unbind_memory);
EXPORT_SYMBOL(agp_free_key);
EXPORT_SYMBOL(agp_enable);
EXPORT_SYMBOL(agp_bridge);
EXPORT_SYMBOL(agp_generic_alloc_page);
EXPORT_SYMBOL(agp_generic_destroy_page);
EXPORT_SYMBOL(agp_generic_suspend);
EXPORT_SYMBOL(agp_generic_resume);
EXPORT_SYMBOL(agp_generic_agp_enable);
EXPORT_SYMBOL(agp_generic_create_gatt_table);
EXPORT_SYMBOL(agp_generic_free_gatt_table);
EXPORT_SYMBOL(agp_generic_insert_memory);
EXPORT_SYMBOL(agp_generic_remove_memory);
EXPORT_SYMBOL(agp_generic_alloc_by_type);
EXPORT_SYMBOL(agp_generic_free_by_type);
EXPORT_SYMBOL(global_cache_flush);
......@@ -18,8 +18,7 @@
#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL
#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> \
hp_private.io_tlb_shift)
#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)
static struct aper_size_info_fixed hp_zx1_sizes[] =
{
......@@ -330,12 +329,7 @@ static unsigned long hp_zx1_mask_memory(unsigned long addr, int type)
return HP_ZX1_PDIR_VALID_BIT | addr;
}
static unsigned long hp_zx1_unmask_memory(unsigned long addr)
{
return addr & ~(HP_ZX1_PDIR_VALID_BIT);
}
int __init hp_zx1_setup (struct pci_dev *pdev)
int __init hp_zx1_setup (struct pci_dev *pdev __attribute__((unused)))
{
agp_bridge.masks = hp_zx1_masks;
agp_bridge.num_of_masks = 1;
......@@ -347,7 +341,6 @@ int __init hp_zx1_setup (struct pci_dev *pdev)
agp_bridge.cleanup = hp_zx1_cleanup;
agp_bridge.tlb_flush = hp_zx1_tlbflush;
agp_bridge.mask_memory = hp_zx1_mask_memory;
agp_bridge.unmask_memory = hp_zx1_unmask_memory;
agp_bridge.agp_enable = agp_generic_agp_enable;
agp_bridge.cache_flush = global_cache_flush;
agp_bridge.create_gatt_table = hp_zx1_create_gatt_table;
......@@ -375,8 +368,6 @@ static int __init agp_find_supported_device(struct pci_dev *dev)
return hp_zx1_setup(dev);
}
return -ENODEV;
}
static int agp_hp_probe (struct pci_dev *dev, const struct pci_device_id *ent)
{
......
/*
* FIXME: Nothing ever calls this stuff!
* For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
* the "Intel 460GTX Chipset Software Developer's Manual":
* http://developer.intel.com/design/itanium/downloads/24870401s.htm
*/
/*
* 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
* Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include "agp.h"
/* BIOS configures the chipset so that one of two apbase registers are used */
static u8 intel_i460_dynamic_apbase = 0x10;
/* 460 supports multiple GART page sizes, so GART pageshift is dynamic */
static u8 intel_i460_pageshift = 12;
static u32 intel_i460_pagesize;
/* Keep track of which is larger, chipset or kernel page size. */
static u32 intel_i460_cpk = 1;
#include "agp.h"
/* Structure for tracking partial use of 4MB GART pages */
static u32 **i460_pg_detail = NULL;
static u32 *i460_pg_count = NULL;
/*
* The i460 can operate with large (4MB) pages, but there is no sane way to support this
* within the current kernel/DRM environment, so we disable the relevant code for now.
* See also comments in ia64_alloc_page()...
*/
#define I460_LARGE_IO_PAGES 0
#define I460_CPAGES_PER_KPAGE (PAGE_SIZE >> intel_i460_pageshift)
#define I460_KPAGES_PER_CPAGE ((1 << intel_i460_pageshift) >> PAGE_SHIFT)
#if I460_LARGE_IO_PAGES
# define I460_IO_PAGE_SHIFT i460.io_page_shift
#else
# define I460_IO_PAGE_SHIFT 12
#endif
#define I460_IOPAGES_PER_KPAGE (PAGE_SIZE >> I460_IO_PAGE_SHIFT)
#define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
#define I460_SRAM_IO_DISABLE (1 << 4)
#define I460_BAPBASE_ENABLE (1 << 3)
#define I460_AGPSIZ_MASK 0x7
#define I460_4M_PS (1 << 1)
/* Control bits for Out-Of-GART coherency and Burst Write Combining */
#define I460_GXBCTL_OOG (1UL << 0)
#define I460_GXBCTL_BWC (1UL << 2)
/*
* gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the
* gatt_table and gatt_table_real pointers as "void *"...
*/
#define RD_GATT(index) readl((u32 *) i460.gatt + (index))
#define WR_GATT(index, val) writel((val), (u32 *) i460.gatt + (index))
/*
* The 460 spec says we have to read the last location written to make sure that all
* writes have taken effect
*/
#define WR_FLUSH_GATT(index) RD_GATT(index)
#define log2(x) ffz(~(x))
static inline void intel_i460_read_back (volatile u32 *entry)
static struct {
void *gatt; /* ioremap'd GATT area */
/* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
u8 io_page_shift;
/* BIOS configures chipset to one of 2 possible apbase values: */
u8 dynamic_apbase;
/* structure for tracking partial use of 4MB GART pages: */
struct lp_desc {
unsigned long *alloced_map; /* bitmap of kernel-pages in use */
int refcount; /* number of kernel pages using the large page */
u64 paddr; /* physical address of large page */
} *lp_desc;
} i460;
static const struct aper_size_info_8 i460_sizes[3] =
{
/*
* The 460 spec says we have to read the last location written to
* make sure that all writes have taken effect
*/
*entry;
}
/*
* The 32GB aperture is only available with a 4M GART page size. Due to the
* dynamic GART page size, we can't figure out page_order or num_entries until
* runtime.
*/
{32768, 0, 0, 4},
{1024, 0, 0, 2},
{256, 0, 0, 1}
};
static struct gatt_mask i460_masks[] =
{
{
.mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
.type = 0
}
};
static int intel_i460_fetch_size(void)
static int i460_fetch_size (void)
{
int i;
u8 temp;
......@@ -49,8 +97,15 @@ static int intel_i460_fetch_size(void)
/* Determine the GART page size */
pci_read_config_byte(agp_bridge.dev, INTEL_I460_GXBCTL, &temp);
intel_i460_pageshift = (temp & I460_4M_PS) ? 22 : 12;
intel_i460_pagesize = 1UL << intel_i460_pageshift;
i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12;
pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift);
if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
printk(KERN_ERR PFX
"I/O (GART) page-size %ZuKB doesn't match expected size %ZuKB\n",
1UL << (i460.io_page_shift - 10), 1UL << (I460_IO_PAGE_SHIFT));
return 0;
}
values = A_SIZE_8(agp_bridge.aperture_sizes);
......@@ -64,16 +119,16 @@ static int intel_i460_fetch_size(void)
}
/* Make sure we don't try to create a 2^23 entry GATT */
if ((intel_i460_pageshift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
return 0;
}
/* Determine the proper APBASE register */
if (temp & I460_BAPBASE_ENABLE)
intel_i460_dynamic_apbase = INTEL_I460_BAPBASE;
i460.dynamic_apbase = INTEL_I460_BAPBASE;
else
intel_i460_dynamic_apbase = INTEL_I460_APBASE;
i460.dynamic_apbase = INTEL_I460_APBASE;
for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
/*
......@@ -81,7 +136,7 @@ static int intel_i460_fetch_size(void)
* the defined aperture sizes. Take care not to shift off the end of
* values[i].size.
*/
values[i].num_entries = (values[i].size << 8) >> (intel_i460_pageshift - 12);
values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
values[i].page_order = log2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
}
......@@ -98,7 +153,7 @@ static int intel_i460_fetch_size(void)
}
/* There isn't anything to do here since 460 has no GART TLB. */
static void intel_i460_tlb_flush(agp_memory * mem)
static void i460_tlb_flush (agp_memory * mem)
{
return;
}
......@@ -107,7 +162,7 @@ static void intel_i460_tlb_flush(agp_memory * mem)
* This utility function is needed to prevent corruption of the control bits
* which are stored along with the aperture size in 460's AGPSIZ register
*/
static void intel_i460_write_agpsiz(u8 size_value)
static void i460_write_agpsiz (u8 size_value)
{
u8 temp;
......@@ -116,47 +171,39 @@ static void intel_i460_write_agpsiz(u8 size_value)
((temp & ~I460_AGPSIZ_MASK) | size_value));
}
static void intel_i460_cleanup(void)
static void i460_cleanup (void)
{
struct aper_size_info_8 *previous_size;
previous_size = A_SIZE_8(agp_bridge.previous_size);
intel_i460_write_agpsiz(previous_size->size_value);
i460_write_agpsiz(previous_size->size_value);
if (intel_i460_cpk == 0) {
vfree(i460_pg_detail);
vfree(i460_pg_count);
}
if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
kfree(i460.lp_desc);
}
/* Control bits for Out-Of-GART coherency and Burst Write Combining */
#define I460_GXBCTL_OOG (1UL << 0)
#define I460_GXBCTL_BWC (1UL << 2)
static int intel_i460_configure(void)
static int i460_configure (void)
{
union {
u32 small[2];
u64 large;
} temp;
size_t size;
u8 scratch;
int i;
struct aper_size_info_8 *current_size;
temp.large = 0;
current_size = A_SIZE_8(agp_bridge.current_size);
intel_i460_write_agpsiz(current_size->size_value);
i460_write_agpsiz(current_size->size_value);
/*
* Do the necessary rigmarole to read all eight bytes of APBASE.
* This has to be done since the AGP aperture can be above 4GB on
* 460 based systems.
*/
pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase, &(temp.small[0]));
pci_read_config_dword(agp_bridge.dev, intel_i460_dynamic_apbase + 4, &(temp.small[1]));
pci_read_config_dword(agp_bridge.dev, i460.dynamic_apbase, &(temp.small[0]));
pci_read_config_dword(agp_bridge.dev, i460.dynamic_apbase + 4, &(temp.small[1]));
/* Clear BAR control bits */
agp_bridge.gart_bus_addr = temp.large & ~((1UL << 3) - 1);
......@@ -166,403 +213,347 @@ static int intel_i460_configure(void)
(scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);
/*
* Initialize partial allocation trackers if a GART page is bigger than
* a kernel page.
* Initialize partial allocation trackers if a GART page is bigger than a kernel
* page.
*/
if (I460_CPAGES_PER_KPAGE >= 1) {
intel_i460_cpk = 1;
} else {
intel_i460_cpk = 0;
i460_pg_detail = vmalloc(sizeof(*i460_pg_detail) * current_size->num_entries);
i460_pg_count = vmalloc(sizeof(*i460_pg_count) * current_size->num_entries);
for (i = 0; i < current_size->num_entries; i++) {
i460_pg_count[i] = 0;
i460_pg_detail[i] = NULL;
}
if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
size = current_size->num_entries * sizeof(i460.lp_desc[0]);
i460.lp_desc = kmalloc(size, GFP_KERNEL);
if (!i460.lp_desc)
return -ENOMEM;
memset(i460.lp_desc, 0, size);
}
return 0;
}
static int intel_i460_create_gatt_table(void)
static int i460_create_gatt_table (void)
{
char *table;
int i;
int page_order;
int num_entries;
int page_order, num_entries, i;
void *temp;
/*
* Load up the fixed address of the GART SRAMS which hold our
* GATT table.
* Load up the fixed address of the GART SRAMS which hold our GATT table.
*/
table = (char *) __va(INTEL_I460_ATTBASE);
temp = agp_bridge.current_size;
page_order = A_SIZE_8(temp)->page_order;
num_entries = A_SIZE_8(temp)->num_entries;
agp_bridge.gatt_table_real = (u32 *) table;
agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
(PAGE_SIZE * (1 << page_order)));
agp_bridge.gatt_bus_addr = virt_to_phys(agp_bridge.gatt_table_real);
i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order);
for (i = 0; i < num_entries; i++) {
agp_bridge.gatt_table[i] = 0;
}
/* These are no good, they should be removed from the agp_bridge structure... */
agp_bridge.gatt_table_real = NULL;
agp_bridge.gatt_table = NULL;
agp_bridge.gatt_bus_addr = 0;
intel_i460_read_back(agp_bridge.gatt_table + i - 1);
for (i = 0; i < num_entries; ++i)
WR_GATT(i, 0);
WR_FLUSH_GATT(i - 1);
return 0;
}
static int intel_i460_free_gatt_table(void)
static int i460_free_gatt_table (void)
{
int num_entries;
int i;
int num_entries, i;
void *temp;
temp = agp_bridge.current_size;
num_entries = A_SIZE_8(temp)->num_entries;
for (i = 0; i < num_entries; i++) {
agp_bridge.gatt_table[i] = 0;
}
intel_i460_read_back(agp_bridge.gatt_table + i - 1);
for (i = 0; i < num_entries; ++i)
WR_GATT(i, 0);
WR_FLUSH_GATT(num_entries - 1);
iounmap(agp_bridge.gatt_table);
iounmap(i460.gatt);
return 0;
}
/* These functions are called when PAGE_SIZE exceeds the GART page size */
/*
* The following functions are called when the I/O (GART) page size is smaller than
* PAGE_SIZE.
*/
static int intel_i460_insert_memory_cpk(agp_memory * mem, off_t pg_start, int type)
static int i460_insert_memory_small_io_page (agp_memory *mem, off_t pg_start, int type)
{
unsigned long paddr, io_pg_start, io_page_size;
int i, j, k, num_entries;
void *temp;
unsigned long paddr;
/*
* The rest of the kernel will compute page offsets in terms of
* PAGE_SIZE.
*/
pg_start = I460_CPAGES_PER_KPAGE * pg_start;
pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
mem, pg_start, type, mem->memory[0]);
io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
temp = agp_bridge.current_size;
num_entries = A_SIZE_8(temp)->num_entries;
if ((pg_start + I460_CPAGES_PER_KPAGE * mem->page_count) > num_entries) {
if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
return -EINVAL;
}
j = pg_start;
while (j < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count)) {
if (!PGE_EMPTY(agp_bridge.gatt_table[j])) {
j = io_pg_start;
while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
if (!PGE_EMPTY(RD_GATT(j))) {
pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
j, RD_GATT(j));
return -EBUSY;
}
j++;
}
#if 0
/* not necessary since 460 GART is operated in coherent mode... */
if (mem->is_flushed == FALSE) {
CACHE_FLUSH();
mem->is_flushed = TRUE;
}
#endif
for (i = 0, j = pg_start; i < mem->page_count; i++) {
io_page_size = 1UL << I460_IO_PAGE_SHIFT;
for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
paddr = mem->memory[i];
for (k = 0; k < I460_CPAGES_PER_KPAGE; k++, j++, paddr += intel_i460_pagesize)
agp_bridge.gatt_table[j] = (u32) agp_bridge.mask_memory(paddr, mem->type);
for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
WR_GATT(j, agp_bridge.mask_memory(paddr, mem->type));
}
intel_i460_read_back(agp_bridge.gatt_table + j - 1);
WR_FLUSH_GATT(j - 1);
return 0;
}
static int intel_i460_remove_memory_cpk(agp_memory * mem, off_t pg_start, int type)
static int i460_remove_memory_small_io_page(agp_memory * mem, off_t pg_start, int type)
{
int i;
pg_start = I460_CPAGES_PER_KPAGE * pg_start;
pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
mem, pg_start, type);
for (i = pg_start; i < (pg_start + I460_CPAGES_PER_KPAGE * mem->page_count); i++)
agp_bridge.gatt_table[i] = 0;
pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
intel_i460_read_back(agp_bridge.gatt_table + i - 1);
for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)
WR_GATT(i, 0);
WR_FLUSH_GATT(i - 1);
return 0;
}
#if I460_LARGE_IO_PAGES
/*
* These functions are called when the GART page size exceeds PAGE_SIZE.
* These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
*
* This situation is interesting since AGP memory allocations that are
* smaller than a single GART page are possible. The structures i460_pg_count
* and i460_pg_detail track partial allocation of the large GART pages to
* work around this issue.
* This situation is interesting since AGP memory allocations that are smaller than a
* single GART page are possible. The i460.lp_desc array tracks partial allocation of the
* large GART pages to work around this issue.
*
* i460_pg_count[pg_num] tracks the number of kernel pages in use within
* GART page pg_num. i460_pg_detail[pg_num] is an array containing a
* pseudo-GART entry for each of the aforementioned kernel pages. The whole
* of i460_pg_detail is equivalent to a giant GATT with page size equal to
* that of the kernel.
* i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
* pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and
* i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
*/
static void *intel_i460_alloc_large_page(int pg_num)
static int i460_alloc_large_page (struct lp_desc *lp)
{
int i;
void *bp, *bp_end;
struct page *page;
i460_pg_detail[pg_num] = (void *) vmalloc(sizeof(u32) * I460_KPAGES_PER_CPAGE);
if (i460_pg_detail[pg_num] == NULL) {
printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
return NULL;
}
unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
size_t map_size;
void *lpage;
for (i = 0; i < I460_KPAGES_PER_CPAGE; i++)
i460_pg_detail[pg_num][i] = 0;
bp = (void *) __get_free_pages(GFP_KERNEL, intel_i460_pageshift - PAGE_SHIFT);
if (bp == NULL) {
lpage = (void *) __get_free_pages(GFP_KERNEL, order);
if (!lpage) {
printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
return NULL;
return -ENOMEM;
}
bp_end = bp + ((PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT))) - 1);
for (page = virt_to_page(bp); page <= virt_to_page(bp_end); page++) {
atomic_inc(&agp_bridge.current_memory_agp);
map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
lp->alloced_map = kmalloc(map_size, GFP_KERNEL);
if (!lp->alloced_map) {
free_pages((unsigned long) lpage, order);
printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
return -ENOMEM;
}
return bp;
memset(lp->alloced_map, 0, map_size);
lp->paddr = virt_to_phys(lpage);
lp->refcount = 0;
atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge.current_memory_agp);
return 0;
}
static void intel_i460_free_large_page(int pg_num, unsigned long addr)
static void i460_free_large_page (struct lp_desc *lp)
{
struct page *page;
void *bp, *bp_end;
bp = (void *) __va(addr);
bp_end = bp + (PAGE_SIZE * (1 << (intel_i460_pageshift - PAGE_SHIFT)));
vfree(i460_pg_detail[pg_num]);
i460_pg_detail[pg_num] = NULL;
kfree(lp->alloced_map);
lp->alloced_map = NULL;
for (page = virt_to_page(bp); page < virt_to_page(bp_end); page++) {
atomic_dec(&agp_bridge.current_memory_agp);
}
free_pages((unsigned long) bp, intel_i460_pageshift - PAGE_SHIFT);
free_pages((unsigned long) phys_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge.current_memory_agp);
}
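A userspace sketch of the bookkeeping that i460_alloc_large_page()/i460_free_large_page() maintain (names and sizes illustrative): one bitmap bit per kernel page inside a large GART page, plus a refcount that makes the large page reclaimable when it drops to zero:

#include <stdio.h>
#include <stdlib.h>

#define KPAGES_PER_IOPAGE 1024		/* 4MB GART page / 4KB kernel page */

struct lp_desc {
	unsigned long *alloced_map;	/* bitmap of kernel pages in use */
	int refcount;			/* kernel pages using the large page */
};

static void lp_set(struct lp_desc *lp, int idx, int on)
{
	unsigned long bit = 1UL << (idx % (8 * sizeof(long)));
	unsigned long *w  = &lp->alloced_map[idx / (8 * sizeof(long))];
	if (on) { *w |= bit;  lp->refcount++; }
	else    { *w &= ~bit; lp->refcount--; }
}

int main(void)
{
	struct lp_desc lp = { calloc(KPAGES_PER_IOPAGE / 8, 1), 0 };

	lp_set(&lp, 0, 1);		/* bind two kernel pages */
	lp_set(&lp, 1, 1);
	lp_set(&lp, 0, 0);		/* unbind them again */
	lp_set(&lp, 1, 0);
	if (lp.refcount == 0)		/* large page is now reclaimable */
		free(lp.alloced_map);
	printf("refcount=%d\n", lp.refcount);
	return 0;
}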
static int intel_i460_insert_memory_kpc(agp_memory * mem, off_t pg_start, int type)
static int i460_insert_memory_large_io_page (agp_memory * mem, off_t pg_start, int type)
{
int i, pg, start_pg, end_pg, start_offset, end_offset, idx;
int num_entries;
int i, start_offset, end_offset, idx, pg, num_entries;
struct lp_desc *start, *end, *lp;
void *temp;
unsigned long paddr;
temp = agp_bridge.current_size;
num_entries = A_SIZE_8(temp)->num_entries;
/* Figure out what pg_start means in terms of our large GART pages */
start_pg = pg_start / I460_KPAGES_PER_CPAGE;
start_offset = pg_start % I460_KPAGES_PER_CPAGE;
end_pg = (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE;
end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE;
start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
if (end_pg > num_entries) {
if (end > i460.lp_desc + num_entries) {
printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
return -EINVAL;
}
/* Check if the requested region of the aperture is free */
for (pg = start_pg; pg <= end_pg; pg++) {
/* Allocate new GART pages if necessary */
if (i460_pg_detail[pg] == NULL) {
temp = intel_i460_alloc_large_page(pg);
if (temp == NULL)
return -ENOMEM;
agp_bridge.gatt_table[pg] = agp_bridge.mask_memory((unsigned long) temp,
0);
intel_i460_read_back(agp_bridge.gatt_table + pg);
}
for (lp = start; lp <= end; ++lp) {
if (!lp->alloced_map)
continue; /* OK, the entire large page is available... */
for (idx = ((pg == start_pg) ? start_offset : 0);
idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE);
for (idx = ((lp == start) ? start_offset : 0);
idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
idx++)
{
if (i460_pg_detail[pg][idx] != 0)
if (test_bit(idx, lp->alloced_map))
return -EBUSY;
}
}
#if 0
/* not necessary since 460 GART is operated in coherent mode... */
if (mem->is_flushed == FALSE) {
CACHE_FLUSH();
mem->is_flushed = TRUE;
for (lp = start, i = 0; lp <= end; ++lp) {
if (!lp->alloced_map) {
/* Allocate new GART pages... */
if (i460_alloc_large_page(lp) < 0)
return -ENOMEM;
pg = lp - i460.lp_desc;
WR_GATT(pg, agp_bridge.mask_memory(lp->paddr, 0));
WR_FLUSH_GATT(pg);
}
#endif
for (pg = start_pg, i = 0; pg <= end_pg; pg++) {
paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]);
for (idx = ((pg == start_pg) ? start_offset : 0);
idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE);
for (idx = ((lp == start) ? start_offset : 0);
idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
idx++, i++)
{
mem->memory[i] = paddr + (idx * PAGE_SIZE);
i460_pg_detail[pg][idx] = agp_bridge.mask_memory(mem->memory[i],
mem->type);
i460_pg_count[pg]++;
mem->memory[i] = lp->paddr + idx*PAGE_SIZE;
__set_bit(idx, lp->alloced_map);
++lp->refcount;
}
}
return 0;
}
static int intel_i460_remove_memory_kpc(agp_memory * mem, off_t pg_start, int type)
static int i460_remove_memory_large_io_page (agp_memory * mem, off_t pg_start, int type)
{
int i, pg, start_pg, end_pg, start_offset, end_offset, idx;
int num_entries;
int i, pg, start_offset, end_offset, idx, num_entries;
struct lp_desc *start, *end, *lp;
void *temp;
unsigned long paddr;
temp = agp_bridge.current_size;
num_entries = A_SIZE_8(temp)->num_entries;
/* Figure out what pg_start means in terms of our large GART pages */
start_pg = pg_start / I460_KPAGES_PER_CPAGE;
start_offset = pg_start % I460_KPAGES_PER_CPAGE;
end_pg = (pg_start + mem->page_count - 1) / I460_KPAGES_PER_CPAGE;
end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_CPAGE;
for (i = 0, pg = start_pg; pg <= end_pg; pg++) {
for (idx = ((pg == start_pg) ? start_offset : 0);
idx < ((pg == end_pg) ? (end_offset + 1) : I460_KPAGES_PER_CPAGE);
start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
for (i = 0, lp = start; lp <= end; ++lp) {
for (idx = ((lp == start) ? start_offset : 0);
idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
idx++, i++)
{
mem->memory[i] = 0;
i460_pg_detail[pg][idx] = 0;
i460_pg_count[pg]--;
__clear_bit(idx, lp->alloced_map);
--lp->refcount;
}
/* Free GART pages if they are unused */
if (i460_pg_count[pg] == 0) {
paddr = agp_bridge.unmask_memory(agp_bridge.gatt_table[pg]);
agp_bridge.gatt_table[pg] = agp_bridge.scratch_page;
intel_i460_read_back(agp_bridge.gatt_table + pg);
intel_i460_free_large_page(pg, paddr);
if (lp->refcount == 0) {
pg = lp - i460.lp_desc;
WR_GATT(pg, 0);
WR_FLUSH_GATT(pg);
i460_free_large_page(lp);
}
}
return 0;
}
/* Dummy routines to call the appropriate {cpk,kpc} function */
/* Wrapper routines to call the appropriate {small_io_page,large_io_page} function */
static int intel_i460_insert_memory(agp_memory * mem, off_t pg_start, int type)
static int i460_insert_memory (agp_memory * mem, off_t pg_start, int type)
{
if (intel_i460_cpk)
return intel_i460_insert_memory_cpk(mem, pg_start, type);
if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
return i460_insert_memory_small_io_page(mem, pg_start, type);
else
return intel_i460_insert_memory_kpc(mem, pg_start, type);
return i460_insert_memory_large_io_page(mem, pg_start, type);
}
static int intel_i460_remove_memory(agp_memory * mem, off_t pg_start, int type)
static int i460_remove_memory (agp_memory * mem, off_t pg_start, int type)
{
if (intel_i460_cpk)
return intel_i460_remove_memory_cpk(mem, pg_start, type);
if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
return i460_remove_memory_small_io_page(mem, pg_start, type);
else
return intel_i460_remove_memory_kpc(mem, pg_start, type);
return i460_remove_memory_large_io_page(mem, pg_start, type);
}
/*
* If the kernel page size is smaller than the chipset page size, we don't
* want to allocate memory until we know where it is to be bound in the
* aperture (a multi-kernel-page alloc might fit inside of an already
* allocated GART page). Consequently, don't allocate or free anything
* if i460_cpk (meaning chipset pages per kernel page) isn't set.
* If the I/O (GART) page size is bigger than the kernel page size, we don't want to
* allocate memory until we know where it is to be bound in the aperture (a
* multi-kernel-page alloc might fit inside of an already allocated GART page).
*
* Let's just hope nobody counts on the allocated AGP memory being there
* before bind time (I don't think current drivers do)...
* Let's just hope nobody counts on the allocated AGP memory being there before bind time
* (I don't think current drivers do)...
*/
static void * intel_i460_alloc_page(void)
static void *i460_alloc_page (void)
{
if (intel_i460_cpk)
return agp_generic_alloc_page();
void *page;
if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
page = agp_generic_alloc_page();
else
/* Returning NULL would cause problems */
/* AK: really dubious code. */
return (void *)~0UL;
page = (void *)~0UL;
return page;
}
static void intel_i460_destroy_page(void *page)
static void i460_destroy_page (void *page)
{
if (intel_i460_cpk)
if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
agp_generic_destroy_page(page);
}
static struct gatt_mask intel_i460_masks[] =
{
{
.mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
.type = 0
}
};
#endif /* I460_LARGE_IO_PAGES */
static unsigned long intel_i460_mask_memory(unsigned long addr, int type)
static unsigned long i460_mask_memory (unsigned long addr, int type)
{
/* Make sure the returned address is a valid GATT entry */
return (agp_bridge.masks[0].mask
| (((addr & ~((1 << intel_i460_pageshift) - 1)) & 0xffffff000) >> 12));
| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xffffff000) >> 12));
}
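Illustrative sketch of what i460_mask_memory() computes (the control-bit value below is a placeholder, not the real INTEL_I460_GATT_VALID/COHERENT constants): strip the in-page offset, keep the 36-bit-capable physical bits, shift the frame number down by 12, and OR in the GATT control bits:

#include <stdio.h>
#include <stdint.h>

#define IO_PAGE_SHIFT	12
#define GATT_CTRL_BITS	0x3000000ULL	/* placeholder VALID|COHERENT bits */

int main(void)
{
	uint64_t addr = 0x12345678ULL;	/* sample physical address */
	uint64_t entry = GATT_CTRL_BITS
		| (((addr & ~((1ULL << IO_PAGE_SHIFT) - 1))
		    & 0xffffff000ULL) >> 12);
	printf("GATT entry for %#llx = %#llx\n",
	       (unsigned long long) addr, (unsigned long long) entry);
	return 0;
}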
static unsigned long intel_i460_unmask_memory(unsigned long addr)
{
/* Turn a GATT entry into a physical address */
return ((addr & 0xffffff) << 12);
}
static struct aper_size_info_8 intel_i460_sizes[3] =
{
/*
* The 32GB aperture is only available with a 4M GART page size.
* Due to the dynamic GART page size, we can't figure out page_order
* or num_entries until runtime.
*/
{32768, 0, 0, 4},
{1024, 0, 0, 2},
{256, 0, 0, 1}
};
int __init intel_i460_setup (struct pci_dev *pdev __attribute__((unused)))
{
agp_bridge.masks = intel_i460_masks;
agp_bridge.aperture_sizes = (void *) intel_i460_sizes;
agp_bridge.num_of_masks = 1;
agp_bridge.masks = i460_masks;
agp_bridge.aperture_sizes = (void *) i460_sizes;
agp_bridge.size_type = U8_APER_SIZE;
agp_bridge.num_aperture_sizes = 3;
agp_bridge.dev_private_data = NULL;
agp_bridge.needs_scratch_page = FALSE;
agp_bridge.configure = intel_i460_configure;
agp_bridge.fetch_size = intel_i460_fetch_size;
agp_bridge.cleanup = intel_i460_cleanup;
agp_bridge.tlb_flush = intel_i460_tlb_flush;
agp_bridge.mask_memory = intel_i460_mask_memory;
agp_bridge.unmask_memory = intel_i460_unmask_memory;
agp_bridge.configure = i460_configure;
agp_bridge.fetch_size = i460_fetch_size;
agp_bridge.cleanup = i460_cleanup;
agp_bridge.tlb_flush = i460_tlb_flush;
agp_bridge.mask_memory = i460_mask_memory;
agp_bridge.agp_enable = agp_generic_agp_enable;
agp_bridge.cache_flush = global_cache_flush;
agp_bridge.create_gatt_table = intel_i460_create_gatt_table;
agp_bridge.free_gatt_table = intel_i460_free_gatt_table;
agp_bridge.insert_memory = intel_i460_insert_memory;
agp_bridge.remove_memory = intel_i460_remove_memory;
agp_bridge.create_gatt_table = i460_create_gatt_table;
agp_bridge.free_gatt_table = i460_free_gatt_table;
#if I460_LARGE_IO_PAGES
agp_bridge.insert_memory = i460_insert_memory;
agp_bridge.remove_memory = i460_remove_memory;
agp_bridge.agp_alloc_page = i460_alloc_page;
agp_bridge.agp_destroy_page = i460_destroy_page;
#else
agp_bridge.insert_memory = i460_insert_memory_small_io_page;
agp_bridge.remove_memory = i460_remove_memory_small_io_page;
agp_bridge.agp_alloc_page = agp_generic_alloc_page;
agp_bridge.agp_destroy_page = agp_generic_destroy_page;
#endif
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
agp_bridge.agp_alloc_page = intel_i460_alloc_page;
agp_bridge.agp_destroy_page = intel_i460_destroy_page;
agp_bridge.suspend = agp_generic_suspend;
agp_bridge.resume = agp_generic_resume;
agp_bridge.cant_use_aperture = 1;
......@@ -619,6 +610,5 @@ static void __exit agp_i460_cleanup(void)
module_init(agp_i460_init);
module_exit(agp_i460_cleanup);
MODULE_AUTHOR("Bjorn Helgaas <helgaas@fc.hp.com>");
MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
MODULE_LICENSE("GPL and additional rights");
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include "agp.h"
static int intel_7505_fetch_size(void)
{
int i;
u16 tmp;
aper_size_info_16 *values;
/*
* For AGP 3.0 APSIZE is now 16 bits
*/
pci_read_config_word (agp_bridge.dev, INTEL_I7505_APSIZE, &tmp);
tmp = (tmp & 0xfff);
values = A_SIZE_16(agp_bridge.aperture_sizes);
for (i=0; i < agp_bridge.num_aperture_sizes; i++) {
if (tmp == values[i].size_value) {
agp_bridge.previous_size = agp_bridge.current_size =
(void *)(values + i);
agp_bridge.aperture_size_idx = i;
return values[i].size;
}
}
return 0;
}
static void intel_7505_tlbflush(agp_memory *mem)
{
u32 temp;
pci_read_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, &temp);
pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, temp & ~(1 << 7));
pci_read_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, &temp);
pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, temp | (1 << 7));
}
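/*
 * Editorial note, not patch content: the flush above clears and then
 * re-sets bit 7 of AGPCTRL -- presumably the GTLB enable bit -- so the
 * chipset discards any cached GART translations when it comes back on.
 */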
static void intel_7505_cleanup(void)
{
aper_size_info_16 *previous_size;
previous_size = A_SIZE_16(agp_bridge.previous_size);
pci_write_config_byte(agp_bridge.dev, INTEL_I7505_APSIZE,
previous_size->size_value);
}
static int intel_7505_configure(void)
{
u32 temp;
aper_size_info_16 *current_size;
current_size = A_SIZE_16(agp_bridge.current_size);
/* aperture size */
pci_write_config_word(agp_bridge.dev, INTEL_I7505_APSIZE,
current_size->size_value);
/* address to map to */
pci_read_config_dword(agp_bridge.dev, INTEL_I7505_NAPBASELO, &temp);
agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
/* attbase */
pci_write_config_dword(agp_bridge.dev, INTEL_I7505_ATTBASE,
agp_bridge.gatt_bus_addr);
/* agpctrl */
pci_write_config_dword(agp_bridge.dev, INTEL_I7505_AGPCTRL, 0x0000);
/* clear error registers */
pci_write_config_byte(agp_bridge.dev, INTEL_I7505_ERRSTS, 0xff);
return 0;
}
static aper_size_info_16 intel_7505_sizes[7] =
{
{256, 65536, 6, 0xf00},
{128, 32768, 5, 0xf20},
{64, 16384, 4, 0xf30},
{32, 8192, 3, 0xf38},
{16, 4096, 2, 0xf3c},
{8, 2048, 1, 0xf3e},
{4, 1024, 0, 0xf3f}
};
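/*
 * Editorial aside, not part of the patch: each aper_size_info_16 row
 * above is {aperture size in MB, GATT entries, page order, APSIZE
 * register value}; intel_7505_fetch_size() matches the masked APSIZE
 * readout against the last field. A standalone sketch of that lookup:
 */
#include <stdio.h>

struct aper16 { int mb; int entries; int order; unsigned short apsize; };

static const struct aper16 sizes[] = {
	{256, 65536, 6, 0xf00}, {128, 32768, 5, 0xf20}, {64, 16384, 4, 0xf30},
	{32, 8192, 3, 0xf38}, {16, 4096, 2, 0xf3c}, {8, 2048, 1, 0xf3e},
	{4, 1024, 0, 0xf3f},
};

int main(void)
{
	unsigned short readout = 0xf30 & 0xfff; /* fetch_size masks to 12 bits */
	for (int i = 0; i < 7; i++)
		if (sizes[i].apsize == readout)
			printf("aperture: %d MB\n", sizes[i].mb); /* prints 64 */
	return 0;
}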
static int __init intel_7505_setup (struct pci_dev *pdev)
{
agp_bridge.masks = intel_generic_masks;
agp_bridge.num_of_masks = 1;
agp_bridge.aperture_sizes = (void *) intel_7505_sizes;
agp_bridge.size_type = U16_APER_SIZE;
agp_bridge.num_aperture_sizes = 7;
agp_bridge.dev_private_data = NULL;
agp_bridge.needs_scratch_page = FALSE;
agp_bridge.configure = intel_7505_configure;
agp_bridge.fetch_size = intel_7505_fetch_size;
agp_bridge.cleanup = intel_7505_cleanup;
agp_bridge.tlb_flush = intel_7505_tlbflush;
agp_bridge.mask_memory = intel_mask_memory;
agp_bridge.agp_enable = agp_generic_agp_3_0_enable;
agp_bridge.cache_flush = global_cache_flush;
agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
agp_bridge.insert_memory = agp_generic_insert_memory;
agp_bridge.remove_memory = agp_generic_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
agp_bridge.agp_alloc_page = agp_generic_alloc_page;
agp_bridge.agp_destroy_page = agp_generic_destroy_page;
agp_bridge.suspend = agp_generic_suspend;
agp_bridge.resume = agp_generic_resume;
agp_bridge.cant_use_aperture = 0;
return 0;
}
struct agp_device_ids i7x05_agp_device_ids[] __initdata =
{
{
.device_id = PCI_DEVICE_ID_INTEL_7505_0,
.chipset = INTEL_I7505,
.chipset_name = "i7505",
},
{
.device_id = PCI_DEVICE_ID_INTEL_7205_0,
.chipset = INTEL_I7505,
.chipset_name = "i7205",
},
{ }, /* dummy final entry, always present */
};
/* scan table above for supported devices */
static int __init agp_lookup_host_bridge (struct pci_dev *pdev)
{
int j=0;
struct agp_device_ids *devs;
devs = i7x05_agp_device_ids;
while (devs[j].chipset_name != NULL) {
if (pdev->device == devs[j].device_id) {
printk (KERN_INFO PFX "Detected Intel %s chipset\n",
devs[j].chipset_name);
agp_bridge.type = devs[j].chipset;
if (devs[j].chipset_setup != NULL)
return devs[j].chipset_setup(pdev);
else
return intel_7505_setup(pdev);
}
j++;
}
printk(KERN_ERR PFX "Unsupported Intel chipset (device id: %04x),",
pdev->device);
return -ENODEV;
}
static int __init agp_find_supported_device(struct pci_dev *dev)
{
agp_bridge.dev = dev;
if (pci_find_capability(dev, PCI_CAP_ID_AGP)==0)
return -ENODEV;
/* probe for known chipsets */
return agp_lookup_host_bridge(dev);
}
static int agp_i7x05_probe (struct pci_dev *dev, const struct pci_device_id *ent)
{
if (agp_find_supported_device(dev) == 0) {
agp_register_driver(dev);
return 0;
}
return -ENODEV;
}
static struct pci_device_id agp_i7x05_pci_table[] __initdata = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_ANY_ID,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ }
};
MODULE_DEVICE_TABLE(pci, agp_i7x05_pci_table);
static struct pci_driver agp_i7x05_pci_driver = {
.name = "agpgart-i7x05",
.id_table = agp_i7x05_pci_table,
.probe = agp_i7x05_probe,
};
int __init agp_i7x05_init(void)
{
int ret_val;
ret_val = pci_module_init(&agp_i7x05_pci_driver);
if (ret_val)
agp_bridge.type = NOT_SUPPORTED;
return ret_val;
}
static void __exit agp_i7x05_cleanup(void)
{
agp_unregister_driver();
pci_unregister_driver(&agp_i7x05_pci_driver);
}
module_init(agp_i7x05_init);
module_exit(agp_i7x05_cleanup);
MODULE_AUTHOR("Matthew E Tolentino <matthew.e.tolentino@intel.com>");
MODULE_LICENSE("GPL and additional rights");
......@@ -1473,6 +1473,11 @@ static struct pci_driver agp_intel_pci_driver = {
static int __init agp_intel_init(void)
{
int ret_val;
static int agp_initialised=0;
if (agp_initialised==1)
return 0;
agp_initialised=1;
ret_val = pci_module_init(&agp_intel_pci_driver);
if (ret_val)
......
......@@ -986,6 +986,10 @@ static int copy_params(struct dm_ioctl *user, struct dm_ioctl **param)
static int validate_params(uint cmd, struct dm_ioctl *param)
{
/* Ignores parameters */
if (cmd == DM_REMOVE_ALL_CMD)
return 0;
/* Unless creating, either name or uuid, but not both */
if (cmd != DM_DEV_CREATE_CMD) {
if ((!*param->uuid && !*param->name) ||
......
......@@ -43,7 +43,7 @@ static int linear_ctr(struct dm_target *ti, int argc, char **argv)
goto bad;
}
if (dm_get_device(ti, argv[0], ti->begin, ti->len,
if (dm_get_device(ti, argv[0], lc->start, ti->len,
dm_table_get_mode(ti->table), &lc->dev)) {
ti->error = "dm-linear: Device lookup failed";
goto bad;
......
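/*
 * Editorial note on the dm-linear fix above: dm_get_device()
 * bounds-checks the underlying device, so it must be passed lc->start
 * (the offset on that device, parsed from the target line) rather than
 * ti->begin (the target's offset within the logical dm device).
 */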
......@@ -117,12 +117,29 @@ static int stripe_ctr(struct dm_target *ti, int argc, char **argv)
return -EINVAL;
}
/*
* chunk_size must be a power of two
*/
if (!chunk_size || (chunk_size & (chunk_size - 1))) {
ti->error = "dm-stripe: Invalid chunk size";
return -EINVAL;
}
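/*
 * Editorial aside: the check above uses the classic bit trick -- for a
 * non-zero n, n & (n - 1) clears the lowest set bit, so the result is
 * zero exactly when a single bit is set. A standalone illustration:
 */
#include <assert.h>

static int is_power_of_two(unsigned int n)
{
	return n && !(n & (n - 1));
}

int main(void)
{
	assert(is_power_of_two(64));
	assert(!is_power_of_two(0));  /* zero is rejected explicitly */
	assert(!is_power_of_two(65)); /* 65 & 64 == 64 */
	return 0;
}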
if (!multiple(ti->len, stripes, &width)) {
ti->error = "dm-stripe: Target length not divisable by "
"number of stripes";
return -EINVAL;
}
/*
* Do we have enough arguments for that many stripes?
*/
if (argc != (2 + 2 * stripes)) {
ti->error = "dm-stripe: Not enough destinations "
"specified";
return -EINVAL;
}
sc = alloc_context(stripes);
if (!sc) {
ti->error = "dm-stripe: Memory allocation for striped context "
......@@ -134,15 +151,6 @@ static int stripe_ctr(struct dm_target *ti, int argc, char **argv)
sc->stripe_width = width;
ti->split_io = chunk_size;
/*
* chunk_size is a power of two
*/
if (!chunk_size || (chunk_size & (chunk_size - 1))) {
ti->error = "dm-stripe: Invalid chunk size";
kfree(sc);
return -EINVAL;
}
sc->chunk_mask = ((sector_t) chunk_size) - 1;
for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
chunk_size >>= 1;
......@@ -152,13 +160,6 @@ static int stripe_ctr(struct dm_target *ti, int argc, char **argv)
* Get the stripe destinations.
*/
for (i = 0; i < stripes; i++) {
if (argc < 2) {
ti->error = "dm-stripe: Not enough destinations "
"specified";
kfree(sc);
return -EINVAL;
}
argv += 2;
r = get_stripe(ti, sc, i, argv);
......
......@@ -312,7 +312,7 @@ static int lookup_device(const char *path, dev_t *dev)
}
if (!S_ISBLK(inode->i_mode)) {
r = -EINVAL;
r = -ENOTBLK;
goto out;
}
......@@ -356,7 +356,7 @@ static int open_dev(struct dm_dev *d, dev_t dev)
return -ENOMEM;
r = blkdev_get(d->bdev, d->mode, 0, BDEV_RAW);
if (!r)
if (r)
return r;
r = bd_claim(d->bdev, _claim_ptr);
......@@ -388,7 +388,7 @@ static void close_dev(struct dm_dev *d)
static int check_device_area(struct dm_dev *dd, sector_t start, sector_t len)
{
sector_t dev_size;
dev_size = dd->bdev->bd_inode->i_size;
dev_size = dd->bdev->bd_inode->i_size >> SECTOR_SHIFT;
return ((start < dev_size) && (len <= (dev_size - start)));
}
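/*
 * Editorial sketch, not patch content: i_size is in bytes while
 * start/len are in 512-byte sectors, so the new SECTOR_SHIFT conversion
 * compares like with like. Without it a device looked 2^9 times larger
 * than it really is.
 */
#include <assert.h>

#define SECTOR_SHIFT 9

int main(void)
{
	unsigned long long i_size = 1024ULL * 1024 * 1024; /* 1 GiB in bytes */
	unsigned long long dev_size = i_size >> SECTOR_SHIFT;
	assert(dev_size == 2097152ULL); /* 2^21 sectors of 512 bytes */
	/* a 1-sector area ending at the last sector is within bounds */
	unsigned long long start = dev_size - 1, len = 1;
	assert(start < dev_size && len <= dev_size - start);
	return 0;
}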
......
......@@ -16,7 +16,6 @@
static const char *_name = DM_NAME;
#define MAX_DEVICES (1 << KDEV_MINOR_BITS)
#define SECTOR_SHIFT 9
static int major = 0;
static int _major = 0;
......@@ -59,11 +58,15 @@ struct mapped_device {
* The current mapping.
*/
struct dm_table *map;
/*
* io objects are allocated from here.
*/
mempool_t *io_pool;
};
#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static mempool_t *_io_pool;
static __init int local_init(void)
{
......@@ -75,18 +78,10 @@ static __init int local_init(void)
if (!_io_cache)
return -ENOMEM;
_io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
mempool_free_slab, _io_cache);
if (!_io_pool) {
kmem_cache_destroy(_io_cache);
return -ENOMEM;
}
_major = major;
r = register_blkdev(_major, _name, &dm_blk_dops);
if (r < 0) {
DMERR("register_blkdev failed");
mempool_destroy(_io_pool);
kmem_cache_destroy(_io_cache);
return r;
}
......@@ -99,7 +94,6 @@ static __init int local_init(void)
static void local_exit(void)
{
mempool_destroy(_io_pool);
kmem_cache_destroy(_io_cache);
if (unregister_blkdev(_major, _name) < 0)
......@@ -179,14 +173,14 @@ static int dm_blk_close(struct inode *inode, struct file *file)
return 0;
}
static inline struct dm_io *alloc_io(void)
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
return mempool_alloc(_io_pool, GFP_NOIO);
return mempool_alloc(md->io_pool, GFP_NOIO);
}
static inline void free_io(struct dm_io *io)
static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
mempool_free(io, _io_pool);
mempool_free(io, md->io_pool);
}
static inline struct deferred_io *alloc_deferred(void)
......@@ -212,7 +206,7 @@ static int queue_io(struct mapped_device *md, struct bio *bio)
down_write(&md->lock);
if (!test_bit(DMF_SUSPENDED, &md->flags)) {
if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
up_write(&md->lock);
free_deferred(di);
return 1;
......@@ -234,6 +228,15 @@ static int queue_io(struct mapped_device *md, struct bio *bio)
* interests of getting something for people to use I give
* you this clearly demarcated crap.
*---------------------------------------------------------------*/
static inline sector_t to_sector(unsigned int bytes)
{
return bytes >> SECTOR_SHIFT;
}
static inline unsigned int to_bytes(sector_t sector)
{
return sector << SECTOR_SHIFT;
}
/*
* Decrements the number of outstanding ios that a bio has been
......@@ -244,18 +247,19 @@ static inline void dec_pending(struct dm_io *io, int error)
static spinlock_t _uptodate_lock = SPIN_LOCK_UNLOCKED;
unsigned long flags;
if (error) {
spin_lock_irqsave(&_uptodate_lock, flags);
if (error)
io->error = error;
spin_unlock_irqrestore(&_uptodate_lock, flags);
}
if (atomic_dec_and_test(&io->io_count)) {
if (atomic_dec_and_test(&io->md->pending))
/* nudge anyone waiting on suspend queue */
wake_up(&io->md->wait);
bio_endio(io->bio, io->error ? 0 : io->bio->bi_size, io->error);
free_io(io);
bio_endio(io->bio, io->bio->bi_size, io->error);
free_io(io->md, io);
}
}
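/*
 * Editorial note on the refcounting above: io->io_count starts at 1 in
 * __split_bio() (a reference held while clones are still being issued)
 * and is bumped once per mapped clone in __map_bio(), so the original
 * bio is ended and the io freed exactly once, when the last reference
 * is dropped.
 */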
......@@ -263,16 +267,11 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
{
struct dm_io *io = bio->bi_private;
/*
* Only call dec_pending if the clone has completely
* finished. If a partial io errors I'm assuming it won't
* be requeued. FIXME: check this.
*/
if (error || !bio->bi_size) {
if (bio->bi_size)
return 1;
dec_pending(io, error);
bio_put(bio);
}
return 0;
}
......@@ -280,16 +279,17 @@ static int clone_endio(struct bio *bio, unsigned int done, int error)
static sector_t max_io_len(struct mapped_device *md,
sector_t sector, struct dm_target *ti)
{
sector_t len = ti->len;
sector_t offset = sector - ti->begin;
sector_t len = ti->len - offset;
/* FIXME: obey io_restrictions ! */
/*
* Does the target need to split even further ?
*/
if (ti->split_io) {
sector_t boundary;
sector_t offset = sector - ti->begin;
boundary = dm_round_up(offset + 1, ti->split_io) - offset;
if (len > boundary)
......@@ -299,23 +299,23 @@ static sector_t max_io_len(struct mapped_device *md,
return len;
}
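/*
 * Editorial sketch of the split_io boundary math above, assuming
 * dm_round_up(n, sz) rounds n up to the next multiple of sz: with
 * split_io = 8 and offset = 5, boundary = round_up(6, 8) - 5 = 3, so
 * at most 3 sectors may be issued before the io hits a chunk edge.
 */
static inline unsigned long round_up_mult(unsigned long n, unsigned long sz)
{
	return ((n + sz - 1) / sz) * sz; /* stand-in for dm_round_up() */
}
/* boundary = round_up_mult(offset + 1, split_io) - offset */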
static void __map_bio(struct dm_target *ti, struct bio *clone)
static void __map_bio(struct dm_target *ti, struct bio *clone, struct dm_io *io)
{
struct dm_io *io = clone->bi_private;
int r;
/*
* Sanity checks.
*/
if (!clone->bi_size)
BUG();
BUG_ON(!clone->bi_size);
clone->bi_end_io = clone_endio;
clone->bi_private = io;
/*
* Map the clone. If r == 0 we don't need to do
* anything, the target has assumed ownership of
* this io.
*/
atomic_inc(&io->md->pending);
atomic_inc(&io->io_count);
r = ti->type->map(ti, clone);
if (r > 0)
......@@ -337,77 +337,125 @@ struct clone_info {
};
/*
* Issues a little bio that just does the back end of a split page.
* Creates a little bio that just does part of a bvec.
*/
static void __split_page(struct clone_info *ci, unsigned int len)
static struct bio *split_bvec(struct bio *bio, sector_t sector,
unsigned short idx, unsigned int offset,
unsigned int len)
{
struct dm_target *ti = dm_table_find_target(ci->md->map, ci->sector);
struct bio *clone, *bio = ci->bio;
struct bio_vec *bv = bio->bi_io_vec + (bio->bi_vcnt - 1);
DMWARN("splitting page");
if (len > ci->sector_count)
len = ci->sector_count;
struct bio *clone;
struct bio_vec *bv = bio->bi_io_vec + idx;
clone = bio_alloc(GFP_NOIO, 1);
if (clone) {
memcpy(clone->bi_io_vec, bv, sizeof(*bv));
clone->bi_sector = ci->sector;
clone->bi_sector = sector;
clone->bi_bdev = bio->bi_bdev;
clone->bi_flags = bio->bi_flags | (1 << BIO_SEG_VALID);
clone->bi_rw = bio->bi_rw;
clone->bi_size = len << SECTOR_SHIFT;
clone->bi_end_io = clone_endio;
clone->bi_private = ci->io;
clone->bi_vcnt = 1;
clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
clone->bi_io_vec->bv_len = clone->bi_size;
}
ci->sector += len;
ci->sector_count -= len;
return clone;
}
/*
* Creates a bio that consists of range of complete bvecs.
*/
static struct bio *clone_bio(struct bio *bio, sector_t sector,
unsigned short idx, unsigned short bv_count,
unsigned int len)
{
struct bio *clone;
__map_bio(ti, clone);
clone = bio_clone(bio, GFP_NOIO);
clone->bi_sector = sector;
clone->bi_idx = idx;
clone->bi_vcnt = idx + bv_count;
clone->bi_size = to_bytes(len);
return clone;
}
static void __clone_and_map(struct clone_info *ci)
{
struct bio *clone, *bio = ci->bio;
struct dm_target *ti = dm_table_find_target(ci->md->map, ci->sector);
sector_t len = max_io_len(ci->md, bio->bi_sector, ti);
sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
/* shorter than current target ? */
if (ci->sector_count < len)
len = ci->sector_count;
if (ci->sector_count <= max) {
/*
* Optimise for the simple case where we can do all of
* the remaining io with a single clone.
*/
clone = clone_bio(bio, ci->sector, ci->idx,
bio->bi_vcnt - ci->idx, ci->sector_count);
__map_bio(ti, clone, ci->io);
ci->sector_count = 0;
/* create the clone */
clone = bio_clone(ci->bio, GFP_NOIO);
clone->bi_sector = ci->sector;
clone->bi_idx = ci->idx;
clone->bi_size = len << SECTOR_SHIFT;
clone->bi_end_io = clone_endio;
clone->bi_private = ci->io;
} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
/*
* There are some bvecs that don't span targets.
* Do as many of these as possible.
*/
int i;
sector_t remaining = max;
sector_t bv_len;
for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
bv_len = to_sector(bio->bi_io_vec[i].bv_len);
if (bv_len > remaining)
break;
remaining -= bv_len;
len += bv_len;
}
clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
__map_bio(ti, clone, ci->io);
/* adjust the remaining io */
ci->sector += len;
ci->sector_count -= len;
__map_bio(ti, clone);
ci->idx = i;
} else {
/*
* If we are not performing all remaining io in this
* clone then we need to calculate ci->idx for the next
* time round.
*/
if (ci->sector_count) {
while (len) {
struct bio_vec *bv = clone->bi_io_vec + ci->idx;
sector_t bv_len = bv->bv_len >> SECTOR_SHIFT;
if (bv_len <= len)
len -= bv_len;
else {
__split_page(ci, bv_len - len);
len = 0;
* Create two copy bios to deal with io that has
* been split across a target boundary.
*/
struct bio_vec *bv = bio->bi_io_vec + ci->idx;
clone = split_bvec(bio, ci->sector, ci->idx,
bv->bv_offset, max);
if (!clone) {
dec_pending(ci->io, -ENOMEM);
return;
}
ci->idx++;
__map_bio(ti, clone, ci->io);
ci->sector += max;
ci->sector_count -= max;
ti = dm_table_find_target(ci->md->map, ci->sector);
len = to_sector(bv->bv_len) - max;
clone = split_bvec(bio, ci->sector, ci->idx,
bv->bv_offset + to_bytes(max), len);
if (!clone) {
dec_pending(ci->io, -ENOMEM);
return;
}
__map_bio(ti, clone, ci->io);
ci->sector += len;
ci->sector_count -= len;
ci->idx++;
}
}
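/*
 * Editorial summary of __clone_and_map()'s three cases (not patch
 * content): (1) the remaining io fits inside the current target, so a
 * single clone_bio() covers it; (2) at least one whole bvec fits, so
 * clone_bio() takes as many complete bvecs as the target can absorb;
 * (3) a single bvec straddles the target edge, so two split_bvec()
 * clones are issued, with the second mapped via the next target.
 */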
......@@ -420,7 +468,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
ci.md = md;
ci.bio = bio;
ci.io = alloc_io();
ci.io = alloc_io(md);
ci.io->error = 0;
atomic_set(&ci.io->io_count, 1);
ci.io->bio = bio;
......@@ -429,6 +477,7 @@ static void __split_bio(struct mapped_device *md, struct bio *bio)
ci.sector_count = bio_sectors(bio);
ci.idx = 0;
atomic_inc(&md->pending);
while (ci.sector_count)
__clone_and_map(&ci);
......@@ -459,13 +508,13 @@ static int dm_request(request_queue_t *q, struct bio *bio)
up_read(&md->lock);
if (bio_rw(bio) == READA) {
bio_io_error(bio, 0);
bio_io_error(bio, bio->bi_size);
return 0;
}
r = queue_io(md, bio);
if (r < 0) {
bio_io_error(bio, 0);
bio_io_error(bio, bio->bi_size);
return 0;
} else if (r == 0)
......@@ -559,8 +608,17 @@ static struct mapped_device *alloc_dev(int minor)
md->queue.queuedata = md;
blk_queue_make_request(&md->queue, dm_request);
md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
mempool_free_slab, _io_cache);
if (!md->io_pool) {
free_minor(minor); /* md->disk is not allocated yet */
kfree(md);
return NULL;
}
md->disk = alloc_disk(1);
if (!md->disk) {
mempool_destroy(md->io_pool);
free_minor(minor); /* alloc_disk() failed, md->disk is NULL */
kfree(md);
return NULL;
......@@ -582,6 +640,7 @@ static struct mapped_device *alloc_dev(int minor)
static void free_dev(struct mapped_device *md)
{
free_minor(md->disk->first_minor);
mempool_destroy(md->io_pool);
del_gendisk(md->disk);
put_disk(md->disk);
kfree(md);
......@@ -711,15 +770,14 @@ int dm_suspend(struct mapped_device *md)
}
set_bit(DMF_BLOCK_IO, &md->flags);
add_wait_queue(&md->wait, &wait);
up_write(&md->lock);
/*
* Then we wait for the already mapped ios to
* complete.
*/
down_read(&md->lock);
add_wait_queue(&md->wait, &wait);
blk_run_queues();
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
......@@ -730,11 +788,11 @@ int dm_suspend(struct mapped_device *md)
}
current->state = TASK_RUNNING;
remove_wait_queue(&md->wait, &wait);
up_read(&md->lock);
/* set_bit is atomic */
down_write(&md->lock);
remove_wait_queue(&md->wait, &wait);
set_bit(DMF_SUSPENDED, &md->flags);
up_write(&md->lock);
return 0;
}
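/*
 * Editorial note on the reordering above: the waiter is now put on
 * md->wait before the write lock is dropped, presumably closing a
 * lost-wakeup window -- previously an io could complete and issue its
 * wake_up() between up_write() and add_wait_queue(), leaving the
 * suspend asleep with nothing left to wake it.
 */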
......
......@@ -29,6 +29,8 @@
#define SECTOR_FORMAT "%lu"
#endif
#define SECTOR_SHIFT 9
extern struct block_device_operations dm_blk_dops;
/*
......
......@@ -42,6 +42,8 @@
* task switches.
*/
enum fixed_addresses {
FIX_VSYSCALL,
FIX_HOLE,
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
#endif
......@@ -96,10 +98,9 @@ extern void __set_fixmap (enum fixed_addresses idx,
* used by vmalloc.c.
*
* Leave one empty page between vmalloc'ed areas and
* the start of the fixmap, and leave one page empty
* at the top of mem..
* the start of the fixmap.
*/
#define FIXADDR_TOP (0xffffe000UL)
#define FIXADDR_TOP (0xfffff000UL)
#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
......
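/*
 * Editorial aside, not part of the patch: fixmap indices map top-down
 * from FIXADDR_TOP, so FIX_VSYSCALL (index 0) lands exactly at the new
 * top, 0xfffff000 -- the vsyscall page. A sketch mirroring the
 * kernel's __fix_to_virt():
 */
#include <assert.h>

#define PAGE_SHIFT  12
#define FIXADDR_TOP 0xfffff000UL
#define FIX_TO_VIRT(idx) (FIXADDR_TOP - ((unsigned long)(idx) << PAGE_SHIFT))

enum { FIX_VSYSCALL, FIX_HOLE }; /* ordered as in the patched enum */

int main(void)
{
	assert(FIX_TO_VIRT(FIX_VSYSCALL) == 0xfffff000UL);
	assert(FIX_TO_VIRT(FIX_HOLE)     == 0xffffe000UL);
	return 0;
}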
......@@ -9,8 +9,8 @@
* 2 - reserved
* 3 - reserved
*
* 4 - default user CS <==== new cacheline
* 5 - default user DS
* 4 - unused <==== new cacheline
* 5 - unused
*
* ------- start of TLS (Thread-Local Storage) segments:
*
......@@ -25,16 +25,18 @@
*
* 12 - kernel code segment <==== new cacheline
* 13 - kernel data segment
* 14 - TSS
* 15 - LDT
* 16 - PNPBIOS support (16->32 gate)
* 17 - PNPBIOS support
* 18 - PNPBIOS support
* 14 - default user CS
* 15 - default user DS
* 16 - TSS
* 17 - LDT
* 18 - PNPBIOS support (16->32 gate)
* 19 - PNPBIOS support
* 20 - PNPBIOS support
* 21 - APM BIOS support
* 22 - APM BIOS support
* 21 - PNPBIOS support
* 22 - PNPBIOS support
* 23 - APM BIOS support
* 24 - APM BIOS support
* 25 - APM BIOS support
*/
#define GDT_ENTRY_TLS_ENTRIES 3
#define GDT_ENTRY_TLS_MIN 6
......@@ -42,10 +44,10 @@
#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
#define GDT_ENTRY_DEFAULT_USER_CS 4
#define GDT_ENTRY_DEFAULT_USER_CS 14
#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
#define GDT_ENTRY_DEFAULT_USER_DS 5
#define GDT_ENTRY_DEFAULT_USER_DS 15
#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
#define GDT_ENTRY_KERNEL_BASE 12
......@@ -56,14 +58,14 @@
#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 2)
#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 3)
#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 4)
#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 9)
#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
/*
* The GDT has 21 entries but we pad it to cacheline boundary:
* The GDT has 23 entries but we pad it to cacheline boundary:
*/
#define GDT_ENTRIES 24
......
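/*
 * Editorial aside, not part of the patch: a segment selector is the
 * GDT index shifted left by 3 with the requested privilege level in
 * the low two bits, which is why moving user CS/DS to entries 14/15
 * makes __USER_CS 0x73 and __USER_DS 0x7b.
 */
#include <assert.h>

#define SEL(idx, rpl) (((idx) << 3) | (rpl))

int main(void)
{
	assert(SEL(14, 3) == 0x73); /* __USER_CS   */
	assert(SEL(15, 3) == 0x7b); /* __USER_DS   */
	assert(SEL(12, 0) == 0x60); /* __KERNEL_CS */
	return 0;
}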
......@@ -51,6 +51,7 @@ enum chipset_type {
INTEL_I850,
INTEL_I860,
INTEL_460GX,
INTEL_I7505,
VIA_GENERIC,
VIA_VP3,
VIA_MVP3,
......
......@@ -7,13 +7,6 @@
#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H
#define DM_DIR "mapper" /* Slashes not supported */
#define DM_MAX_TYPE_NAME 16
#define DM_NAME_LEN 128
#define DM_UUID_LEN 129
#ifdef __KERNEL__
struct dm_target;
struct dm_table;
struct dm_dev;
......@@ -101,6 +94,4 @@ struct dm_target {
int dm_register_target(struct target_type *t);
int dm_unregister_target(struct target_type *t);
#endif /* __KERNEL__ */
#endif /* _LINUX_DEVICE_MAPPER_H */
......@@ -7,9 +7,13 @@
#ifndef _LINUX_DM_IOCTL_H
#define _LINUX_DM_IOCTL_H
#include <linux/device-mapper.h>
#include <linux/types.h>
#define DM_DIR "mapper" /* Slashes not supported */
#define DM_MAX_TYPE_NAME 16
#define DM_NAME_LEN 128
#define DM_UUID_LEN 129
/*
* Implements a traditional ioctl interface to the device mapper.
*/
......
......@@ -1764,6 +1764,9 @@
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
#define PCI_DEVICE_ID_INTEL_82092AA_1 0x1222
#define PCI_DEVICE_ID_INTEL_7116 0x1223
#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
#define PCI_DEVICE_ID_INTEL_7505_1 0x2552
#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
#define PCI_DEVICE_ID_INTEL_82596 0x1226
#define PCI_DEVICE_ID_INTEL_82865 0x1227
#define PCI_DEVICE_ID_INTEL_82557 0x1229
......
......@@ -452,8 +452,6 @@ inline void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
mempool_t *pool;
int bio_gfp;
BUG_ON((*bio_orig)->bi_idx);
/*
* for non-isa bounce case, just check if the bounce pfn is equal
* to or bigger than the highest pfn in the system -- in that case,
......