Commit 366e077b authored by Jeff Garzik

Merge mandrakesoft.com:/home/jgarzik/repo/linus-2.5

into mandrakesoft.com:/home/jgarzik/repo/net-drivers-2.5
parents db1df5cb c2dd03a9
@@ -173,6 +173,9 @@ noconfig_targets := xconfig menuconfig config oldconfig randconfig \
help tags TAGS sgmldocs psdocs pdfdocs htmldocs \
checkconfig checkhelp checkincludes
+ RCS_FIND_IGNORE := \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS \) -prune -o
+ RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exclude CVS
# Helpers built in scripts/
# ---------------------------------------------------------------------------
@@ -581,13 +584,13 @@ spec:
# will become invalid
rpm: clean spec
- find . -name SCCS -prune -o -name BitKeeper -prune -o \
+ find . $(RCS_FIND_IGNORE) \
\( -size 0 -o -name .depend -o -name .hdepend \) \
-type f -print | xargs rm -f
set -e; \
cd $(TOPDIR)/.. ; \
ln -sf $(TOPDIR) $(KERNELPATH) ; \
- tar -cvz --exclude CVS -f $(KERNELPATH).tar.gz $(KERNELPATH)/. ; \
+ tar -cvz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(KERNELPATH)/. ; \
rm $(KERNELPATH) ; \
cd $(TOPDIR) ; \
. scripts/mkversion > .version ; \
@@ -717,7 +720,7 @@ include arch/$(ARCH)/Makefile
clean: archclean
@echo 'Cleaning up'
- @find . -name SCCS -prune -o -name BitKeeper -prune -o \
+ @find . $(RCS_FIND_IGNORE) \
\( -name \*.[oas] -o -name core -o -name .\*.cmd -o \
-name .\*.tmp -o -name .\*.d \) -type f -print \
| grep -v lxdialog/ | xargs rm -f
@@ -726,7 +729,7 @@ clean: archclean
mrproper: clean archmrproper
@echo 'Making mrproper'
- @find . -name SCCS -prune -o -name BitKeeper -prune -o \
+ @find . $(RCS_FIND_IGNORE) \
\( -name .depend -o -name .\*.cmd \) \
-type f -print | xargs rm -f
@rm -f $(MRPROPER_FILES)
@@ -736,7 +739,7 @@ mrproper: clean archmrproper
distclean: mrproper
@echo 'Making distclean'
- @find . -name SCCS -prune -o -name BitKeeper -prune -o \
+ @find . $(RCS_FIND_IGNORE) \
\( -not -type d \) -and \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
@@ -747,18 +750,18 @@ distclean: mrproper
# ---------------------------------------------------------------------------
define all-sources
- ( find . \( -name SCCS -o -name BitKeeper -o -name include -o \
- -name arch \) -prune \
- -o -name '*.[chS]' -print; \
- find arch/$(ARCH) \( -name SCCS -o -name BitKeeper \) -prune \
- -o -name '*.[chS]' -print; \
- find include \( -name SCCS -o -name BitKeeper -o -name config -o \
- -name 'asm-*' \) -prune \
- -o -name '*.[chS]' -print; \
- find include/asm-$(ARCH) \( -name SCCS -o -name BitKeeper \) -prune \
- -o -name '*.[chS]' -print; \
- find include/asm-generic \( -name SCCS -o -name BitKeeper \) -prune \
- -o -name '*.[chS]' -print )
+ ( find . $(RCS_FIND_IGNORE) \
+ \( -name include -o -name arch \) -prune -o \
+ -name '*.[chS]' -print; \
+ find arch/$(ARCH) $(RCS_FIND_IGNORE) \
+ -name '*.[chS]' -print; \
+ find include $(RCS_FIND_IGNORE) \
+ \( -name config -o -name 'asm-*' \) -prune -o \
+ -name '*.[chS]' -print; \
+ find include/asm-$(ARCH) $(RCS_FIND_IGNORE) \
+ -name '*.[chS]' -print; \
+ find include/asm-generic $(RCS_FIND_IGNORE) \
+ -name '*.[chS]' -print )
endef
quiet_cmd_TAGS = MAKE $@
@@ -825,17 +828,17 @@ sgmldocs psdocs pdfdocs htmldocs: scripts
# ---------------------------------------------------------------------------
checkconfig:
- find * -name SCCS -prune -o -name BitKeeper -prune -o \
+ find * $(RCS_FIND_IGNORE) \
-name '*.[hcS]' -type f -print | sort \
| xargs $(PERL) -w scripts/checkconfig.pl
checkhelp:
- find * -name SCCS -prune -o -name BitKeeper -prune -o \
+ find * $(RCS_FIND_IGNORE) \
-name [cC]onfig.in -print | sort \
| xargs $(PERL) -w scripts/checkhelp.pl
checkincludes:
- find * -name SCCS -prune -o -name BitKeeper -prune -o \
+ find * $(RCS_FIND_IGNORE) \
-name '*.[hcS]' -type f -print | sort \
| xargs $(PERL) -w scripts/checkincludes.pl
...
@@ -34,6 +34,7 @@ if [ "$CONFIG_X86" = "y" ]; then
define_bool CONFIG_ACPI_EC y
define_bool CONFIG_ACPI_POWER y
define_bool CONFIG_ACPI_PCI $CONFIG_PCI
+ define_bool CONFIG_ACPI_SLEEP $CONFIG_SOFTWARE_SUSPEND
define_bool CONFIG_ACPI_SYSTEM y
fi
fi
...
@@ -55,7 +55,8 @@ static char *acpi_table_signatures[ACPI_TABLE_COUNT] = {
[ACPI_SPCR] = "SPCR",
[ACPI_SRAT] = "SRAT",
[ACPI_SSDT] = "SSDT",
- [ACPI_SPMI] = "SPMI"
+ [ACPI_SPMI] = "SPMI",
+ [ACPI_HPET] = "HPET"
};
/* System Description Table (RSDT/XSDT) */
@@ -320,7 +321,7 @@ acpi_table_parse_madt_family (
handler(entry);
}
entry = (acpi_table_entry_header *)
- ((unsigned long) entry += entry->length);
+ ((unsigned long) entry + entry->length);
}
return count;
...
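The MADT walker fix above is needed because the old expression assigned to the result of a cast, which is not a valid lvalue; the new code simply advances the pointer by each subtable's own length field. A minimal sketch of the same walking pattern (illustrative only; record_header and walk_records are hypothetical names, not part of this commit):

struct record_header {
	u8 type;
	u8 length;	/* total length of this record, header included */
};

static void walk_records(void *buf, unsigned long total)
{
	struct record_header *entry = buf;
	unsigned long end = (unsigned long) buf + total;

	while ((unsigned long) entry < end && entry->length) {
		/* handle one variable-length record here */
		entry = (struct record_header *)
			((unsigned long) entry + entry->length);
	}
}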
@@ -137,8 +137,24 @@ deadline_find_hash(struct deadline_data *dd, sector_t offset)
return rq;
}
static sector_t deadline_get_last_sector(struct deadline_data *dd)
{
sector_t last_sec = dd->last_sector;
/*
* if dispatch is non-empty, disregard last_sector and check last one
*/
if (!list_empty(dd->dispatch)) {
struct request *__rq = list_entry_rq(dd->dispatch->prev);
last_sec = __rq->sector + __rq->nr_sectors;
}
return last_sec;
}
static int
- deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
+ deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
{
struct deadline_data *dd = q->elevator.elevator_data;
const int data_dir = bio_data_dir(bio);
@@ -150,9 +166,11 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
/*
* try last_merge to avoid going to hash
*/
- ret = elv_try_last_merge(q, req, bio);
+ ret = elv_try_last_merge(q, bio);
- if (ret != ELEVATOR_NO_MERGE)
+ if (ret != ELEVATOR_NO_MERGE) {
+ *insert = q->last_merge;
goto out;
+ }
/*
* see if the merge hash can satisfy a back merge
@@ -161,12 +179,15 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
if (elv_rq_merge_ok(__rq, bio)) {
- *req = __rq;
+ *insert = &__rq->queuelist;
ret = ELEVATOR_BACK_MERGE;
goto out;
}
}
/*
* scan list from back to find insertion point.
*/
entry = sort_list = &dd->sort_list[data_dir];
while ((entry = entry->prev) != sort_list) {
__rq = list_entry_rq(entry);
@@ -177,8 +198,8 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
if (!(__rq->flags & REQ_CMD))
continue;
- if (!*req && bio_rq_in_between(bio, __rq, sort_list))
+ if (!*insert && bio_rq_in_between(bio, __rq, sort_list))
- *req = __rq;
+ *insert = &__rq->queuelist;
if (__rq->flags & REQ_BARRIER)
break;
@@ -189,12 +210,23 @@ deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
if (__rq->sector - bio_sectors(bio) == bio->bi_sector) {
ret = elv_try_merge(__rq, bio);
if (ret != ELEVATOR_NO_MERGE) {
- *req = __rq;
+ *insert = &__rq->queuelist;
break;
}
}
}
/*
* no insertion point found, check the very front
*/
if (!*insert && !list_empty(sort_list)) {
__rq = list_entry_rq(sort_list->next);
if (bio->bi_sector + bio_sectors(bio) < __rq->sector &&
bio->bi_sector > deadline_get_last_sector(dd))
*insert = sort_list;
}
out:
return ret;
}
@@ -254,18 +286,9 @@ deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
{
struct list_head *sort_head = &dd->sort_list[rq_data_dir(rq)];
- sector_t last_sec = dd->last_sector;
+ sector_t last_sec = deadline_get_last_sector(dd);
int batch_count = dd->fifo_batch;
- /*
- * if dispatch is non-empty, disregard last_sector and check last one
- */
- if (!list_empty(dd->dispatch)) {
- struct request *__rq = list_entry_rq(dd->dispatch->prev);
- last_sec = __rq->sector + __rq->nr_sectors;
- }
do {
struct list_head *nxt = rq->queuelist.next;
int this_rq_cost;
...
@@ -136,8 +136,7 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio)
return ret;
}
- inline int elv_try_last_merge(request_queue_t *q, struct request **req,
- struct bio *bio)
+ inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
{
int ret = ELEVATOR_NO_MERGE;
@@ -150,8 +149,8 @@ inline int elv_try_last_merge(request_queue_t *q, struct request **req,
if (!rq_mergeable(__rq))
q->last_merge = NULL;
- else if ((ret = elv_try_merge(__rq, bio)))
- *req = __rq;
+ else
+ ret = elv_try_merge(__rq, bio);
}
return ret;
@@ -162,15 +161,17 @@ inline int elv_try_last_merge(request_queue_t *q, struct request **req,
*
* See if we can find a request that this buffer can be coalesced with.
*/
- int elevator_noop_merge(request_queue_t *q, struct request **req,
+ int elevator_noop_merge(request_queue_t *q, struct list_head **insert,
struct bio *bio)
{
struct list_head *entry = &q->queue_head;
struct request *__rq;
int ret;
- if ((ret = elv_try_last_merge(q, req, bio)))
+ if ((ret = elv_try_last_merge(q, bio))) {
+ *insert = q->last_merge;
return ret;
+ }
while ((entry = entry->prev) != &q->queue_head) {
__rq = list_entry_rq(entry);
@@ -182,7 +183,7 @@ int elevator_noop_merge(request_queue_t *q, struct request **req,
continue;
if ((ret = elv_try_merge(__rq, bio))) {
- *req = __rq;
+ *insert = &__rq->queuelist;
q->last_merge = &__rq->queuelist;
return ret;
}
@@ -240,12 +241,12 @@ int elevator_global_init(void)
return 0;
}
- int elv_merge(request_queue_t *q, struct request **rq, struct bio *bio)
+ int elv_merge(request_queue_t *q, struct list_head **entry, struct bio *bio)
{
elevator_t *e = &q->elevator;
if (e->elevator_merge_fn)
- return e->elevator_merge_fn(q, rq, bio);
+ return e->elevator_merge_fn(q, entry, bio);
return ELEVATOR_NO_MERGE;
}
...
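The elevator interface now reports a merge target or insertion point as a struct list_head rather than a struct request. A minimal sketch of a merge callback under the reworked prototype (illustrative only; example_merge is a hypothetical name, the other identifiers are the 2.5-era ones shown in the hunks above):

static int example_merge(request_queue_t *q, struct list_head **insert,
			 struct bio *bio)
{
	int ret = elv_try_last_merge(q, bio);

	if (ret != ELEVATOR_NO_MERGE)
		*insert = q->last_merge;	/* list position of the merge target */

	return ret;
}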
@@ -1583,7 +1583,6 @@ static int __make_request(request_queue_t *q, struct bio *bio)
spin_lock_irq(q->queue_lock);
again:
- req = NULL;
insert_here = NULL;
if (blk_queue_empty(q)) {
@@ -1593,10 +1592,13 @@ static int __make_request(request_queue_t *q, struct bio *bio)
if (barrier)
goto get_rq;
- el_ret = elv_merge(q, &req, bio);
+ el_ret = elv_merge(q, &insert_here, bio);
switch (el_ret) {
case ELEVATOR_BACK_MERGE:
+ req = list_entry_rq(insert_here);
BUG_ON(!rq_mergeable(req));
if (!q->back_merge_fn(q, req, bio)) {
insert_here = &req->queuelist;
break;
@@ -1611,7 +1613,10 @@ static int __make_request(request_queue_t *q, struct bio *bio)
goto out;
case ELEVATOR_FRONT_MERGE:
+ req = list_entry_rq(insert_here);
BUG_ON(!rq_mergeable(req));
if (!q->front_merge_fn(q, req, bio)) {
insert_here = req->queuelist.prev;
break;
@@ -1638,13 +1643,6 @@ static int __make_request(request_queue_t *q, struct bio *bio)
* elevator says don't/can't merge. get new request
*/
case ELEVATOR_NO_MERGE:
- /*
- * use elevator hints as to where to insert the
- * request. if no hints, just add it to the back
- * of the queue
- */
- if (req)
- insert_here = &req->queuelist;
break;
default:
...
- dep_tristate '/dev/agpgart (AGP Support)' CONFIG_AGP $CONFIG_DRM_AGP
+ if [ "$CONFIG_GART_IOMMU" = "y" ]; then
+ dep_bool '/dev/agpgart (AGP Support)' CONFIG_AGP $CONFIG_DRM_AGP
+ else
+ dep_tristate '/dev/agpgart (AGP Support)' CONFIG_AGP $CONFIG_DRM_AGP
+ fi
if [ "$CONFIG_AGP" != "n" ]; then
bool ' Intel 440LX/BX/GX and I815/I820/I830M/I830MP/I840/I845/I850/I860 support' CONFIG_AGP_INTEL
bool ' Intel I810/I815/I830M (on-board) support' CONFIG_AGP_I810
@@ -7,6 +12,9 @@ if [ "$CONFIG_AGP" != "n" ]; then
bool ' Generic SiS support' CONFIG_AGP_SIS
bool ' ALI chipset support' CONFIG_AGP_ALI
bool ' Serverworks LE/HE support' CONFIG_AGP_SWORKS
+ if [ "$CONFIG_GART_IOMMU" != "y" ]; then
+ bool ' AMD 8151 support' CONFIG_AGP_AMD_8151
+ fi
if [ "$CONFIG_IA64" = "y" ]; then
bool ' Intel 460GX support' CONFIG_AGP_I460
bool ' HP ZX1 AGP support' CONFIG_AGP_HP_ZX1
...
@@ -16,6 +16,7 @@ agpgart-$(CONFIG_AGP_ALI) += ali-agp.o
agpgart-$(CONFIG_AGP_SWORKS) += sworks-agp.o
agpgart-$(CONFIG_AGP_I460) += i460-agp.o
agpgart-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
+ agpgart-$(CONFIG_AGP_AMD_8151) += k8-agp.o
agpgart-objs := $(agpgart-y)
obj-$(CONFIG_AGP) += agpgart.o
...
@@ -50,6 +50,9 @@ EXPORT_SYMBOL(agp_backend_release);
struct agp_bridge_data agp_bridge = { type: NOT_SUPPORTED };
static int agp_try_unsupported __initdata = 0;
+ int agp_memory_reserved;
+ __u32 *agp_gatt_table;
int agp_backend_acquire(void)
{
if (agp_bridge.type == NOT_SUPPORTED)
@@ -243,7 +246,7 @@ static int agp_return_size(void)
/* Routine to copy over information structure */
- void agp_copy_info(agp_kern_info * info)
+ int agp_copy_info(agp_kern_info * info)
{
unsigned long page_mask = 0;
int i;
@@ -251,7 +254,7 @@ void agp_copy_info(agp_kern_info * info)
memset(info, 0, sizeof(agp_kern_info));
if (agp_bridge.type == NOT_SUPPORTED) {
info->chipset = agp_bridge.type;
- return;
+ return -EIO;
}
info->version.major = agp_bridge.version->major;
info->version.minor = agp_bridge.version->minor;
@@ -268,6 +271,7 @@ void agp_copy_info(agp_kern_info * info)
page_mask |= agp_bridge.mask_memory(page_mask, i);
info->page_mask = ~page_mask;
+ return 0;
}
/* End - Routine to copy over information structure */
@@ -518,6 +522,7 @@ int agp_generic_create_gatt_table(void)
SetPageReserved(page);
agp_bridge.gatt_table_real = (unsigned long *) table;
+ agp_gatt_table = (void *)table;
CACHE_FLUSH();
agp_bridge.gatt_table = ioremap_nocache(virt_to_phys(table),
(PAGE_SIZE * (1 << page_order)));
@@ -625,6 +630,9 @@ int agp_generic_insert_memory(agp_memory * mem, off_t pg_start, int type)
break;
}
+ num_entries -= agp_memory_reserved/PAGE_SIZE;
+ if (num_entries < 0) num_entries = 0;
if (type != 0 || mem->type != 0) {
/* The generic routines know nothing of memory types */
return -EINVAL;
@@ -824,6 +832,17 @@ static struct {
},
#endif /* CONFIG_AGP_ALI */
#ifdef CONFIG_AGP_AMD_8151
{
.device_id = PCI_DEVICE_ID_AMD_8151_0,
.vendor_id = PCI_VENDOR_ID_AMD,
.chipset = AMD_8151,
.vendor_name = "AMD",
.chipset_name = "8151",
.chipset_setup = amd_8151_setup
},
#endif /* CONFIG_AGP_AMD */
#ifdef CONFIG_AGP_AMD
{
.device_id = PCI_DEVICE_ID_AMD_FE_GATE_7006,
@@ -858,7 +877,6 @@ static struct {
.chipset_setup = amd_irongate_setup,
},
#endif /* CONFIG_AGP_AMD */
#ifdef CONFIG_AGP_INTEL
{
.device_id = PCI_DEVICE_ID_INTEL_82443LX_0,
@@ -1632,7 +1650,7 @@ static struct pci_driver agp_pci_driver = {
.probe = agp_probe,
};
- static int __init agp_init(void)
+ int __init agp_init(void)
{
int ret_val;
@@ -1658,5 +1676,7 @@ static void __exit agp_cleanup(void)
}
}
+ #ifndef CONFIG_GART_IOMMU
module_init(agp_init);
module_exit(agp_cleanup);
+ #endif
@@ -49,6 +49,7 @@ void agp_free_key(int key);
/* chipset specific init routines. */
int __init ali_generic_setup (struct pci_dev *pdev);
int __init amd_irongate_setup (struct pci_dev *pdev);
+ int __init amd_8151_setup (struct pci_dev *pdev);
int __init hp_zx1_setup (struct pci_dev *pdev);
int __init intel_i460_setup (struct pci_dev *pdev);
int __init intel_generic_setup (struct pci_dev *pdev);
@@ -319,6 +320,22 @@ struct agp_bridge_data {
#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
#define AMD_8151_APSIZE 0xb4
#define AMD_8151_GARTBLOCK 0xb8
#define AMD_X86_64_GARTAPERTURECTL 0x90
#define AMD_X86_64_GARTAPERTUREBASE 0x94
#define AMD_X86_64_GARTTABLEBASE 0x98
#define AMD_X86_64_GARTCACHECTL 0x9c
#define AMD_X86_64_GARTEN 1<<0
#define AMD_8151_VMAPERTURE 0x10
#define AMD_8151_AGP_CTL 0xb0
#define AMD_8151_APERTURESIZE 0xb4
#define AMD_8151_GARTPTR 0xb8
#define AMD_8151_GTLBEN 1<<7
#define AMD_8151_APEREN 1<<8
/* ALi registers */
#define ALI_APBASE 0x10
#define ALI_AGPCTRL 0xb8
...
/*
* Copyright 2001,2002 SuSE Labs
* Distributed under the GNU public license, v2.
*
* This is a GART driver for the AMD K8 northbridge and the AMD 8151
* AGP bridge. The main work is done in the northbridge. The configuration
* is only mirrored in the 8151 for compatibility (could be likely
* removed now).
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include "agp.h"
extern int agp_memory_reserved;
extern __u32 *agp_gatt_table;
static u_int64_t pci_read64 (struct pci_dev *dev, int reg)
{
union {
u64 full;
struct {
u32 high;
u32 low;
} split;
} tmp;
pci_read_config_dword(dev, reg, &tmp.split.high);
pci_read_config_dword(dev, reg+4, &tmp.split.low);
return tmp.full;
}
static void pci_write64 (struct pci_dev *dev, int reg, u64 value)
{
union {
u64 full;
struct {
u32 high;
u32 low;
} split;
} tmp;
tmp.full = value;
pci_write_config_dword(dev, reg, tmp.split.high);
pci_write_config_dword(dev, reg+4, tmp.split.low);
}
static int x86_64_insert_memory(agp_memory * mem, off_t pg_start, int type)
{
int i, j, num_entries;
void *temp;
long tmp;
u32 pte;
u64 addr;
temp = agp_bridge.current_size;
num_entries = A_SIZE_32(temp)->num_entries;
num_entries -= agp_memory_reserved>>PAGE_SHIFT;
if (type != 0 || mem->type != 0)
return -EINVAL;
/* Make sure we can fit the range in the gatt table. */
if ((pg_start + mem->page_count) > num_entries)
return -EINVAL;
j = pg_start;
/* gatt table should be empty. */
while (j < (pg_start + mem->page_count)) {
if (!PGE_EMPTY(agp_bridge.gatt_table[j]))
return -EBUSY;
j++;
}
if (mem->is_flushed == FALSE) {
CACHE_FLUSH();
mem->is_flushed = TRUE;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = mem->memory[i];
tmp = addr;
BUG_ON(tmp & 0xffffff0000000ffc);
pte = (tmp & 0x000000ff00000000) >> 28;
pte |=(tmp & 0x00000000fffff000);
pte |= 1<<1|1<<0;
agp_bridge.gatt_table[j] = pte;
}
agp_bridge.tlb_flush(mem);
return 0;
}
/*
* This hack alters the order element according
* to the size of a long. It sucks. I totally disown this, even
* though it does appear to work for the most part.
*/
static struct aper_size_info_32 x86_64_aperture_sizes[7] =
{
{32, 8192, 3+(sizeof(long)/8), 0 },
{64, 16384, 4+(sizeof(long)/8), 1<<1 },
{128, 32768, 5+(sizeof(long)/8), 1<<2 },
{256, 65536, 6+(sizeof(long)/8), 1<<1 | 1<<2 },
{512, 131072, 7+(sizeof(long)/8), 1<<3 },
{1024, 262144, 8+(sizeof(long)/8), 1<<1 | 1<<3},
{2048, 524288, 9+(sizeof(long)/8), 1<<2 | 1<<3}
};
/*
* Get the current Aperture size from the x86-64.
* Note, that there may be multiple x86-64's, but we just return
* the value from the first one we find. The set_size functions
* keep the rest coherent anyway. Or at least should do.
*/
static int amd_x86_64_fetch_size(void)
{
struct pci_dev *dev;
int i;
u32 temp;
struct aper_size_info_32 *values;
pci_for_each_dev(dev) {
if (dev->bus->number==0 &&
PCI_FUNC(dev->devfn)==3 &&
PCI_SLOT(dev->devfn)>=24 && PCI_SLOT(dev->devfn)<=31) {
pci_read_config_dword(dev, AMD_X86_64_GARTAPERTURECTL, &temp);
temp = (temp & 0xe);
values = A_SIZE_32(x86_64_aperture_sizes);
for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
if (temp == values[i].size_value) {
agp_bridge.previous_size =
agp_bridge.current_size = (void *) (values + i);
agp_bridge.aperture_size_idx = i;
return values[i].size;
}
}
}
}
/* erk, couldn't find an x86-64 ? */
return 0;
}
static void inline flush_x86_64_tlb(struct pci_dev *dev)
{
u32 tmp;
pci_read_config_dword (dev, AMD_X86_64_GARTCACHECTL, &tmp);
tmp |= 1<<0;
pci_write_config_dword (dev, AMD_X86_64_GARTCACHECTL, tmp);
}
void amd_x86_64_tlbflush(agp_memory * temp)
{
struct pci_dev *dev;
pci_for_each_dev(dev) {
if (dev->bus->number==0 && PCI_FUNC(dev->devfn)==3 &&
PCI_SLOT(dev->devfn) >=24 && PCI_SLOT(dev->devfn) <=31) {
flush_x86_64_tlb (dev);
}
}
}
/*
* In a multiprocessor x86-64 system, this function gets
* called once for each CPU.
*/
u64 amd_x86_64_configure (struct pci_dev *hammer, u64 gatt_table)
{
u64 aperturebase;
u32 tmp;
u64 addr, aper_base;
/* Address to map to */
pci_read_config_dword (hammer, AMD_X86_64_GARTAPERTUREBASE, &tmp);
aperturebase = tmp << 25;
aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
/* address of the mappings table */
addr = (u64) gatt_table;
addr >>= 12;
tmp = (u32) addr<<4;
tmp &= ~0xf;
pci_write_config_dword (hammer, AMD_X86_64_GARTTABLEBASE, tmp);
/* Enable GART translation for this hammer. */
pci_read_config_dword(hammer, AMD_X86_64_GARTAPERTURECTL, &tmp);
tmp &= 0x3f;
tmp |= 1<<0;
pci_write_config_dword(hammer, AMD_X86_64_GARTAPERTURECTL, tmp);
/* keep CPU's coherent. */
flush_x86_64_tlb (hammer);
return aper_base;
}
static struct aper_size_info_32 amd_8151_sizes[7] =
{
{2048, 524288, 9, 0x00000000 }, /* 0 0 0 0 0 0 */
{1024, 262144, 8, 0x00000400 }, /* 1 0 0 0 0 0 */
{512, 131072, 7, 0x00000600 }, /* 1 1 0 0 0 0 */
{256, 65536, 6, 0x00000700 }, /* 1 1 1 0 0 0 */
{128, 32768, 5, 0x00000720 }, /* 1 1 1 1 0 0 */
{64, 16384, 4, 0x00000730 }, /* 1 1 1 1 1 0 */
{32, 8192, 3, 0x00000738 } /* 1 1 1 1 1 1 */
};
static int amd_8151_configure(void)
{
struct pci_dev *dev, *hammer=NULL;
int current_size;
int tmp, tmp2, i;
u64 aperbar;
unsigned long gatt_bus = virt_to_phys(agp_bridge.gatt_table_real);
/* Configure AGP regs in each x86-64 host bridge. */
pci_for_each_dev(dev) {
if (dev->bus->number==0 &&
PCI_FUNC(dev->devfn)==3 &&
PCI_SLOT(dev->devfn)>=24 && PCI_SLOT(dev->devfn)<=31) {
agp_bridge.gart_bus_addr = amd_x86_64_configure(dev,gatt_bus);
hammer = dev;
/*
* TODO: Cache pci_dev's of x86-64's in private struct to save us
* having to scan the pci list each time.
*/
}
}
if (hammer == NULL) {
return -ENODEV;
}
/* Shadow x86-64 registers into 8151 registers. */
dev = agp_bridge.dev;
if (!dev)
return -ENODEV;
current_size = amd_x86_64_fetch_size();
pci_read_config_dword(dev, AMD_8151_APERTURESIZE, &tmp);
tmp &= ~(0xfff);
/* translate x86-64 size bits to 8151 size bits*/
for (i=0 ; i<7; i++) {
if (amd_8151_sizes[i].size == current_size)
tmp |= (amd_8151_sizes[i].size_value) << 3;
}
pci_write_config_dword(dev, AMD_8151_APERTURESIZE, tmp);
pci_read_config_dword (hammer, AMD_X86_64_GARTAPERTUREBASE, &tmp);
aperbar = pci_read64 (dev, AMD_8151_VMAPERTURE);
aperbar |= (tmp & 0x7fff) <<25;
aperbar &= 0x000000ffffffffff;
aperbar |= 1<<2; /* This address is a 64bit ptr FIXME: Make conditional in 32bit mode */
pci_write64 (dev, AMD_8151_VMAPERTURE, aperbar);
pci_read_config_dword(dev, AMD_8151_AGP_CTL , &tmp);
tmp &= ~(AMD_8151_GTLBEN | AMD_8151_APEREN);
pci_read_config_dword(hammer, AMD_X86_64_GARTAPERTURECTL, &tmp2);
if (tmp2 & AMD_X86_64_GARTEN)
tmp |= AMD_8151_APEREN;
// FIXME: bit 7 of AMD_8151_AGP_CTL (GTLBEN) must be copied if set.
// But where is it set ?
pci_write_config_dword(dev, AMD_8151_AGP_CTL, tmp);
return 0;
}
static void amd_8151_cleanup(void)
{
struct pci_dev *dev;
u32 tmp;
pci_for_each_dev(dev) {
/* disable gart translation */
if (dev->bus->number==0 && PCI_FUNC(dev->devfn)==3 &&
(PCI_SLOT(dev->devfn) >=24) && (PCI_SLOT(dev->devfn) <=31)) {
pci_read_config_dword (dev, AMD_X86_64_GARTAPERTURECTL, &tmp);
tmp &= ~(AMD_X86_64_GARTEN);
pci_write_config_dword (dev, AMD_X86_64_GARTAPERTURECTL, tmp);
}
/* Now shadow the disable in the 8151 */
if (dev->vendor == PCI_VENDOR_ID_AMD &&
dev->device == PCI_DEVICE_ID_AMD_8151_0) {
pci_read_config_dword (dev, AMD_8151_AGP_CTL, &tmp);
tmp &= ~(AMD_8151_APEREN);
pci_write_config_dword (dev, AMD_8151_AGP_CTL, tmp);
}
}
}
static unsigned long amd_8151_mask_memory(unsigned long addr, int type)
{
return addr | agp_bridge.masks[0].mask;
}
static struct gatt_mask amd_8151_masks[] =
{
{0x00000001, 0}
};
/*
* Try to configure an AGP v3 capable setup.
* If we fail (typically because we don't have an AGP v3
* card in the system) we fall back to the generic AGP v2
* routines.
*/
static void agp_x86_64_agp_enable(u32 mode)
{
struct pci_dev *device = NULL;
u32 command, scratch;
u8 cap_ptr;
u8 agp_v3;
u8 v3_devs=0;
/* FIXME: If 'mode' is x1/x2/x4 should we call the AGPv2 routines directly ?
* Messy, as some AGPv3 cards can only do x4 as a minimum.
*/
/* PASS1: Count # of devs capable of AGPv3 mode. */
pci_for_each_dev(device) {
cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
if (cap_ptr != 0x00) {
pci_read_config_dword(device, cap_ptr, &scratch);
scratch &= (1<<20|1<<21|1<<22|1<<23);
scratch = scratch>>20;
/* AGP v3 capable ? */
if (scratch>=3) {
v3_devs++;
printk (KERN_INFO "AGP: Found AGPv3 capable device at %d:%d:%d\n",
device->bus->number, PCI_FUNC(device->devfn), PCI_SLOT(device->devfn));
} else {
printk (KERN_INFO "AGP: Meh. version %x AGP device found.\n", scratch);
}
}
}
/* If not enough, go to AGP v2 setup */
if (v3_devs<2) {
printk (KERN_INFO "AGP: Only %d devices found, not enough, trying AGPv2\n", v3_devs);
return agp_generic_agp_enable(mode);
} else {
printk (KERN_INFO "AGP: Enough AGPv3 devices found, setting up...\n");
}
pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 4, &command);
/*
* PASS2: go through all devices that claim to be
* AGP devices and collect their data.
*/
pci_for_each_dev(device) {
cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
if (cap_ptr != 0x00) {
/*
* Ok, here we have a AGP device. Disable impossible
* settings, and adjust the readqueue to the minimum.
*/
printk (KERN_INFO "AGP: Setting up AGPv3 capable device at %d:%d:%d\n",
device->bus->number, PCI_FUNC(device->devfn), PCI_SLOT(device->devfn));
pci_read_config_dword(device, cap_ptr + 4, &scratch);
agp_v3 = (scratch & (1<<3) ) >>3;
/* adjust RQ depth */
command =
((command & ~0xff000000) |
min_t(u32, (mode & 0xff000000),
min_t(u32, (command & 0xff000000),
(scratch & 0xff000000))));
/* disable SBA if it's not supported */
if (!((command & 0x200) && (scratch & 0x200) && (mode & 0x200)))
command &= ~0x200;
/* disable FW if it's not supported */
if (!((command & 0x10) && (scratch & 0x10) && (mode & 0x10)))
command &= ~0x10;
if (!((command & 2) && (scratch & 2) && (mode & 2))) {
command &= ~2; /* 8x */
printk (KERN_INFO "AGP: Putting device into 8x mode\n");
}
if (!((command & 1) && (scratch & 1) && (mode & 1))) {
command &= ~1; /* 4x */
printk (KERN_INFO "AGP: Putting device into 4x mode\n");
}
}
}
/*
* PASS3: Figure out the 8X/4X setting and enable the
* target (our motherboard chipset).
*/
if (command & 2)
command &= ~5; /* 8X */
if (command & 1)
command &= ~6; /* 4X */
command |= 0x100;
pci_write_config_dword(agp_bridge.dev, agp_bridge.capndx + 8, command);
/*
* PASS4: Go through all AGP devices and update the
* command registers.
*/
pci_for_each_dev(device) {
cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
if (cap_ptr != 0x00)
pci_write_config_dword(device, cap_ptr + 8, command);
}
}
int __init amd_8151_setup (struct pci_dev *pdev)
{
agp_bridge.masks = amd_8151_masks;
agp_bridge.num_of_masks = 1;
agp_bridge.aperture_sizes = (void *) amd_8151_sizes;
agp_bridge.size_type = U32_APER_SIZE;
agp_bridge.num_aperture_sizes = 7;
agp_bridge.dev_private_data = NULL;
agp_bridge.needs_scratch_page = FALSE;
agp_bridge.configure = amd_8151_configure;
agp_bridge.fetch_size = amd_x86_64_fetch_size;
agp_bridge.cleanup = amd_8151_cleanup;
agp_bridge.tlb_flush = amd_x86_64_tlbflush;
agp_bridge.mask_memory = amd_8151_mask_memory;
agp_bridge.agp_enable = agp_x86_64_agp_enable;
agp_bridge.cache_flush = global_cache_flush;
agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
agp_bridge.insert_memory = x86_64_insert_memory;
agp_bridge.remove_memory = agp_generic_remove_memory;
agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
agp_bridge.free_by_type = agp_generic_free_by_type;
agp_bridge.agp_alloc_page = agp_generic_alloc_page;
agp_bridge.agp_destroy_page = agp_generic_destroy_page;
agp_bridge.suspend = agp_generic_suspend;
agp_bridge.resume = agp_generic_resume;
agp_bridge.cant_use_aperture = 0;
return 0;
}
@@ -121,6 +121,8 @@ extern struct tty_driver ptm_driver[]; /* Unix98 pty masters; for /dev/ptmx */
extern struct tty_driver pts_driver[]; /* Unix98 pty slaves; for /dev/ptmx */
#endif
+ extern void disable_early_printk(void);
/*
* redirect is the pseudo-tty that console output
* is redirected to if asked by TIOCCONS.
@@ -2185,6 +2187,9 @@ void __init console_init(void)
* set up the console device so that later boot sequences can
* inform about problems etc..
*/
+ #ifdef CONFIG_EARLY_PRINTK
+ disable_early_printk();
+ #endif
#ifdef CONFIG_VT
con_init();
#endif
...
@@ -26,6 +26,8 @@
#include "i82092aa.h"
#include "i82365.h"
+ MODULE_LICENSE("GPL");
/* PCI core routines */
static struct pci_device_id i82092aa_pci_ids[] = {
{
...
@@ -1048,11 +1048,19 @@ int isapnp_cfg_begin(int csn, int logdev)
isapnp_wait();
isapnp_key();
isapnp_wake(csn);
- #if 1 /* to avoid malfunction when the isapnptools package is used */
- isapnp_set_rdp();
- udelay(1000); /* delay 1000us */
- write_address(0x01);
- udelay(1000); /* delay 1000us */
+ #if 1
+ /* to avoid malfunction when the isapnptools package is used */
+ /* we must set RDP to our value again */
+ /* it is possible to set RDP only in the isolation phase */
+ /* Jens Thoms Toerring <Jens.Toerring@physik.fu-berlin.de> */
+ isapnp_write_byte(0x02, 0x04); /* clear CSN of card */
+ mdelay(2); /* is this necessary? */
+ isapnp_wake(csn); /* bring card into sleep state */
+ isapnp_wake(0); /* bring card into isolation state */
+ isapnp_set_rdp(); /* reset the RDP port */
+ udelay(1000); /* delay 1000us */
+ isapnp_write_byte(0x06, csn); /* reset CSN to previous value */
+ udelay(250); /* is this necessary? */
#endif
if (logdev >= 0)
isapnp_device(logdev);
...
@@ -276,6 +276,7 @@ static void DAC1064_setmclk(WPMINFO int oscinfo, unsigned long fmem) {
hw->MXoptionReg = mx;
}
+ #ifdef CONFIG_FB_MATROX_G450
static void g450_set_plls(WPMINFO2) {
u_int32_t c2_ctl;
unsigned int pxc;
@@ -365,6 +366,7 @@ static void g450_set_plls(WPMINFO2) {
}
}
}
+ #endif
void DAC1064_global_init(WPMINFO2) {
struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
@@ -372,6 +374,7 @@ void DAC1064_global_init(WPMINFO2) {
hw->DACreg[POS1064_XMISCCTRL] &= M1064_XMISCCTRL_DAC_WIDTHMASK;
hw->DACreg[POS1064_XMISCCTRL] |= M1064_XMISCCTRL_LUT_EN;
hw->DACreg[POS1064_XPIXCLKCTRL] = M1064_XPIXCLKCTRL_PLL_UP | M1064_XPIXCLKCTRL_EN | M1064_XPIXCLKCTRL_SRC_PLL;
+ #ifdef CONFIG_FB_MATROX_G450
if (ACCESS_FBINFO(devflags.g450dac)) {
hw->DACreg[POS1064_XPWRCTRL] = 0x1F; /* powerup everything */
hw->DACreg[POS1064_XOUTPUTCONN] = 0x00; /* disable outputs */
@@ -420,7 +423,9 @@ void DAC1064_global_init(WPMINFO2) {
}
/* Now set timming related variables... */
g450_set_plls(PMINFO2);
- } else {
+ } else
+ #endif
+ {
if (ACCESS_FBINFO(outputs[1]).src == MATROXFB_SRC_CRTC1) {
hw->DACreg[POS1064_XPIXCLKCTRL] = M1064_XPIXCLKCTRL_PLL_UP | M1064_XPIXCLKCTRL_EN | M1064_XPIXCLKCTRL_SRC_EXT;
hw->DACreg[POS1064_XMISCCTRL] |= GX00_XMISCCTRL_MFC_MAFC | G400_XMISCCTRL_VDO_MAFC12;
@@ -621,6 +626,7 @@ static struct matrox_altout m1064 = {
.compute = m1064_compute,
};
+ #ifdef CONFIG_FB_MATROX_G450
static int g450_compute(void* out, struct my_timming* m) {
#define minfo ((struct matrox_fb_info*)out)
if (m->mnp < 0) {
@@ -637,6 +643,7 @@ static struct matrox_altout g450out = {
.name = "Primary output",
.compute = g450_compute,
};
+ #endif
#endif /* NEED_DAC1064 */
@@ -819,6 +826,7 @@ static void MGA1064_reset(WPMINFO2) {
#endif
#ifdef CONFIG_FB_MATROX_G100
+ #ifdef CONFIG_FB_MATROX_G450
static void g450_mclk_init(WPMINFO2) {
/* switch all clocks to PCI source */
pci_write_config_dword(ACCESS_FBINFO(pcidev), PCI_OPTION_REG, ACCESS_FBINFO(hw).MXoptionReg | 4);
@@ -936,6 +944,10 @@ static void g450_preinit(WPMINFO2) {
return;
}
+ #else
+ static inline void g450_preinit(WPMINFO2) {
+ }
+ #endif
static int MGAG100_preinit(WPMINFO2) {
static const int vxres_g100[] = { 512, 640, 768, 800, 832, 960,
@@ -973,9 +985,12 @@ static int MGAG100_preinit(WPMINFO2) {
ACCESS_FBINFO(capable.plnwt) = ACCESS_FBINFO(devflags.accelerator) == FB_ACCEL_MATROX_MGAG100
? ACCESS_FBINFO(devflags.sgram) : 1;
+ #ifdef CONFIG_FB_MATROX_G450
if (ACCESS_FBINFO(devflags.g450dac)) {
ACCESS_FBINFO(outputs[0]).output = &g450out;
- } else {
+ } else
+ #endif
+ {
ACCESS_FBINFO(outputs[0]).output = &m1064;
}
ACCESS_FBINFO(outputs[0]).src = MATROXFB_SRC_CRTC1;
...
@@ -105,7 +105,7 @@
#endif
#endif
- #if defined(__alpha__) || defined(__m68k__)
+ #if defined(__alpha__) || defined(__mc68000__)
#define READx_WORKS
#define MEMCPYTOIO_WORKS
#else
@@ -121,7 +121,7 @@
#endif
#endif
- #if defined(__m68k__)
+ #if defined(__mc68000__)
#define MAP_BUSTOVIRT
#else
#define MAP_IOREMAP
...
@@ -2830,6 +2830,86 @@ void dtInitRoot(tid_t tid, struct inode *ip, u32 idotdot)
return;
}
/*
* add_missing_indices()
*
* function: Fix dtree page in which one or more entries has an invalid index.
* fsck.jfs should really fix this, but it currently does not.
* Called from jfs_readdir when bad index is detected.
*/
static void add_missing_indices(struct inode *inode, s64 bn)
{
struct ldtentry *d;
struct dt_lock *dtlck;
int i;
uint index;
struct lv *lv;
struct metapage *mp;
dtpage_t *p;
int rc;
s8 *stbl;
tid_t tid;
struct tlock *tlck;
tid = txBegin(inode->i_sb, 0);
DT_GETPAGE(inode, bn, mp, PSIZE, p, rc);
if (rc) {
printk(KERN_ERR "DT_GETPAGE failed!\n");
goto end;
}
BT_MARK_DIRTY(mp, inode);
ASSERT(p->header.flag & BT_LEAF);
tlck = txLock(tid, inode, mp, tlckDTREE | tlckENTRY);
dtlck = (struct dt_lock *) &tlck->lock;
stbl = DT_GETSTBL(p);
for (i = 0; i < p->header.nextindex; i++) {
d = (struct ldtentry *) &p->slot[stbl[i]];
index = le32_to_cpu(d->index);
if ((index < 2) || (index >= JFS_IP(inode)->next_index)) {
d->index = cpu_to_le32(add_index(tid, inode, bn, i));
if (dtlck->index >= dtlck->maxcnt)
dtlck = (struct dt_lock *) txLinelock(dtlck);
lv = dtlck->lv;
lv->offset = stbl[i];
lv->length = 1;
dtlck->index++;
}
}
DT_PUTPAGE(mp);
(void) txCommit(tid, 1, &inode, 0);
end:
txEnd(tid);
}
/*
* Buffer to hold directory entry info while traversing a dtree page
* before being fed to the filldir function
*/
struct jfs_dirent {
loff_t position;
int ino;
u16 name_len;
char name[0];
};
/*
* function to determine next variable-sized jfs_dirent in buffer
*/
inline struct jfs_dirent *next_jfs_dirent(struct jfs_dirent *dirent)
{
return (struct jfs_dirent *)
((char *)dirent +
((sizeof (struct jfs_dirent) + dirent->name_len + 1 +
sizeof (loff_t) - 1) &
~(sizeof (loff_t) - 1)));
}
/*
* jfs_readdir()
*
@@ -2846,11 +2926,12 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
struct inode *ip = filp->f_dentry->d_inode;
struct nls_table *codepage = JFS_SBI(ip->i_sb)->nls_tab;
int rc = 0;
loff_t dtpos; /* legacy OS/2 style position */
struct dtoffset {
s16 pn;
s16 index;
s32 unused;
- } *dtoffset = (struct dtoffset *) &filp->f_pos;
+ } *dtoffset = (struct dtoffset *) &dtpos;
s64 bn;
struct metapage *mp;
dtpage_t *p;
@@ -2860,12 +2941,17 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
int i, next;
struct ldtentry *d;
struct dtslot *t;
- int d_namleft, d_namlen, len, outlen;
+ int d_namleft, len, outlen;
- char *d_name, *name_ptr;
+ unsigned long dirent_buf;
char *name_ptr;
int dtlhdrdatalen;
u32 dir_index;
int do_index = 0;
uint loop_count = 0;
struct jfs_dirent *jfs_dirent;
int jfs_dirents;
int overflow, fix_page, page_fixed = 0;
static int unique_pos = 2; /* If we can't fix broken index */
if (filp->f_pos == DIREND)
return 0;
@@ -2885,7 +2971,9 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
if (dir_index > 1) {
struct dir_table_slot dirtab_slot;
- if (dtEmpty(ip)) {
+ if (dtEmpty(ip) ||
(dir_index >= JFS_IP(ip)->next_index)) {
/* Stale position. Directory has shrunk */
filp->f_pos = DIREND;
return 0;
}
@@ -2963,13 +3051,15 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
*/
dtlhdrdatalen = DTLHDRDATALEN_LEGACY;
- if (filp->f_pos == 0) {
+ dtpos = filp->f_pos;
if (dtpos == 0) {
/* build "." entry */
if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino,
DT_DIR))
return 0;
dtoffset->index = 1;
filp->f_pos = dtpos;
}
if (dtoffset->pn == 0) {
@@ -2985,6 +3075,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
dtoffset->pn = 1;
dtoffset->index = 0;
filp->f_pos = dtpos;
}
if (dtEmpty(ip)) {
@@ -3009,32 +3100,72 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
}
}
- d_name = kmalloc((JFS_NAME_MAX + 1) * sizeof(wchar_t), GFP_NOFS);
+ dirent_buf = __get_free_page(GFP_KERNEL);
- if (d_name == NULL) {
+ if (dirent_buf == 0) {
DT_PUTPAGE(mp);
- jERROR(1, ("jfs_readdir: kmalloc failed!\n"));
+ jERROR(1, ("jfs_readdir: __get_free_page failed!\n"));
filp->f_pos = DIREND;
- return 0;
+ return -ENOMEM;
}
while (1) {
jfs_dirent = (struct jfs_dirent *) dirent_buf;
jfs_dirents = 0;
overflow = fix_page = 0;
stbl = DT_GETSTBL(p);
for (i = index; i < p->header.nextindex; i++) {
d = (struct ldtentry *) & p->slot[stbl[i]];
if (((long) jfs_dirent + d->namlen + 1) >
(dirent_buf + PSIZE)) {
/* DBCS codepages could overrun dirent_buf */
index = i;
overflow = 1;
break;
}
d_namleft = d->namlen;
- name_ptr = d_name;
+ name_ptr = jfs_dirent->name;
+ jfs_dirent->ino = le32_to_cpu(d->inumber);
if (do_index) {
- filp->f_pos = le32_to_cpu(d->index);
len = min(d_namleft, DTLHDRDATALEN);
- } else
+ jfs_dirent->position = le32_to_cpu(d->index);
/*
* d->index should always be valid, but it
* isn't. fsck.jfs doesn't create the
* directory index for the lost+found
* directory. Rather than let it go,
* we can try to fix it.
*/
if ((jfs_dirent->position < 2) ||
(jfs_dirent->position >=
JFS_IP(ip)->next_index)) {
if (!page_fixed && !isReadOnly(ip)) {
fix_page = 1;
/*
* setting overflow and setting
* index to i will cause the
* same page to be processed
* again starting here
*/
overflow = 1;
index = i;
break;
}
jfs_dirent->position = unique_pos++;
}
} else {
jfs_dirent->position = dtpos;
len = min(d_namleft, DTLHDRDATALEN_LEGACY);
}
/* copy the name of head/only segment */
outlen = jfs_strfromUCS_le(name_ptr, d->name, len,
codepage);
- d_namlen = outlen;
+ jfs_dirent->name_len = outlen;
/* copy name in the additional segment(s) */
next = d->next;
@@ -3053,56 +3184,66 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
len = min(d_namleft, DTSLOTDATALEN);
outlen = jfs_strfromUCS_le(name_ptr, t->name,
len, codepage);
- d_namlen+= outlen;
+ jfs_dirent->name_len += outlen;
next = t->next;
}
- if (filldir(dirent, d_name, d_namlen, filp->f_pos,
- le32_to_cpu(d->inumber), DT_UNKNOWN))
- goto out;
+ jfs_dirents++;
+ jfs_dirent = next_jfs_dirent(jfs_dirent);
skip_one:
if (!do_index)
dtoffset->index++;
}
- /*
- * get next leaf page
- */
- if (p->header.flag & BT_ROOT) {
- filp->f_pos = DIREND;
- break;
+ if (!overflow) {
+ /* Point to next leaf page */
+ if (p->header.flag & BT_ROOT)
+ bn = 0;
+ else {
+ bn = le64_to_cpu(p->header.next);
+ index = 0;
+ /* update offset (pn:index) for new page */
+ if (!do_index) {
+ dtoffset->pn++;
+ dtoffset->index = 0;
+ }
+ }
+ page_fixed = 0;
}
- bn = le64_to_cpu(p->header.next);
- if (bn == 0) {
+ /* unpin previous leaf page */
+ DT_PUTPAGE(mp);
+ jfs_dirent = (struct jfs_dirent *) dirent_buf;
+ while (jfs_dirents--) {
+ filp->f_pos = jfs_dirent->position;
+ if (filldir(dirent, jfs_dirent->name,
+ jfs_dirent->name_len, filp->f_pos,
+ jfs_dirent->ino, DT_UNKNOWN))
+ goto out;
+ jfs_dirent = next_jfs_dirent(jfs_dirent);
+ }
+ if (fix_page) {
+ add_missing_indices(ip, bn);
+ page_fixed = 1;
+ }
+ if (!overflow && (bn == 0)) {
filp->f_pos = DIREND;
break;
}
- /* unpin previous leaf page */
- DT_PUTPAGE(mp);
- /* get next leaf page */
DT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
if (rc) {
- kfree(d_name);
+ free_page(dirent_buf);
return -rc;
}
- /* update offset (pn:index) for new page */
- index = 0;
- if (!do_index) {
- dtoffset->pn++;
- dtoffset->index = 0;
- }
}
out:
- kfree(d_name);
- DT_PUTPAGE(mp);
+ free_page(dirent_buf);
return rc;
}
...
@@ -365,11 +365,7 @@ int diRead(struct inode *ip)
if ((lengthPXD(&iagp->inoext[extno]) != imap->im_nbperiext) ||
(addressPXD(&iagp->inoext[extno]) == 0)) {
- jERROR(1, ("diRead: Bad inoext: 0x%lx, 0x%lx\n",
- (ulong) addressPXD(&iagp->inoext[extno]),
- (ulong) lengthPXD(&iagp->inoext[extno])));
release_metapage(mp);
- updateSuper(ip->i_sb, FM_DIRTY);
return ESTALE;
}
@@ -416,12 +412,9 @@ int diRead(struct inode *ip)
jERROR(1, ("diRead: i_ino != di_number\n"));
updateSuper(ip->i_sb, FM_DIRTY);
rc = EIO;
- } else if (le32_to_cpu(dp->di_nlink) == 0) {
+ } else if (le32_to_cpu(dp->di_nlink) == 0)
- jERROR(1,
- ("diRead: di_nlink is zero. ino=%ld\n", ip->i_ino));
- updateSuper(ip->i_sb, FM_DIRTY);
rc = ESTALE;
- } else
+ else
/* copy the disk inode to the in-memory inode */
rc = copy_from_dinode(dp, ip);
...
@@ -1552,7 +1552,12 @@ static int lmLogFileSystem(struct jfs_log * log, char *uuid, int activate)
memcpy(logsuper->active[i].uuid, NULL_UUID, 16);
break;
}
- assert(i < MAX_ACTIVE);
+ if (i == MAX_ACTIVE) {
+ jERROR(1,("Somebody stomped on the journal!\n"));
+ lbmFree(bpsuper);
+ return EIO;
+ }
}
/*
...
@@ -1459,10 +1459,8 @@ int fcntl_setlk(struct file *filp, unsigned int cmd, struct flock *l)
break;
}
out:
- if (error) {
locks_free_lock(file_lock);
- }
return error;
}
@@ -1601,11 +1599,8 @@ int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 *l)
break;
}
out:
- if (error) {
locks_free_lock(file_lock);
- }
return error;
}
#endif /* BITS_PER_LONG == 32 */
...
@@ -228,6 +228,26 @@ enum acpi_interrupt_id {
ACPI_INTERRUPT_COUNT
};
#define ACPI_SPACE_MEM 0
struct acpi_gen_regaddr {
u8 space_id;
u8 bit_width;
u8 bit_offset;
u8 resv;
u32 addrl;
u32 addrh;
} __attribute__ ((packed));
struct acpi_table_hpet {
struct acpi_table_header header;
u32 id;
struct acpi_gen_regaddr addr;
u8 number;
u16 min_tick;
u8 page_protect;
} __attribute__ ((packed));
/*
* System Resource Affinity Table (SRAT)
* see http://www.microsoft.com/hwdev/design/srat.htm
@@ -333,6 +353,7 @@ enum acpi_table_id {
ACPI_SRAT,
ACPI_SSDT,
ACPI_SPMI,
+ ACPI_HPET,
ACPI_TABLE_COUNT
};
...
@@ -66,6 +66,7 @@ enum chipset_type {
AMD_IRONGATE,
AMD_761,
AMD_762,
+ AMD_8151,
ALI_M1541,
ALI_M1621,
ALI_M1631,
@@ -161,7 +162,7 @@ extern agp_memory *agp_allocate_memory(size_t, u32);
*
*/
- extern void agp_copy_info(agp_kern_info *);
+ extern int agp_copy_info(agp_kern_info *);
/*
* agp_copy_info :
@@ -257,7 +258,7 @@ typedef struct {
void (*enable)(u32);
int (*acquire)(void);
void (*release)(void);
- void (*copy_info)(agp_kern_info *);
+ int (*copy_info)(agp_kern_info *);
} drm_agp_t;
extern const drm_agp_t *drm_agp_p;
...
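Since agp_copy_info() now returns an int, a client such as a DRM driver can detect an unsupported or uninitialised bridge instead of reading back a zeroed structure. A hedged sketch of a caller (example_probe_agp is a hypothetical name, not part of this commit):

static int example_probe_agp(void)
{
	agp_kern_info info;

	if (agp_copy_info(&info) != 0)
		return -ENODEV;		/* no usable AGP bridge */

	/* info.aper_base, info.aper_size etc. are valid here */
	return 0;
}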
#ifndef _LINUX_ELEVATOR_H
#define _LINUX_ELEVATOR_H
- typedef int (elevator_merge_fn) (request_queue_t *, struct request **,
+ typedef int (elevator_merge_fn) (request_queue_t *, struct list_head **,
struct bio *);
typedef void (elevator_merge_req_fn) (request_queue_t *, struct request *, struct request *);
@@ -42,7 +42,7 @@ struct elevator_s
*/
extern void __elv_add_request(request_queue_t *, struct request *,
struct list_head *);
- extern int elv_merge(request_queue_t *, struct request **, struct bio *);
+ extern int elv_merge(request_queue_t *, struct list_head **, struct bio *);
extern void elv_merge_requests(request_queue_t *, struct request *,
struct request *);
extern void elv_merged_request(request_queue_t *, struct request *);
@@ -78,7 +78,7 @@ extern void elevator_exit(request_queue_t *, elevator_t *);
extern inline int bio_rq_in_between(struct bio *, struct request *, struct list_head *);
extern inline int elv_rq_merge_ok(struct request *, struct bio *);
extern inline int elv_try_merge(struct request *, struct bio *);
- extern inline int elv_try_last_merge(request_queue_t *, struct request **, struct bio *);
+ extern inline int elv_try_last_merge(request_queue_t *, struct bio *);
/*
* Return values from elevator merger
...
@@ -75,7 +75,7 @@ typedef __s64 Elf64_Sxword;
#define EM_IA_64 50 /* HP/Intel IA-64 */
- #define EM_X8664 62 /* AMD x86-64 */
+ #define EM_X86_64 62 /* AMD x86-64 */
#define EM_S390 22 /* IBM S/390 */
...
@@ -398,6 +398,7 @@
#define PCI_DEVICE_ID_AMD_8111_LAN 0x7462
#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
#define PCI_DEVICE_ID_AMD_8111_AUDIO 0x746d
+ #define PCI_DEVICE_ID_AMD_8151_0 0x7454
#define PCI_VENDOR_ID_TRIDENT 0x1023
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
...