Commit 8baa8006 authored by Linus Torvalds

Merge http://gkernel.bkbits.net/misc-2.5

into home.transmeta.com:/home/torvalds/v2.5/linux
parents 27b727c8 97565ed3
@@ -118,6 +118,8 @@ deadline_find_hash(struct deadline_data *dd, sector_t offset)
 	while ((entry = next) != hash_list) {
 		next = entry->next;
+		prefetch(next);
 		drq = list_entry_hash(entry);
 		BUG_ON(!drq->hash_valid_count);
@@ -191,6 +193,8 @@ deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
 		while ((entry = entry->prev) != sort_list) {
 			__rq = list_entry_rq(entry);
+			prefetch(entry->prev);
 			BUG_ON(__rq->flags & REQ_STARTED);
 			if (!(__rq->flags & REQ_CMD))
@@ -298,6 +302,8 @@ static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
 		struct list_head *nxt = rq->queuelist.next;
 		int this_rq_cost;
+		prefetch(nxt);
 		/*
 		 * take it off the sort and fifo list, move
 		 * to dispatch queue
......
@@ -272,13 +272,27 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
 		e->elevator_merge_req_fn(q, rq, next);
 }
 
-/*
- * add_request and next_request are required to be supported, naturally
- */
-void __elv_add_request(request_queue_t *q, struct request *rq,
-		       struct list_head *insert_here)
+void __elv_add_request(request_queue_t *q, struct request *rq, int at_end,
+		       int plug)
 {
-	q->elevator.elevator_add_req_fn(q, rq, insert_here);
+	struct list_head *insert = &q->queue_head;
+
+	if (at_end)
+		insert = insert->prev;
+	if (plug)
+		blk_plug_device(q);
+
+	q->elevator.elevator_add_req_fn(q, rq, insert);
+}
+
+void elv_add_request(request_queue_t *q, struct request *rq, int at_end,
+		     int plug)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__elv_add_request(q, rq, at_end, plug);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 static inline struct request *__elv_next_request(request_queue_t *q)
@@ -289,8 +303,14 @@ static inline struct request *__elv_next_request(request_queue_t *q)
 struct request *elv_next_request(request_queue_t *q)
 {
 	struct request *rq;
+	int ret;
 
 	while ((rq = __elv_next_request(q))) {
+		/*
+		 * just mark as started even if we don't start it, a request
+		 * that has been delayed should not be passed by new incoming
+		 * requests
+		 */
 		rq->flags |= REQ_STARTED;
 
 		if (&rq->queuelist == q->last_merge)
@@ -299,20 +319,22 @@ struct request *elv_next_request(request_queue_t *q)
 		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
 			break;
 
-		/*
-		 * all ok, break and return it
-		 */
-		if (!q->prep_rq_fn(q, rq))
+		ret = q->prep_rq_fn(q, rq);
+		if (ret == BLKPREP_OK) {
 			break;
-
-		/*
-		 * prep said no-go, kill it
-		 */
-		blkdev_dequeue_request(rq);
-		if (end_that_request_first(rq, 0, rq->nr_sectors))
-			BUG();
-
-		end_that_request_last(rq);
+		} else if (ret == BLKPREP_DEFER) {
+			rq = NULL;
+			break;
+		} else if (ret == BLKPREP_KILL) {
+			blkdev_dequeue_request(rq);
+			rq->flags |= REQ_QUIET;
+			while (end_that_request_first(rq, 0, rq->nr_sectors))
+				;
+			end_that_request_last(rq);
+		} else {
+			printk("%s: bad return=%d\n", __FUNCTION__, ret);
+			break;
+		}
 	}
 
 	return rq;
@@ -322,6 +344,16 @@ void elv_remove_request(request_queue_t *q, struct request *rq)
 {
 	elevator_t *e = &q->elevator;
 
+	/*
+	 * the main clearing point for q->last_merge is on retrieval of
+	 * request by driver (it calls elv_next_request()), but it _can_
+	 * also happen here if a request is added to the queue but later
+	 * deleted without ever being given to driver (merged with another
+	 * request).
+	 */
+	if (&rq->queuelist == q->last_merge)
+		q->last_merge = NULL;
+
 	if (e->elevator_remove_req_fn)
 		e->elevator_remove_req_fn(q, rq);
 }
@@ -357,6 +389,7 @@ module_init(elevator_global_init);
 EXPORT_SYMBOL(elevator_noop);
+EXPORT_SYMBOL(elv_add_request);
 EXPORT_SYMBOL(__elv_add_request);
 EXPORT_SYMBOL(elv_next_request);
 EXPORT_SYMBOL(elv_remove_request);
......
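Note on the new insertion interface above: the old __elv_add_request() took a raw list position, while the new pair takes an at_end flag (tail vs. head insertion) and a plug flag, with elv_add_request() acquiring q->queue_lock itself and __elv_add_request() expecting the caller to already hold it. A minimal sketch of how a driver might use each variant (the helper names are illustrative, not part of this patch):

#include <linux/blkdev.h>
#include <linux/elevator.h>

/* caller already holds q->queue_lock (e.g. inside its request_fn):
 * use the lock-free variant, re-insert at the head, no plugging */
static void my_requeue(request_queue_t *q, struct request *rq)
{
	__elv_add_request(q, rq, 0, 0);
}

/* caller holds no locks: elv_add_request() takes q->queue_lock itself;
 * queue at the tail and plug the device, as the old elv_add_request
 * macro in blk.h used to do */
static void my_submit(request_queue_t *q, struct request *rq)
{
	elv_add_request(q, rq, 1, 1);
}

The scsi_lib.c and ide hunks further down are straight conversions to this calling convention.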
This diff is collapsed.
@@ -548,12 +548,7 @@ static void process_page(unsigned long data)
 		return_bio = bio->bi_next;
 		bio->bi_next = NULL;
 
-		/* should use bio_endio(), however already cleared
-		 * BIO_UPTODATE. so set bio->bi_size = 0 manually to indicate
-		 * completely done
-		 */
-		bio->bi_size = 0;
-		bio->bi_end_io(bio, bytes, 0);
+		bio_endio(bio, bio->bi_size, 0);
 	}
 }
......
@@ -1610,56 +1610,6 @@ static void idedisk_add_settings(ide_drive_t *drive)
 #endif
 }
 
-static int idedisk_suspend(struct device *dev, u32 state, u32 level)
-{
-	ide_drive_t *drive = dev->driver_data;
-
-	printk("Suspending device %p\n", dev->driver_data);
-
-	/* I hope that every freeze operation from the upper levels have
-	 * already been done...
-	 */
-	if (level != SUSPEND_SAVE_STATE)
-		return 0;
-	BUG_ON(in_interrupt());
-
-	printk("Waiting for commands to finish\n");
-
-	/* wait until all commands are finished */
-	/* FIXME: waiting for spinlocks should be done instead. */
-	if (!(HWGROUP(drive)))
-		printk("No hwgroup?\n");
-	while (HWGROUP(drive)->handler)
-		yield();
-
-	/* set the drive to standby */
-	printk(KERN_INFO "suspending: %s ", drive->name);
-	if (drive->driver) {
-		if (drive->driver->standby)
-			drive->driver->standby(drive);
-	}
-	drive->blocked = 1;
-
-	while (HWGROUP(drive)->handler)
-		yield();
-
-	return 0;
-}
-
-static int idedisk_resume(struct device *dev, u32 level)
-{
-	ide_drive_t *drive = dev->driver_data;
-
-	if (level != RESUME_RESTORE_STATE)
-		return 0;
-	if (!drive->blocked)
-		panic("ide: Resume but not suspended?\n");
-	drive->blocked = 0;
-	return 0;
-}
-
 /* This is just a hook for the overall driver tree.
  */
......
@@ -1238,6 +1238,21 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
 	set_bit(PC_DMA_RECOMMENDED, &pc->flags);
 }
 
+static int
+idefloppy_blockpc_cmd(idefloppy_floppy_t *floppy, idefloppy_pc_t *pc, struct request *rq)
+{
+	/*
+	 * just support eject for now, it would not be hard to make the
+	 * REQ_BLOCK_PC support fully-featured
+	 */
+	if (rq->cmd[0] != IDEFLOPPY_START_STOP_CMD)
+		return 1;
+
+	idefloppy_init_pc(pc);
+	memcpy(pc->c, rq->cmd, sizeof(pc->c));
+	return 0;
+}
+
 /*
  * idefloppy_do_request is our request handling function.
  */
@@ -1280,6 +1295,12 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
 		idefloppy_create_rw_cmd(floppy, pc, rq, block);
 	} else if (rq->flags & REQ_SPECIAL) {
 		pc = (idefloppy_pc_t *) rq->buffer;
+	} else if (rq->flags & REQ_BLOCK_PC) {
+		pc = idefloppy_next_pc_storage(drive);
+		if (idefloppy_blockpc_cmd(floppy, pc, rq)) {
+			idefloppy_do_end_request(drive, 0, 0);
+			return ide_stopped;
+		}
 	} else {
 		blk_dump_rq_flags(rq,
 			"ide-floppy: unsupported command in queue");
......
@@ -878,13 +878,12 @@ ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 {
 	ide_startstop_t startstop;
 	unsigned long block;
-	ide_hwif_t *hwif = HWIF(drive);
 
 	BUG_ON(!(rq->flags & REQ_STARTED));
 
 #ifdef DEBUG
 	printk("%s: start_request: current=0x%08lx\n",
-		hwif->name, (unsigned long) rq);
+		HWIF(drive)->name, (unsigned long) rq);
 #endif
 
 	/* bail early if we've exceeded max_failures */
@@ -910,7 +909,7 @@ ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 		block = 1;  /* redirect MBR access to EZ-Drive partn table */
 
 #if (DISK_RECOVERY_TIME > 0)
-	while ((read_timer() - hwif->last_time) < DISK_RECOVERY_TIME);
+	while ((read_timer() - HWIF(drive)->last_time) < DISK_RECOVERY_TIME);
 #endif
 
 	SELECT_DRIVE(drive);
@@ -1128,9 +1127,15 @@ void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 			break;
 		}
 
+		/*
+		 * we know that the queue isn't empty, but this can happen
+		 * if the q->prep_rq_fn() decides to kill a request
+		 */
 		rq = elv_next_request(&drive->queue);
-		if (!rq)
+		if (!rq) {
+			hwgroup->busy = !!ata_pending_commands(drive);
 			break;
+		}
 
 		if (!rq->bio && ata_pending_commands(drive))
 			break;
@@ -1515,10 +1520,8 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
 {
 	unsigned long flags;
 	ide_hwgroup_t *hwgroup = HWGROUP(drive);
-	unsigned int major = HWIF(drive)->major;
-	request_queue_t *q = &drive->queue;
-	struct list_head *queue_head = &q->queue_head;
 	DECLARE_COMPLETION(wait);
+	int insert_end = 1, err;
 
 #ifdef CONFIG_BLK_DEV_PDC4030
 	if (HWIF(drive)->chipset == ide_pdc4030 && rq->buffer != NULL)
@@ -1540,29 +1543,35 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
 	}
 
 	rq->rq_disk = drive->disk;
-	if (action == ide_wait)
+
+	/*
+	 * we need to hold an extra reference to request for safe inspection
+	 * after completion
+	 */
+	if (action == ide_wait) {
+		rq->ref_count++;
 		rq->waiting = &wait;
+	}
 
 	spin_lock_irqsave(&ide_lock, flags);
-	if (blk_queue_empty(q) || action == ide_preempt) {
-		if (action == ide_preempt)
-			hwgroup->rq = NULL;
-	} else {
-		if (action == ide_wait || action == ide_end) {
-			queue_head = queue_head->prev;
-		} else
-			queue_head = queue_head->next;
+	if (action == ide_preempt) {
+		hwgroup->rq = NULL;
+		insert_end = 0;
 	}
-	q->elevator.elevator_add_req_fn(q, rq, queue_head);
+	__elv_add_request(&drive->queue, rq, insert_end, 0);
 	ide_do_request(hwgroup, 0);
 	spin_unlock_irqrestore(&ide_lock, flags);
 
+	err = 0;
 	if (action == ide_wait) {
-		/* wait for it to be serviced */
 		wait_for_completion(&wait);
-		/* return -EIO if errors */
-		return rq->errors ? -EIO : 0;
+		if (rq->errors)
+			err = -EIO;
+
+		blk_put_request(rq);
 	}
 
-	return 0;
+	return err;
 }
 
 EXPORT_SYMBOL(ide_do_drive_cmd);
@@ -3369,7 +3378,7 @@ int ide_register_driver(ide_driver_t *driver)
 		list_del_init(&drive->list);
 		ata_attach(drive);
 	}
-	driver->gen_driver.name = driver->name;
+	driver->gen_driver.name = (char *) driver->name;
 	driver->gen_driver.bus = &ide_bus_type;
 	driver->gen_driver.remove = ide_drive_remove;
 	return driver_register(&driver->gen_driver);
......
@@ -52,19 +52,21 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
  *	@bio: the buffer head that's been built up so far
  *	@biovec: the request that could be merged to it.
  *
- *	Return 1 if the merge is not permitted (because the
- *	result would cross a device boundary), 0 otherwise.
+ *	Return amount of bytes we can take at this offset
  */
 static int linear_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
 {
 	mddev_t *mddev = q->queuedata;
-	dev_info_t *dev0, *dev1;
+	dev_info_t *dev0;
+	int maxsectors, bio_sectors = (bio->bi_size + biovec->bv_len) >> 9;
 
 	dev0 = which_dev(mddev, bio->bi_sector);
-	dev1 = which_dev(mddev, bio->bi_sector +
-			 ((bio->bi_size + biovec->bv_len - 1) >> 9));
+	maxsectors = (dev0->size << 1) - (bio->bi_sector - (dev0->offset<<1));
 
-	return dev0 != dev1;
+	if (bio_sectors <= maxsectors)
+		return biovec->bv_len;
+
+	return (maxsectors << 9) - bio->bi_size;
 }
 
 static int linear_run (mddev_t *mddev)
......
@@ -168,8 +168,7 @@ static int create_strip_zones (mddev_t *mddev)
  *	@bio: the buffer head that's been built up so far
  *	@biovec: the request that could be merged to it.
  *
- *	Return 1 if the merge is not permitted (because the
- *	result would cross a chunk boundary), 0 otherwise.
+ *	Return amount of bytes we can accept at this offset
  */
 static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
 {
@@ -182,7 +181,7 @@ static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_
 	block = bio->bi_sector >> 1;
 	bio_sz = (bio->bi_size + biovec->bv_len) >> 10;
 
-	return chunk_size < ((block & (chunk_size - 1)) + bio_sz);
+	return (chunk_size - ((block & (chunk_size - 1)) + bio_sz)) << 10;
 }
 
 static int raid0_run (mddev_t *mddev)
......
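Both md personalities above move merge_bvec_fn from a 0/1 "reject" return to "number of bytes acceptable at this offset", which is what the reworked bio_add_page() (fs/bio.c hunk further down) compares against the length it wants to add. A minimal sketch of a merge_bvec_fn under the new contract, assuming a flat per-device sector limit instead of md's zone arithmetic:

#include <linux/bio.h>
#include <linux/blkdev.h>

#define MY_MAX_SECTORS	128	/* illustrative limit, not from this patch */

/* return how many bytes of @biovec may be appended to @bio; returning
 * less than biovec->bv_len makes bio_add_page() refuse the page, so the
 * caller submits the bio as-is and starts a new one */
static int my_mergeable_bvec(request_queue_t *q, struct bio *bio,
			     struct bio_vec *biovec)
{
	int bio_sectors = (bio->bi_size + biovec->bv_len) >> 9;

	if (bio_sectors <= MY_MAX_SECTORS)
		return biovec->bv_len;

	return (MY_MAX_SECTORS << 9) - bio->bi_size;
}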
@@ -240,7 +240,7 @@ void scsi_queue_next_request(request_queue_t * q, Scsi_Cmnd * SCpnt)
 		SCpnt->request->special = (void *) SCpnt;
 		if(blk_rq_tagged(SCpnt->request))
 			blk_queue_end_tag(q, SCpnt->request);
-		_elv_add_request(q, SCpnt->request, 0, 0);
+		__elv_add_request(q, SCpnt->request, 0, 0);
 	}
 
 	/*
@@ -951,7 +951,7 @@ void scsi_request_fn(request_queue_t * q)
 			SCpnt->request->flags |= REQ_SPECIAL;
 			if(blk_rq_tagged(SCpnt->request))
 				blk_queue_end_tag(q, SCpnt->request);
-			_elv_add_request(q, SCpnt->request, 0, 0);
+			__elv_add_request(q, SCpnt->request, 0, 0);
 			break;
 		}
......
@@ -160,13 +160,11 @@ int sr_do_ioctl(Scsi_CD *cd, struct cdrom_generic_command *cgc)
 		if (!cgc->quiet)
 			printk(KERN_ERR "%s: CDROM (ioctl) reports ILLEGAL "
 			       "REQUEST.\n", cd->cdi.name);
+		err = -EIO;
 		if (SRpnt->sr_sense_buffer[12] == 0x20 &&
-		    SRpnt->sr_sense_buffer[13] == 0x00) {
+		    SRpnt->sr_sense_buffer[13] == 0x00)
 			/* sense: Invalid command operation code */
 			err = -EDRIVE_CANT_DO_THIS;
-		} else {
-			err = -EINVAL;
-		}
 #ifdef DEBUG
 		print_command(cgc->cmd);
 		print_req_sense("sr", SRpnt);
......
@@ -122,6 +122,7 @@ inline void bio_init(struct bio *bio)
 	bio->bi_max_vecs = 0;
 	bio->bi_end_io = NULL;
 	atomic_set(&bio->bi_cnt, 1);
+	bio->bi_private = NULL;
 }
 
 /**
@@ -354,7 +355,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	request_queue_t *q = bdev_get_queue(bdev);
 	int nr_pages;
 
-	nr_pages = q->max_sectors >> (PAGE_SHIFT - 9);
+	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (nr_pages > q->max_phys_segments)
 		nr_pages = q->max_phys_segments;
 	if (nr_pages > q->max_hw_segments)
@@ -385,13 +386,13 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	 * cloned bio must not modify vec list
 	 */
 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
-		return 1;
+		return 0;
 
 	if (bio->bi_vcnt >= bio->bi_max_vecs)
-		return 1;
+		return 0;
 
 	if (((bio->bi_size + len) >> 9) > q->max_sectors)
-		return 1;
+		return 0;
 
 	/*
 	 * we might loose a segment or two here, but rather that than
@@ -404,7 +405,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	if (fail_segments) {
 		if (retried_segments)
-			return 1;
+			return 0;
 
 		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
 		retried_segments = 1;
@@ -425,18 +426,24 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 	 * depending on offset), it can specify a merge_bvec_fn in the
 	 * queue to get further control
 	 */
-	if (q->merge_bvec_fn && q->merge_bvec_fn(q, bio, bvec)) {
-		bvec->bv_page = NULL;
-		bvec->bv_len = 0;
-		bvec->bv_offset = 0;
-		return 1;
+	if (q->merge_bvec_fn) {
+		/*
+		 * merge_bvec_fn() returns number of bytes it can accept
+		 * at this offset
+		 */
+		if (q->merge_bvec_fn(q, bio, bvec) < len) {
+			bvec->bv_page = NULL;
+			bvec->bv_len = 0;
+			bvec->bv_offset = 0;
+			return 0;
+		}
 	}
 
 	bio->bi_vcnt++;
 	bio->bi_phys_segments++;
 	bio->bi_hw_segments++;
 	bio->bi_size += len;
-	return 0;
+	return len;
 }
 
 /**
@@ -446,14 +453,15 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
  * @error:	error, if any
  *
  * Description:
- *   bio_endio() will end I/O @bytes_done number of bytes. This may be just
- *   a partial part of the bio, or it may be the whole bio. bio_endio() is
- *   the preferred way to end I/O on a bio, it takes care of decrementing
+ *   bio_endio() will end I/O on @bytes_done number of bytes. This may be
+ *   just a partial part of the bio, or it may be the whole bio. bio_endio()
+ *   is the preferred way to end I/O on a bio, it takes care of decrementing
  *   bi_size and clearing BIO_UPTODATE on error. @error is 0 on success, and
 *   and one of the established -Exxxx (-EIO, for instance) error values in
- *   case something went wrong.
+ *   case something went wrong. Noone should call bi_end_io() directly on
+ *   a bio unless they own it and thus know that it has an end_io function.
  **/
-int bio_endio(struct bio *bio, unsigned int bytes_done, int error)
+void bio_endio(struct bio *bio, unsigned int bytes_done, int error)
 {
 	if (error)
 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -465,7 +473,9 @@ int bio_endio(struct bio *bio, unsigned int bytes_done, int error)
 	}
 
 	bio->bi_size -= bytes_done;
-	return bio->bi_end_io(bio, bytes_done, error);
+
+	if (bio->bi_end_io)
+		bio->bi_end_io(bio, bytes_done, error);
 }
 
 static void __init biovec_init_pools(void)
@@ -537,7 +547,7 @@ static int __init init_bio(void)
 	return 0;
 }
 
-module_init(init_bio);
+subsys_initcall(init_bio);
 
 EXPORT_SYMBOL(bio_alloc);
 EXPORT_SYMBOL(bio_put);
......
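bio_add_page() now reports how many bytes it actually took (len on success, 0 on failure) instead of a 0-is-success error code, so every caller below changes its test to "< len" and treats a short return as "submit this bio and start a new one". A minimal sketch of that pattern outside the patched callers (the helper name and the READ direction are illustrative, not from this patch):

#include <linux/bio.h>
#include <linux/fs.h>

/* try to add one page to *biop; if it no longer fits, ship the bio and
 * retry with a freshly allocated one */
static int my_add_or_submit(struct bio **biop, struct block_device *bdev,
			    sector_t sector, struct page *page,
			    unsigned int len)
{
	if (bio_add_page(*biop, page, len, 0) < len) {
		submit_bio(READ, *biop);
		*biop = bio_alloc(GFP_NOIO, bio_get_nr_vecs(bdev));
		if (!*biop)
			return -ENOMEM;
		(*biop)->bi_bdev = bdev;
		(*biop)->bi_sector = sector;
		if (bio_add_page(*biop, page, len, 0) < len)
			return -EIO;	/* page cannot fit even in an empty bio */
	}
	return 0;
}

The direct-io.c, mpage.c and XFS pagebuf hunks below follow exactly this submit-and-retry shape.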
@@ -417,12 +417,12 @@ dio_bio_add_page(struct dio *dio, struct page *page,
 	/* Take a ref against the page each time it is placed into a BIO */
 	page_cache_get(page);
 
-	if (bio_add_page(dio->bio, page, bv_len, bv_offset)) {
+	if (bio_add_page(dio->bio, page, bv_len, bv_offset) < bv_len) {
 		dio_bio_submit(dio);
 		ret = dio_new_bio(dio, blkno);
 		if (ret == 0) {
 			ret = bio_add_page(dio->bio, page, bv_len, bv_offset);
-			BUG_ON(ret != 0);
+			BUG_ON(ret < bv_len);
 		} else {
 			/* The page didn't make it into a BIO */
 			page_cache_release(page);
......
@@ -176,6 +176,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 	unsigned first_hole = blocks_per_page;
 	struct block_device *bdev = NULL;
 	struct buffer_head bh;
+	int length;
 
 	if (page_has_buffers(page))
 		goto confused;
@@ -233,7 +234,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 			goto confused;
 	}
 
-	if (bio_add_page(bio, page, first_hole << blkbits, 0)) {
+	length = first_hole << blkbits;
+	if (bio_add_page(bio, page, length, 0) < length) {
 		bio = mpage_bio_submit(READ, bio);
 		goto alloc_new;
 	}
@@ -334,6 +336,7 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 	int boundary = 0;
 	sector_t boundary_block = 0;
 	struct block_device *boundary_bdev = NULL;
+	int length;
 
 	if (page_has_buffers(page)) {
 		struct buffer_head *head = page_buffers(page);
@@ -467,7 +470,8 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 		try_to_free_buffers(page);
 	}
 
-	if (bio_add_page(bio, page, first_unmapped << blkbits, 0)) {
+	length = first_unmapped << blkbits;
+	if (bio_add_page(bio, page, length, 0) < length) {
 		bio = mpage_bio_submit(WRITE, bio);
 		goto alloc_new;
 	}
......
@@ -109,7 +109,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
 			break;
 		}
 		if (do_wakeup) {
-			wake_up_interruptible(PIPE_WAIT(*inode));
+			wake_up_interruptible_sync(PIPE_WAIT(*inode));
 			kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
 		}
 		pipe_wait(inode);
@@ -117,7 +117,7 @@ pipe_read(struct file *filp, char *buf, size_t count, loff_t *ppos)
 	up(PIPE_SEM(*inode));
 	/* Signal writers asynchronously that there is more room. */
 	if (do_wakeup) {
-		wake_up_interruptible_sync(PIPE_WAIT(*inode));
+		wake_up_interruptible(PIPE_WAIT(*inode));
 		kill_fasync(PIPE_FASYNC_WRITERS(*inode), SIGIO, POLL_OUT);
 	}
 	if (ret > 0)
......
@@ -1448,7 +1448,7 @@ pagebuf_iorequest( /* start real I/O */
 		if (nbytes > size)
 			nbytes = size;
 
-		if (bio_add_page(bio, pb->pb_pages[map_i], nbytes, offset))
+		if (bio_add_page(bio, pb->pb_pages[map_i], nbytes, offset) < nbytes)
 			break;
 
 		offset = 0;
......
@@ -70,6 +70,7 @@ static __inline__ void ide_init_default_hwifs(void)
 	int index;
 
 	for(index = 0; index < MAX_HWIFS; index++) {
+		memset(&hw, 0, sizeof hw);
 		ide_init_hwif_ports(&hw, ide_default_io_base(index), 0, NULL);
 		hw.irq = ide_default_irq(ide_default_io_base(index));
 		ide_register_hw(&hw, NULL);
......
@@ -101,6 +101,7 @@ struct bio {
 #define BIO_EOF		2	/* out-out-bounds error */
 #define BIO_SEG_VALID	3	/* nr_hw_seg valid */
 #define BIO_CLONED	4	/* doesn't own data */
+#define BIO_BOUNCED	5	/* bio is a bounce bio */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
 
 /*
@@ -201,7 +202,7 @@ struct bio {
 extern struct bio *bio_alloc(int, int);
 extern void bio_put(struct bio *);
 
-extern int bio_endio(struct bio *, unsigned int, int);
+extern void bio_endio(struct bio *, unsigned int, int);
 struct request_queue;
 extern inline int bio_phys_segments(struct request_queue *, struct bio *);
 extern inline int bio_hw_segments(struct request_queue *, struct bio *);
......
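With bio_endio() now returning void and the fs/bio.c comment warning against calling bi_end_io() directly, completion handlers keep the partial-completion convention: they may be invoked several times and should only act once bi_size has dropped to zero. A minimal sketch of a private end_io handler in that style (the completion-based wakeup via bi_private is illustrative, not from this patch):

#include <linux/bio.h>
#include <linux/completion.h>

static int my_end_io(struct bio *bio, unsigned int bytes_done, int error)
{
	if (bio->bi_size)
		return 1;	/* partial completion, more callbacks to come */

	/* whole bio accounted for; wake whoever is sleeping on it */
	complete((struct completion *) bio->bi_private);
	return 0;
}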
@@ -39,33 +39,20 @@ void initrd_init(void);
  */
 extern int end_that_request_first(struct request *, int, int);
+extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *);
 struct request *elv_next_request(request_queue_t *q);
 
 static inline void blkdev_dequeue_request(struct request *req)
 {
-	list_del(&req->queuelist);
+	BUG_ON(list_empty(&req->queuelist));
+
+	list_del_init(&req->queuelist);
 
 	if (req->q)
 		elv_remove_request(req->q, req);
 }
 
-#define _elv_add_request_core(q, rq, where, plug)			\
-	do {								\
-		if ((plug))						\
-			blk_plug_device((q));				\
-		(q)->elevator.elevator_add_req_fn((q), (rq), (where));	\
-	} while (0)
-
-#define _elv_add_request(q, rq, back, p) do {				      \
-	if ((back))							      \
-		_elv_add_request_core((q), (rq), (q)->queue_head.prev, (p)); \
-	else								      \
-		_elv_add_request_core((q), (rq), &(q)->queue_head, (p));     \
-} while (0)
-
-#define elv_add_request(q, rq, back) _elv_add_request((q), (rq), (back), 1)
-
 #if defined(MAJOR_NR) || defined(IDE_DRIVER)
 
 #if (MAJOR_NR != SCSI_TAPE_MAJOR) && (MAJOR_NR != OSST_MAJOR)
 
 #if !defined(IDE_DRIVER)
......
@@ -26,6 +26,8 @@ struct request {
 	struct list_head queuelist; /* looking for ->queue? you must _not_
 				     * access it directly, use
 				     * blkdev_dequeue_request! */
+	int ref_count;
+
 	void *elevator_private;
 
 	unsigned char cmd[16];
@@ -215,6 +217,7 @@ struct request_queue
 	unsigned int		max_segment_size;
 
 	unsigned long		seg_boundary_mask;
+	unsigned int		dma_alignment;
 
 	wait_queue_head_t	queue_wait;
@@ -254,6 +257,13 @@ struct request_queue
  */
 #define blk_queue_headactive(q, head_active)
 
+/*
+ * q->prep_rq_fn return values
+ */
+#define BLKPREP_OK		0	/* serve it */
+#define BLKPREP_KILL		1	/* fatal error, kill */
+#define BLKPREP_DEFER		2	/* leave on queue */
+
 extern unsigned long blk_max_low_pfn, blk_max_pfn;
 
 /*
@@ -268,7 +278,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 #define BLK_BOUNCE_ISA		(ISA_DMA_THRESHOLD)
 
 extern int init_emergency_isa_pool(void);
-void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+inline void blk_queue_bounce(request_queue_t *q, struct bio **bio);
 
 #define rq_for_each_bio(bio, rq)	\
 	if ((rq->bio))			\
@@ -339,6 +349,7 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
 extern void blk_queue_assign_lock(request_queue_t *, spinlock_t *);
 extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(request_queue_t *, int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
@@ -385,6 +396,21 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 	return queue_hardsect_size(bdev_get_queue(bdev));
 }
 
+static inline int queue_dma_alignment(request_queue_t *q)
+{
+	int retval = 511;
+
+	if (q && q->dma_alignment)
+		retval = q->dma_alignment;
+
+	return retval;
+}
+
+static inline int bdev_dma_aligment(struct block_device *bdev)
+{
+	return queue_dma_alignment(bdev_get_queue(bdev));
+}
+
 #define blk_finished_io(nsects)	do { } while (0)
 #define blk_started_io(nsects)	do { } while (0)
......
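The BLKPREP_* values introduced above are what the reworked elv_next_request() (elevator.c hunk near the top) acts on: OK hands the request to the driver, DEFER leaves it on the queue for a later retry, KILL dequeues and errors it. A minimal sketch of a prep_rq_fn using them, with a hypothetical command-building helper standing in for driver-specific work:

#include <linux/blkdev.h>

/* hypothetical helper: builds the device command for @rq; may fail
 * transiently (-EAGAIN, e.g. out of DMA mappings) or permanently */
extern int my_build_command(struct request *rq);

static int my_prep_rq_fn(request_queue_t *q, struct request *rq)
{
	int err = my_build_command(rq);

	if (!err)
		return BLKPREP_OK;	/* ready, give it to the driver */
	if (err == -EAGAIN)
		return BLKPREP_DEFER;	/* leave on queue, try again later */

	return BLKPREP_KILL;		/* fatal, elevator completes it with an error */
}

A driver would hook this up with blk_queue_prep_rq(q, my_prep_rq_fn), declared a few lines above.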
@@ -40,8 +40,8 @@ struct elevator_s
 /*
  * block elevator interface
  */
-extern void __elv_add_request(request_queue_t *, struct request *,
-			      struct list_head *);
+extern void elv_add_request(request_queue_t *, struct request *, int, int);
+extern void __elv_add_request(request_queue_t *, struct request *, int, int);
 extern int elv_merge(request_queue_t *, struct list_head **, struct bio *);
 extern void elv_merge_requests(request_queue_t *, struct request *,
 			       struct request *);
@@ -50,6 +50,9 @@ extern void elv_remove_request(request_queue_t *, struct request *);
 extern int elv_queue_empty(request_queue_t *);
 extern inline struct list_head *elv_get_sort_head(request_queue_t *, struct request *);
 
+#define __elv_add_request_pos(q, rq, pos)		\
+	(q)->elevator.elevator_add_req_fn((q), (rq), (pos))
+
 /*
  * noop I/O scheduler. always merges, always inserts new request at tail
  */
......
@@ -366,34 +366,13 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 	return 0;
 }
 
-void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, int bio_gfp,
+			mempool_t *pool)
 {
 	struct page *page;
 	struct bio *bio = NULL;
-	int i, rw = bio_data_dir(*bio_orig), bio_gfp;
+	int i, rw = bio_data_dir(*bio_orig);
 	struct bio_vec *to, *from;
-	mempool_t *pool;
-	unsigned long pfn = q->bounce_pfn;
-	int gfp = q->bounce_gfp;
-
-	BUG_ON((*bio_orig)->bi_idx);
-
-	/*
-	 * for non-isa bounce case, just check if the bounce pfn is equal
-	 * to or bigger than the highest pfn in the system -- in that case,
-	 * don't waste time iterating over bio segments
-	 */
-	if (!(gfp & GFP_DMA)) {
-		if (pfn >= blk_max_pfn)
-			return;
-
-		bio_gfp = GFP_NOHIGHIO;
-		pool = page_pool;
-	} else {
-		BUG_ON(!isa_page_pool);
-		bio_gfp = GFP_NOIO;
-		pool = isa_page_pool;
-	}
 
 	bio_for_each_segment(from, *bio_orig, i) {
 		page = from->bv_page;
@@ -401,7 +380,7 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 		/*
 		 * is destination page below bounce pfn?
 		 */
-		if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < pfn)
+		if ((page - page_zone(page)->zone_mem_map) + (page_zone(page)->zone_start_pfn) < q->bounce_pfn)
 			continue;
 
 		/*
@@ -412,11 +391,11 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 		to = bio->bi_io_vec + i;
 
-		to->bv_page = mempool_alloc(pool, gfp);
+		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 		to->bv_len = from->bv_len;
 		to->bv_offset = from->bv_offset;
 
-		if (rw & WRITE) {
+		if (rw == WRITE) {
 			char *vto, *vfrom;
 
 			vto = page_address(to->bv_page) + to->bv_offset;
@@ -437,15 +416,16 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	 * pages
 	 */
 	bio_for_each_segment(from, *bio_orig, i) {
-		to = &bio->bi_io_vec[i];
+		to = bio_iovec_idx(bio, i);
 		if (!to->bv_page) {
 			to->bv_page = from->bv_page;
 			to->bv_len = from->bv_len;
-			to->bv_offset = to->bv_offset;
+			to->bv_offset = from->bv_offset;
 		}
 	}
 
 	bio->bi_bdev = (*bio_orig)->bi_bdev;
+	bio->bi_flags |= (1 << BIO_BOUNCED);
 	bio->bi_sector = (*bio_orig)->bi_sector;
 	bio->bi_rw = (*bio_orig)->bi_rw;
 
@@ -454,14 +434,12 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	bio->bi_size = (*bio_orig)->bi_size;
 
 	if (pool == page_pool) {
-		if (rw & WRITE)
-			bio->bi_end_io = bounce_end_io_write;
-		else
+		bio->bi_end_io = bounce_end_io_write;
+		if (rw == READ)
 			bio->bi_end_io = bounce_end_io_read;
 	} else {
-		if (rw & WRITE)
-			bio->bi_end_io = bounce_end_io_write_isa;
-		else
+		bio->bi_end_io = bounce_end_io_write_isa;
+		if (rw == READ)
 			bio->bi_end_io = bounce_end_io_read_isa;
 	}
@@ -469,6 +447,37 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 	*bio_orig = bio;
 }
 
+inline void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
+{
+	mempool_t *pool;
+	int bio_gfp;
+
+	BUG_ON((*bio_orig)->bi_idx);
+
+	/*
+	 * for non-isa bounce case, just check if the bounce pfn is equal
+	 * to or bigger than the highest pfn in the system -- in that case,
+	 * don't waste time iterating over bio segments
+	 */
+	if (!(q->bounce_gfp & GFP_DMA)) {
+		if (q->bounce_pfn >= blk_max_pfn)
+			return;
+
+		bio_gfp = GFP_NOHIGHIO;
+		pool = page_pool;
+	} else {
+		BUG_ON(!isa_page_pool);
+		bio_gfp = GFP_NOIO;
+		pool = isa_page_pool;
+	}
+
+	/*
+	 * slow path
+	 */
+	__blk_queue_bounce(q, bio_orig, bio_gfp, pool);
+}
+
 #if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_HIGHMEM)
 void check_highmem_ptes(void)
 {
......