Commit d5bee775 authored by Linus Torvalds's avatar Linus Torvalds
parents 0827f2b6 fddfdeaf
...@@ -139,35 +139,16 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e, ...@@ -139,35 +139,16 @@ static int elevator_attach(request_queue_t *q, struct elevator_type *e,
static char chosen_elevator[16]; static char chosen_elevator[16];
static void elevator_setup_default(void) static int __init elevator_setup(char *str)
{ {
struct elevator_type *e;
/*
* If default has not been set, use the compiled-in selection.
*/
if (!chosen_elevator[0])
strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
/* /*
* Be backwards-compatible with previous kernels, so users * Be backwards-compatible with previous kernels, so users
* won't get the wrong elevator. * won't get the wrong elevator.
*/ */
if (!strcmp(chosen_elevator, "as")) if (!strcmp(str, "as"))
strcpy(chosen_elevator, "anticipatory"); strcpy(chosen_elevator, "anticipatory");
/*
* If the given scheduler is not available, fall back to the default
*/
if ((e = elevator_find(chosen_elevator)))
elevator_put(e);
else else
strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED); strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
}
static int __init elevator_setup(char *str)
{
strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
return 0; return 0;
} }
...@@ -184,14 +165,16 @@ int elevator_init(request_queue_t *q, char *name) ...@@ -184,14 +165,16 @@ int elevator_init(request_queue_t *q, char *name)
q->end_sector = 0; q->end_sector = 0;
q->boundary_rq = NULL; q->boundary_rq = NULL;
elevator_setup_default(); if (name && !(e = elevator_get(name)))
return -EINVAL;
if (!name) if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator)))
name = chosen_elevator; printk("I/O scheduler %s not found\n", chosen_elevator);
e = elevator_get(name); if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) {
if (!e) printk("Default I/O scheduler not found, using no-op\n");
return -EINVAL; e = elevator_get("noop");
}
eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL); eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
if (!eq) { if (!eq) {
...@@ -669,8 +652,10 @@ int elv_register(struct elevator_type *e) ...@@ -669,8 +652,10 @@ int elv_register(struct elevator_type *e)
spin_unlock_irq(&elv_list_lock); spin_unlock_irq(&elv_list_lock);
printk(KERN_INFO "io scheduler %s registered", e->elevator_name); printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
if (!strcmp(e->elevator_name, chosen_elevator)) if (!strcmp(e->elevator_name, chosen_elevator) ||
printk(" (default)"); (!*chosen_elevator &&
!strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
printk(" (default)");
printk("\n"); printk("\n");
return 0; return 0;
} }
......
...@@ -304,6 +304,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq) ...@@ -304,6 +304,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
* blk_queue_ordered - does this queue support ordered writes * blk_queue_ordered - does this queue support ordered writes
* @q: the request queue * @q: the request queue
* @ordered: one of QUEUE_ORDERED_* * @ordered: one of QUEUE_ORDERED_*
* @prepare_flush_fn: rq setup helper for cache flush ordered writes
* *
* Description: * Description:
* For journalled file systems, doing ordered writes on a commit * For journalled file systems, doing ordered writes on a commit
...@@ -332,6 +333,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered, ...@@ -332,6 +333,7 @@ int blk_queue_ordered(request_queue_t *q, unsigned ordered,
return -EINVAL; return -EINVAL;
} }
q->ordered = ordered;
q->next_ordered = ordered; q->next_ordered = ordered;
q->prepare_flush_fn = prepare_flush_fn; q->prepare_flush_fn = prepare_flush_fn;
...@@ -662,7 +664,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit); ...@@ -662,7 +664,7 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
* Enables a low level driver to set an upper limit on the size of * Enables a low level driver to set an upper limit on the size of
* received requests. * received requests.
**/ **/
void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors) void blk_queue_max_sectors(request_queue_t *q, unsigned int max_sectors)
{ {
if ((max_sectors << 9) < PAGE_CACHE_SIZE) { if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
...@@ -2632,6 +2634,7 @@ EXPORT_SYMBOL(blk_put_request); ...@@ -2632,6 +2634,7 @@ EXPORT_SYMBOL(blk_put_request);
/** /**
* blk_end_sync_rq - executes a completion event on a request * blk_end_sync_rq - executes a completion event on a request
* @rq: request to complete * @rq: request to complete
* @error: end io status of the request
*/ */
void blk_end_sync_rq(struct request *rq, int error) void blk_end_sync_rq(struct request *rq, int error)
{ {
...@@ -3153,7 +3156,7 @@ static int __end_that_request_first(struct request *req, int uptodate, ...@@ -3153,7 +3156,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
if (blk_fs_request(req) && req->rq_disk) { if (blk_fs_request(req) && req->rq_disk) {
const int rw = rq_data_dir(req); const int rw = rq_data_dir(req);
__disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9); disk_stat_add(req->rq_disk, sectors[rw], nr_bytes >> 9);
} }
total_bytes = bio_nbytes = 0; total_bytes = bio_nbytes = 0;
......
...@@ -1700,6 +1700,31 @@ static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last) ...@@ -1700,6 +1700,31 @@ static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
return sizeof(def_rw_recovery_mpage); return sizeof(def_rw_recovery_mpage);
} }
/*
 * We can turn this into a real blacklist if it's needed, for now just
 * blacklist any Maxtor BANC1G10 revision firmware
 */
static int ata_dev_supports_fua(u16 *id)
{
	unsigned char model[41], fw[9];

	/* No point checking the blacklist if the device lacks FUA entirely. */
	if (!ata_id_has_fua(id))
		return 0;

	/* Pre-terminate both buffers before filling them from IDENTIFY data. */
	model[sizeof(model) - 1] = '\0';
	fw[sizeof(fw) - 1] = '\0';

	ata_dev_id_string(id, model, ATA_ID_PROD_OFS, sizeof(model) - 1);
	ata_dev_id_string(id, fw, ATA_ID_FW_REV_OFS, sizeof(fw) - 1);

	/*
	 * FUA is usable unless this is exactly the known-bad combination:
	 * a Maxtor drive running the BANC1G10 firmware revision.
	 */
	if (strncmp(model, "Maxtor", 6) != 0 || strncmp(fw, "BANC1G10", 8) != 0)
		return 1;

	return 0; /* blacklisted */
}
/** /**
* ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
* @args: device IDENTIFY data / SCSI command of interest. * @args: device IDENTIFY data / SCSI command of interest.
...@@ -1797,7 +1822,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, ...@@ -1797,7 +1822,7 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
return 0; return 0;
dpofua = 0; dpofua = 0;
if (ata_id_has_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 && if (ata_dev_supports_fua(args->id) && dev->flags & ATA_DFLAG_LBA48 &&
(!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count)) (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
dpofua = 1 << 4; dpofua = 1 << 4;
......
...@@ -411,6 +411,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page ...@@ -411,6 +411,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
/** /**
* bio_add_pc_page - attempt to add page to bio * bio_add_pc_page - attempt to add page to bio
* @q: the target queue
* @bio: destination bio * @bio: destination bio
* @page: page to add * @page: page to add
* @len: vec entry length * @len: vec entry length
......
...@@ -392,8 +392,8 @@ struct request_queue ...@@ -392,8 +392,8 @@ struct request_queue
unsigned int nr_congestion_off; unsigned int nr_congestion_off;
unsigned int nr_batching; unsigned int nr_batching;
unsigned short max_sectors; unsigned int max_sectors;
unsigned short max_hw_sectors; unsigned int max_hw_sectors;
unsigned short max_phys_segments; unsigned short max_phys_segments;
unsigned short max_hw_segments; unsigned short max_hw_segments;
unsigned short hardsect_size; unsigned short hardsect_size;
...@@ -697,7 +697,7 @@ extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *); ...@@ -697,7 +697,7 @@ extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(request_queue_t *); extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *); extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64); extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void blk_queue_max_sectors(request_queue_t *, unsigned short); extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short); extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short); extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *, unsigned int); extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
......
...@@ -554,7 +554,6 @@ struct Scsi_Host { ...@@ -554,7 +554,6 @@ struct Scsi_Host {
/* /*
* ordered write support * ordered write support
*/ */
unsigned ordered_flush:1;
unsigned ordered_tag:1; unsigned ordered_tag:1;
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment