Commit 0e28a0d6 authored by James Simmons

Merge http://linux.bkbits.net/linux-2.5

into heisenberg.transvirtual.com:/tmp/linus-2.5
parents 4b532a20 33e448ef
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 22
SUBLEVEL = 23
EXTRAVERSION =
# We are using a recursive build, so we need to do a little thinking
......
......@@ -161,6 +161,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
init_waitqueue_head(&q->queue_wait);
INIT_LIST_HEAD(&q->plug_list);
}
/**
......
......@@ -129,6 +129,8 @@ struct cardinfo {
*/
struct bio *bio, *currentbio, **biotail;
request_queue_t queue;
struct mm_page {
dma_addr_t page_dma;
struct mm_dma_desc *desc;
......@@ -142,8 +144,6 @@ struct cardinfo {
struct tasklet_struct tasklet;
unsigned int dma_status;
struct tq_struct plug_tq;
struct {
int good;
int warned;
......@@ -293,7 +293,7 @@ static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
* Whenever IO on the active page completes, the Ready page is activated
* and the ex-Active page is cleaned out and made Ready.
* Otherwise the Ready page is only activated when it becomes full, or
* when mm_unplug_device is called via run_task_queue(&tq_disk).
* when mm_unplug_device is called via blk_run_queues().
*
* If a request arrives while both pages are full, it is queued, and b_rdev is
* overloaded to record whether it was a read or a write.
......@@ -341,8 +341,9 @@ static void mm_start_io(struct cardinfo *card)
offset = ((char*)desc) - ((char*)page->desc);
writel(cpu_to_le32((page->page_dma+offset)&0xffffffff),
card->csr_remap + DMA_DESCRIPTOR_ADDR);
/* if sizeof(dma_addr_t) == 32, this will generate a warning, sorry */
writel(cpu_to_le32((page->page_dma)>>32),
/* Force the value to u64 before shifting otherwise >> 32 is undefined C
* and on some ports will do nothing ! */
writel(cpu_to_le32(((u64)page->page_dma)>>32),
card->csr_remap + DMA_DESCRIPTOR_ADDR + 4);
/* Go, go, go */
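The cast added here matters because dma_addr_t can be a 32-bit type on many configurations, and shifting a 32-bit value right by 32 is undefined behaviour in C (on several architectures the shift count is taken modulo 32 and the value comes back unchanged rather than 0). A minimal sketch of the intended split, assuming nothing about the width of dma_addr_t:

    u32 lo = (u32)((u64)page->page_dma & 0xffffffff);
    u32 hi = (u32)(((u64)page->page_dma) >> 32);  /* well defined: 0 when dma_addr_t is 32 bits */

    writel(cpu_to_le32(lo), card->csr_remap + DMA_DESCRIPTOR_ADDR);
    writel(cpu_to_le32(hi), card->csr_remap + DMA_DESCRIPTOR_ADDR + 4);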
......@@ -384,9 +385,11 @@ static inline void reset_page(struct mm_page *page)
static void mm_unplug_device(void *data)
{
struct cardinfo *card = data;
request_queue_t *q = data;
struct cardinfo *card = q->queuedata;
spin_lock_bh(&card->lock);
if (blk_remove_plug(q))
activate(card);
spin_unlock_bh(&card->lock);
}
......@@ -565,8 +568,7 @@ static void process_page(unsigned long data)
*/
static int mm_make_request(request_queue_t *q, struct bio *bio)
{
struct cardinfo *card = &cards[DEVICE_NR(
bio->bi_bdev->bd_dev)];
struct cardinfo *card = q->queuedata;
PRINTK("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size);
/* set uptodate now, and clear it if there are any errors */
......@@ -576,9 +578,9 @@ static int mm_make_request(request_queue_t *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
blk_plug_device(q);
spin_unlock_bh(&card->lock);
queue_task(&card->plug_tq, &tq_disk);
return 0;
}
......@@ -1065,11 +1067,12 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
card->bio = NULL;
card->biotail = &card->bio;
blk_queue_make_request(&card->queue, mm_make_request);
card->queue.queuedata = card;
card->queue.unplug_fn = mm_unplug_device;
tasklet_init(&card->tasklet, process_page, (unsigned long)card);
card->plug_tq.sync = 0;
card->plug_tq.routine = &mm_unplug_device;
card->plug_tq.data = card;
card->check_batteries = 0;
mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY);
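Taken together, these probe- and submit-side hunks move the driver from the global tq_disk task queue to the per-queue plugging interface: the card hangs off its queue via queuedata, an unplug_fn is registered, I/O submission plugs the queue, and blk_run_queues() later unplugs it. A condensed sketch of the pattern, using only calls that appear in this diff:

    /* probe: one request queue per card */
    blk_queue_make_request(&card->queue, mm_make_request);
    card->queue.queuedata = card;              /* recovered as q->queuedata later */
    card->queue.unplug_fn = mm_unplug_device;  /* invoked when the queue unplugs  */

    /* mm_make_request(): queue the bio and plug, instead of queue_task(&tq_disk) */
    spin_lock_bh(&card->lock);
    /* ... chain the bio onto card->bio via card->biotail ... */
    blk_plug_device(q);
    spin_unlock_bh(&card->lock);

    /* unplug_fn: blk_remove_plug() returns non-zero only if we really were plugged */
    if (blk_remove_plug(q))
        activate(card);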
......@@ -1237,6 +1240,17 @@ static struct pci_driver mm_pci_driver = {
-- mm_init
-----------------------------------------------------------------------------------
*/
static request_queue_t * mm_queue_proc(kdev_t dev)
{
int c = DEVICE_NR(kdev_val(dev));
if (c < MM_MAXCARDS)
return &cards[c].queue;
else
return BLK_DEFAULT_QUEUE(MAJOR_NR);
}
int __init mm_init(void)
{
int retval, i;
......@@ -1276,11 +1290,9 @@ int __init mm_init(void)
mm_gendisk.part = mm_partitions;
mm_gendisk.nr_real = num_cards;
blk_dev[MAJOR_NR].queue = mm_queue_proc;
add_gendisk(&mm_gendisk);
blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR),
mm_make_request);
blk_size[MAJOR_NR] = mm_gendisk.sizes;
for (i = 0; i < num_cards; i++) {
register_disk(&mm_gendisk, mk_kdev(MAJOR_NR, i<<MM_SHIFT), MM_SHIFT,
......
......@@ -33,39 +33,45 @@ static int linear_run (mddev_t *mddev)
linear_conf_t *conf;
struct linear_hash *table;
mdk_rdev_t *rdev;
int size, i, j, nb_zone;
int size, i, nb_zone, cnt;
unsigned int curr_offset;
struct list_head *tmp;
MOD_INC_USE_COUNT;
conf = kmalloc (sizeof (*conf), GFP_KERNEL);
if (!conf)
goto out;
memset(conf, 0, sizeof(*conf));
mddev->private = conf;
if (md_check_ordering(mddev)) {
printk("linear: disks are not ordered, aborting!\n");
goto out;
}
/*
* Find the smallest device.
*/
conf->smallest = NULL;
curr_offset = 0;
ITERATE_RDEV_ORDERED(mddev,rdev,j) {
cnt = 0;
ITERATE_RDEV(mddev,rdev,tmp) {
int j = rdev->sb->this_disk.raid_disk;
dev_info_t *disk = conf->disks + j;
if (j < 0 || j > mddev->sb->raid_disks || disk->bdev) {
printk("linear: disk numbering problem. Aborting!\n");
goto out;
}
disk->dev = rdev->dev;
disk->bdev = rdev->bdev;
atomic_inc(&rdev->bdev->bd_count);
disk->size = rdev->size;
disk->offset = curr_offset;
curr_offset += disk->size;
if (!conf->smallest || (disk->size < conf->smallest->size))
conf->smallest = disk;
cnt++;
}
if (cnt != mddev->sb->raid_disks) {
printk("linear: not enough drives present. Aborting!\n");
goto out;
}
nb_zone = conf->nr_zones =
......@@ -81,10 +87,13 @@ static int linear_run (mddev_t *mddev)
* Here we generate the linear hash table
*/
table = conf->hash_table;
i = 0;
size = 0;
for (j = 0; j < mddev->nb_dev; j++) {
dev_info_t *disk = conf->disks + j;
curr_offset = 0;
for (i = 0; i < cnt; i++) {
dev_info_t *disk = conf->disks + i;
disk->offset = curr_offset;
curr_offset += disk->size;
if (size < 0) {
table[-1].dev1 = disk;
......@@ -130,8 +139,9 @@ static int linear_stop (mddev_t *mddev)
return 0;
}
static int linear_make_request (mddev_t *mddev, int rw, struct bio *bio)
static int linear_make_request (request_queue_t *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
linear_conf_t *conf = mddev_to_conf(mddev);
struct linear_hash *hash;
dev_info_t *tmp_dev;
......@@ -186,7 +196,7 @@ static int linear_status (char *page, mddev_t *mddev)
}
sz += sprintf(page+sz, "\n");
#endif
sz += sprintf(page+sz, " %dk rounding", mddev->param.chunk_size/1024);
sz += sprintf(page+sz, " %dk rounding", mddev->sb->chunk_size/1024);
return sz;
}
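The same signature change runs through every md personality in this commit: make_request() now takes the request_queue_t and a bio, and the owning mddev is recovered from q->queuedata instead of being passed in. A minimal sketch of the shape each personality now follows (example_make_request is a placeholder name); that the md core points mddev->queue.queuedata at the mddev is implied by these hunks and by the queue field added to mddev_s further down, but md.c itself is not part of this excerpt:

    static int example_make_request(request_queue_t *q, struct bio *bio)
    {
        mddev_t *mddev = q->queuedata;   /* assumed to be set up by the md core */

        /* personality-specific work: map bio->bi_sector through the array
         * layout and point bio->bi_bdev at the chosen member device, or
         * clone the bio and resubmit it with generic_make_request().      */
        return 0;
    }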
......
This diff is collapsed.
......@@ -244,27 +244,19 @@ static int multipath_read_balance (multipath_conf_t *conf)
return 0;
}
static int multipath_make_request (mddev_t *mddev, int rw, struct bio * bio)
static int multipath_make_request (request_queue_t *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
multipath_conf_t *conf = mddev_to_conf(mddev);
struct bio *real_bio;
struct multipath_bh * mp_bh;
struct multipath_info *multipath;
/*
* make_request() can abort the operation when READA is being
* used and no empty request is available.
*
* Currently, just replace the command with READ/WRITE.
*/
if (rw == READA)
rw = READ;
mp_bh = multipath_alloc_mpbh (conf);
mp_bh->master_bio = bio;
mp_bh->mddev = mddev;
mp_bh->cmd = rw;
mp_bh->cmd = bio_data_dir(bio);
/*
* read balancing logic:
......@@ -273,7 +265,7 @@ static int multipath_make_request (mddev_t *mddev, int rw, struct bio * bio)
real_bio = bio_clone(bio, GFP_NOIO);
real_bio->bi_bdev = multipath->bdev;
real_bio->bi_rw = rw;
real_bio->bi_rw = bio_data_dir(bio);
real_bio->bi_end_io = multipath_end_request;
real_bio->bi_private = mp_bh;
mp_bh->bio = real_bio;
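With the queue-based entry point there is no separate rw argument to sanitise, so the READA special case disappears: the direction comes from the bio itself via bio_data_dir(), and read-ahead remains only an advisory bit in bi_rw (the raid5 hunks later in this diff test it explicitly with RWA_MASK). A small sketch of what the new lines rely on; the exact mask definitions are an assumption inferred from the RW_MASK/RWA_MASK uses below:

    int dir    = bio_data_dir(bio);      /* READ or WRITE, i.e. bio->bi_rw & RW_MASK  */
    int rahead = bio->bi_rw & RWA_MASK;  /* advisory read-ahead bit, may simply fail  */

    mp_bh->cmd      = dir;               /* replaces the old caller-supplied rw value */
    real_bio->bi_rw = dir;               /* the clone carries the plain direction     */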
......@@ -708,7 +700,6 @@ static void multipathd (void *data)
mddev = mp_bh->mddev;
if (mddev->sb_dirty) {
printk(KERN_INFO "dirty sb detected, updating.\n");
mddev->sb_dirty = 0;
md_update_sb(mddev);
}
bio = mp_bh->bio;
......
......@@ -29,21 +29,26 @@
static int create_strip_zones (mddev_t *mddev)
{
int i, c, j, j1, j2;
int i, c, j;
unsigned long current_offset, curr_zone_offset;
raid0_conf_t *conf = mddev_to_conf(mddev);
mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
struct list_head *tmp1, *tmp2;
struct strip_zone *zone;
int cnt;
/*
* The number of 'same size groups'
*/
conf->nr_strip_zones = 0;
ITERATE_RDEV_ORDERED(mddev,rdev1,j1) {
ITERATE_RDEV(mddev,rdev1,tmp1) {
printk("raid0: looking at %s\n", partition_name(rdev1->dev));
c = 0;
ITERATE_RDEV_ORDERED(mddev,rdev2,j2) {
printk("raid0: comparing %s(%ld) with %s(%ld)\n", partition_name(rdev1->dev), rdev1->size, partition_name(rdev2->dev), rdev2->size);
ITERATE_RDEV(mddev,rdev2,tmp2) {
printk("raid0: comparing %s(%ld) with %s(%ld)\n",
partition_name(rdev1->dev), rdev1->size,
partition_name(rdev2->dev), rdev2->size);
if (rdev2 == rdev1) {
printk("raid0: END\n");
break;
......@@ -51,7 +56,7 @@ static int create_strip_zones (mddev_t *mddev)
if (rdev2->size == rdev1->size)
{
/*
* Not unique, dont count it as a new
* Not unique, don't count it as a new
* group
*/
printk("raid0: EQUAL\n");
......@@ -73,22 +78,55 @@ static int create_strip_zones (mddev_t *mddev)
if (!conf->strip_zone)
return 1;
memset(conf->strip_zone, 0,sizeof(struct strip_zone)*
conf->nr_strip_zones);
/* The first zone must contain all devices, so here we check that
* there is a properly alignment of slots to devices and find them all
*/
zone = &conf->strip_zone[0];
cnt = 0;
smallest = NULL;
ITERATE_RDEV(mddev, rdev1, tmp1) {
int j = rdev1->sb->this_disk.raid_disk;
conf->smallest = NULL;
current_offset = 0;
curr_zone_offset = 0;
if (j < 0 || j >= mddev->sb->raid_disks) {
printk("raid0: bad disk number %d - aborting!\n", j);
goto abort;
}
if (zone->dev[j]) {
printk("raid0: multiple devices for %d - aborting!\n", j);
goto abort;
}
zone->dev[j] = rdev1;
if (!smallest || (rdev1->size <smallest->size))
smallest = rdev1;
cnt++;
}
if (cnt != mddev->sb->raid_disks) {
printk("raid0: too few disks (%d of %d) - aborting!\n", cnt,
mddev->sb->raid_disks);
goto abort;
}
zone->nb_dev = cnt;
zone->size = smallest->size * cnt;
zone->zone_offset = 0;
for (i = 0; i < conf->nr_strip_zones; i++)
conf->smallest = zone;
current_offset = smallest->size;
curr_zone_offset = zone->size;
/* now do the other zones */
for (i = 1; i < conf->nr_strip_zones; i++)
{
struct strip_zone *zone = conf->strip_zone + i;
zone = conf->strip_zone + i;
printk("raid0: zone %d\n", i);
zone->dev_offset = current_offset;
smallest = NULL;
c = 0;
ITERATE_RDEV_ORDERED(mddev,rdev,j) {
for (j=0; j<cnt; j++) {
rdev = conf->strip_zone[0].dev[j];
printk("raid0: checking %s ...", partition_name(rdev->dev));
if (rdev->size > current_offset)
{
......@@ -118,6 +156,9 @@ static int create_strip_zones (mddev_t *mddev)
}
printk("raid0: done.\n");
return 0;
abort:
vfree(conf->strip_zone);
return 1;
}
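The rewritten create_strip_zones() builds zone 0 out of every member disk, sized by the smallest one, and carves later zones out of whatever the larger disks have left; the per-zone size arithmetic for those later zones sits in the part of the function not shown in this hunk. A worked example, assuming three member disks of 100, 200 and 200 blocks:

    nr_strip_zones = 2          (two distinct member sizes: 100 and 200)
    zone 0: 3 disks x 100       = 300 blocks, device offset 0
    zone 1: 2 disks x (200-100) = 200 blocks, device offset 100 on the two larger disks
    array size                  = 500 blocks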
static int raid0_run (mddev_t *mddev)
......@@ -132,11 +173,6 @@ static int raid0_run (mddev_t *mddev)
goto out;
mddev->private = (void *)conf;
if (md_check_ordering(mddev)) {
printk("raid0: disks are not ordered, aborting!\n");
goto out_free_conf;
}
if (create_strip_zones (mddev))
goto out_free_conf;
......@@ -225,8 +261,9 @@ static int raid0_stop (mddev_t *mddev)
* Of course, those facts may not be valid anymore (and surely won't...)
* Hey guys, there's some work out there ;-)
*/
static int raid0_make_request (mddev_t *mddev, int rw, struct bio *bio)
static int raid0_make_request (request_queue_t *q, struct bio *bio)
{
mddev_t *mddev = q->queuedata;
unsigned int sect_in_chunk, chunksize_bits, chunk_size;
raid0_conf_t *conf = mddev_to_conf(mddev);
struct raid0_hash *hash;
......@@ -234,7 +271,7 @@ static int raid0_make_request (mddev_t *mddev, int rw, struct bio *bio)
mdk_rdev_t *tmp_dev;
unsigned long chunk, block, rsect;
chunk_size = mddev->param.chunk_size >> 10;
chunk_size = mddev->sb->chunk_size >> 10;
chunksize_bits = ffz(~chunk_size);
block = bio->bi_sector >> 1;
hash = conf->hash_table + block / conf->smallest->size;
......@@ -323,7 +360,7 @@ static int raid0_status (char *page, mddev_t *mddev)
conf->strip_zone[j].size);
}
#endif
sz += sprintf(page + sz, " %dk chunks", mddev->param.chunk_size/1024);
sz += sprintf(page + sz, " %dk chunks", mddev->sb->chunk_size/1024);
return sz;
}
......
......@@ -334,7 +334,7 @@ static int read_balance(conf_t *conf, struct bio *bio, r1bio_t *r1_bio)
* device if no resync is going on, or below the resync window.
* We take the first readable disk when above the resync window.
*/
if (conf->resync_mirrors && (this_sector + sectors >= conf->next_resync)) {
if (!conf->mddev->in_sync && (this_sector + sectors >= conf->next_resync)) {
/* make sure that disk is operational */
new_disk = 0;
while (!conf->mirrors[new_disk].operational || conf->mirrors[new_disk].write_only) {
......@@ -434,8 +434,9 @@ static void resume_device(conf_t *conf)
spin_unlock_irq(&conf->resync_lock);
}
static int make_request(mddev_t *mddev, int rw, struct bio * bio)
static int make_request(request_queue_t *q, struct bio * bio)
{
mddev_t *mddev = q->queuedata;
conf_t *conf = mddev_to_conf(mddev);
mirror_info_t *mirror;
r1bio_t *r1_bio;
......@@ -456,20 +457,16 @@ static int make_request(mddev_t *mddev, int rw, struct bio * bio)
* make_request() can abort the operation when READA is being
* used and no empty request is available.
*
* Currently, just replace the command with READ.
*/
if (rw == READA)
rw = READ;
r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
r1_bio->master_bio = bio;
r1_bio->mddev = mddev;
r1_bio->sector = bio->bi_sector;
r1_bio->cmd = rw;
r1_bio->cmd = bio_data_dir(bio);
if (rw == READ) {
if (r1_bio->cmd == READ) {
/*
* read balancing logic:
*/
......@@ -483,7 +480,7 @@ static int make_request(mddev_t *mddev, int rw, struct bio * bio)
read_bio->bi_sector = r1_bio->sector;
read_bio->bi_bdev = mirror->bdev;
read_bio->bi_end_io = end_request;
read_bio->bi_rw = rw;
read_bio->bi_rw = r1_bio->cmd;
read_bio->bi_private = r1_bio;
generic_make_request(read_bio);
......@@ -507,7 +504,7 @@ static int make_request(mddev_t *mddev, int rw, struct bio * bio)
mbio->bi_sector = r1_bio->sector;
mbio->bi_bdev = conf->mirrors[i].bdev;
mbio->bi_end_io = end_request;
mbio->bi_rw = rw;
mbio->bi_rw = r1_bio->cmd;
mbio->bi_private = r1_bio;
sum_bios++;
......@@ -656,6 +653,9 @@ static void close_sync(conf_t *conf)
if (conf->barrier) BUG();
if (waitqueue_active(&conf->wait_idle)) BUG();
if (waitqueue_active(&conf->wait_resume)) BUG();
mempool_destroy(conf->r1buf_pool);
conf->r1buf_pool = NULL;
}
static int diskop(mddev_t *mddev, mdp_disk_t **d, int state)
......@@ -772,7 +772,6 @@ static int diskop(mddev_t *mddev, mdp_disk_t **d, int state)
* Deactivate a spare disk:
*/
case DISKOP_SPARE_INACTIVE:
close_sync(conf);
sdisk = conf->mirrors + spare_disk;
sdisk->operational = 0;
sdisk->write_only = 0;
......@@ -785,7 +784,6 @@ static int diskop(mddev_t *mddev, mdp_disk_t **d, int state)
* property)
*/
case DISKOP_SPARE_ACTIVE:
close_sync(conf);
sdisk = conf->mirrors + spare_disk;
fdisk = conf->mirrors + failed_disk;
......@@ -919,10 +917,6 @@ static int diskop(mddev_t *mddev, mdp_disk_t **d, int state)
}
abort:
spin_unlock_irq(&conf->device_lock);
if (state == DISKOP_SPARE_ACTIVE || state == DISKOP_SPARE_INACTIVE) {
mempool_destroy(conf->r1buf_pool);
conf->r1buf_pool = NULL;
}
print_conf(conf);
return err;
......@@ -1012,7 +1006,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
* we read from here, no need to write
*/
continue;
if (i < conf->raid_disks && !conf->resync_mirrors)
if (i < conf->raid_disks && mddev->in_sync)
/*
* don't need to write this we are just rebuilding
*/
......@@ -1088,7 +1082,6 @@ static void raid1d(void *data)
conf = mddev_to_conf(mddev);
if (mddev->sb_dirty) {
printk(KERN_INFO "raid1: dirty sb detected, updating.\n");
mddev->sb_dirty = 0;
md_update_sb(mddev);
}
bio = r1_bio->master_bio;
......@@ -1118,31 +1111,6 @@ static void raid1d(void *data)
spin_unlock_irqrestore(&retry_list_lock, flags);
}
/*
* Private kernel thread to reconstruct mirrors after an unclean
* shutdown.
*/
static void raid1syncd(void *data)
{
conf_t *conf = data;
mddev_t *mddev = conf->mddev;
if (!conf->resync_mirrors)
return;
if (conf->resync_mirrors == 2)
return;
down(&mddev->recovery_sem);
if (!md_do_sync(mddev, NULL)) {
/*
* Only if everything went Ok.
*/
conf->resync_mirrors = 0;
}
close_sync(conf);
up(&mddev->recovery_sem);
}
static int init_resync(conf_t *conf)
{
......@@ -1177,9 +1145,16 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
sector_t max_sector, nr_sectors;
int disk, partial;
if (!sector_nr)
if (sector_nr == 0)
if (init_resync(conf))
return -ENOMEM;
max_sector = mddev->sb->size << 1;
if (sector_nr >= max_sector) {
close_sync(conf);
return 0;
}
/*
* If there is non-resync activity waiting for us then
* put in a delay to throttle resync.
......@@ -1216,10 +1191,6 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
r1_bio->sector = sector_nr;
r1_bio->cmd = SPECIAL;
max_sector = mddev->sb->size << 1;
if (sector_nr >= max_sector)
BUG();
bio = r1_bio->master_bio;
nr_sectors = RESYNC_BLOCK_SIZE >> 9;
if (max_sector - sector_nr < nr_sectors)
......@@ -1302,7 +1273,6 @@ static int run(mddev_t *mddev)
mdp_disk_t *descriptor;
mdk_rdev_t *rdev;
struct list_head *tmp;
int start_recovery = 0;
MOD_INC_USE_COUNT;
......@@ -1454,10 +1424,6 @@ static int run(mddev_t *mddev)
conf->last_used = j;
if (conf->working_disks != sb->raid_disks) {
printk(KERN_ALERT "raid1: md%d, not all disks are operational -- trying to recover array\n", mdidx(mddev));
start_recovery = 1;
}
{
const char * name = "raid1d";
......@@ -1469,20 +1435,6 @@ static int run(mddev_t *mddev)
}
}
if (!start_recovery && !(sb->state & (1 << MD_SB_CLEAN)) &&
(conf->working_disks > 1)) {
const char * name = "raid1syncd";
conf->resync_thread = md_register_thread(raid1syncd, conf, name);
if (!conf->resync_thread) {
printk(THREAD_ERROR, mdidx(mddev));
goto out_free_conf;
}
printk(START_RESYNC, mdidx(mddev));
conf->resync_mirrors = 1;
md_wakeup_thread(conf->resync_thread);
}
/*
* Regenerate the "device is in sync with the raid set" bit for
......@@ -1499,10 +1451,6 @@ static int run(mddev_t *mddev)
}
sb->active_disks = conf->working_disks;
if (start_recovery)
md_recover_arrays();
printk(ARRAY_IS_ACTIVE, mdidx(mddev), sb->active_disks, sb->raid_disks);
/*
* Ok, everything is just fine now
......@@ -1522,47 +1470,12 @@ static int run(mddev_t *mddev)
return -EIO;
}
static int stop_resync(mddev_t *mddev)
{
conf_t *conf = mddev_to_conf(mddev);
if (conf->resync_thread) {
if (conf->resync_mirrors) {
conf->resync_mirrors = 2;
md_interrupt_thread(conf->resync_thread);
printk(KERN_INFO "raid1: mirror resync was not fully finished, restarting next time.\n");
return 1;
}
return 0;
}
return 0;
}
static int restart_resync(mddev_t *mddev)
{
conf_t *conf = mddev_to_conf(mddev);
if (conf->resync_mirrors) {
if (!conf->resync_thread) {
MD_BUG();
return 0;
}
conf->resync_mirrors = 1;
md_wakeup_thread(conf->resync_thread);
return 1;
}
return 0;
}
static int stop(mddev_t *mddev)
{
conf_t *conf = mddev_to_conf(mddev);
int i;
md_unregister_thread(conf->thread);
if (conf->resync_thread)
md_unregister_thread(conf->resync_thread);
if (conf->r1bio_pool)
mempool_destroy(conf->r1bio_pool);
for (i = 0; i < MD_SB_DISKS; i++)
......@@ -1583,8 +1496,6 @@ static mdk_personality_t raid1_personality =
status: status,
error_handler: error,
diskop: diskop,
stop_resync: stop_resync,
restart_resync: restart_resync,
sync_request: sync_request
};
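The raid1syncd thread and the stop_resync/restart_resync hooks go away because resync is now driven generically: the core repeatedly calls the personality's sync_request(), and the personality signals completion by returning 0 once sector_nr passes the device size, tearing its resync buffers down in close_sync() at that point (see the sync_request hunk above). A rough sketch of that contract; the caller loop is only an assumption about the md core, which is not shown in this excerpt:

    /* personality side (matches the raid1 hunk above) */
    max_sector = mddev->sb->size << 1;      /* sb->size is in 1K blocks, so shift to sectors */
    if (sector_nr >= max_sector) {
        close_sync(conf);                   /* free r1buf_pool etc.                          */
        return 0;                           /* tells the core the resync is finished         */
    }

    /* core side (assumed shape, not part of this commit excerpt) */
    while ((done = mddev->pers->sync_request(mddev, sector_nr, 0)) > 0)
        sector_nr += done;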
......
......@@ -634,7 +634,6 @@ static void copy_data(int frombio, struct bio *bio,
else
page_offset = (signed)(sector - bio->bi_sector) * -512;
bio_for_each_segment(bvl, bio, i) {
char *ba = __bio_kmap(bio, i);
int len = bio_iovec_idx(bio,i)->bv_len;
int clen;
int b_offset = 0;
......@@ -649,13 +648,16 @@ static void copy_data(int frombio, struct bio *bio,
clen = STRIPE_SIZE - page_offset;
else clen = len;
if (len > 0) {
if (clen > 0) {
char *ba = __bio_kmap(bio, i);
if (frombio)
memcpy(pa+page_offset, ba+b_offset, clen);
else
memcpy(ba+b_offset, pa+page_offset, clen);
}
__bio_kunmap(bio, i);
}
if (clen < len) /* hit end of page */
break;
page_offset += len;
}
}
......@@ -810,6 +812,8 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
spin_unlock_irq(&conf->device_lock);
spin_unlock(&sh->lock);
PRINTK("added bi b#%lu to stripe s#%lu, disk %d.\n", bi->bi_sector, sh->sector, dd_idx);
if (forwrite) {
/* check if page is covered */
sector_t sector = sh->dev[dd_idx].sector;
......@@ -823,8 +827,6 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
}
PRINTK("added bi b#%lu to stripe s#%lu, disk %d.\n", bi->bi_sector, sh->sector, dd_idx);
}
......@@ -1036,7 +1038,7 @@ static void handle_stripe(struct stripe_head *sh)
) &&
!test_bit(R5_UPTODATE, &dev->flags)) {
if (conf->disks[i].operational
/* && !(conf->resync_parity && i == sh->pd_idx) */
/* && !(!mddev->insync && i == sh->pd_idx) */
)
rmw++;
else rmw += 2*disks; /* cannot read it */
......@@ -1226,14 +1228,15 @@ static inline void raid5_activate_delayed(raid5_conf_t *conf)
}
static void raid5_unplug_device(void *data)
{
raid5_conf_t *conf = (raid5_conf_t *)data;
request_queue_t *q = data;
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
unsigned long flags;
spin_lock_irqsave(&conf->device_lock, flags);
if (blk_remove_plug(q))
raid5_activate_delayed(conf);
conf->plugged = 0;
md_wakeup_thread(conf->thread);
spin_unlock_irqrestore(&conf->device_lock, flags);
......@@ -1242,31 +1245,21 @@ static void raid5_unplug_device(void *data)
static inline void raid5_plug_device(raid5_conf_t *conf)
{
spin_lock_irq(&conf->device_lock);
if (list_empty(&conf->delayed_list))
if (!conf->plugged) {
conf->plugged = 1;
queue_task(&conf->plug_tq, &tq_disk);
}
blk_plug_device(&conf->mddev->queue);
spin_unlock_irq(&conf->device_lock);
}
static int make_request (mddev_t *mddev, int rw, struct bio * bi)
static int make_request (request_queue_t *q, struct bio * bi)
{
raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev_to_conf(mddev);
const unsigned int raid_disks = conf->raid_disks;
const unsigned int data_disks = raid_disks - 1;
unsigned int dd_idx, pd_idx;
sector_t new_sector;
sector_t logical_sector, last_sector;
int read_ahead = 0;
struct stripe_head *sh;
if (rw == READA) {
rw = READ;
read_ahead=1;
}
logical_sector = bi->bi_sector & ~(STRIPE_SECTORS-1);
last_sector = bi->bi_sector + (bi->bi_size>>9);
......@@ -1281,10 +1274,10 @@ static int make_request (mddev_t *mddev, int rw, struct bio * bi)
PRINTK("raid5: make_request, sector %ul logical %ul\n",
new_sector, logical_sector);
sh = get_active_stripe(conf, new_sector, pd_idx, read_ahead);
sh = get_active_stripe(conf, new_sector, pd_idx, (bi->bi_rw&RWA_MASK));
if (sh) {
add_stripe_bio(sh, bi, dd_idx, rw);
add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK));
raid5_plug_device(conf);
handle_stripe(sh);
......@@ -1311,6 +1304,10 @@ static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
int raid_disks = conf->raid_disks;
int data_disks = raid_disks-1;
if (sector_nr >= mddev->sb->size <<1)
/* just being told to finish up .. nothing to do */
return 0;
first_sector = raid5_compute_sector(stripe*data_disks*sectors_per_chunk
+ chunk_offset, raid_disks, data_disks, &dd_idx, &pd_idx, conf);
sh = get_active_stripe(conf, sector_nr, pd_idx, 0);
......@@ -1343,17 +1340,15 @@ static void raid5d (void *data)
handled = 0;
if (mddev->sb_dirty) {
mddev->sb_dirty = 0;
if (mddev->sb_dirty)
md_update_sb(mddev);
}
spin_lock_irq(&conf->device_lock);
while (1) {
struct list_head *first;
if (list_empty(&conf->handle_list) &&
atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD &&
!conf->plugged &&
!blk_queue_plugged(&mddev->queue) &&
!list_empty(&conf->delayed_list))
raid5_activate_delayed(conf);
......@@ -1382,31 +1377,6 @@ static void raid5d (void *data)
PRINTK("--- raid5d inactive\n");
}
/*
* Private kernel thread for parity reconstruction after an unclean
* shutdown. Reconstruction on spare drives in case of a failed drive
* is done by the generic mdsyncd.
*/
static void raid5syncd (void *data)
{
raid5_conf_t *conf = data;
mddev_t *mddev = conf->mddev;
if (!conf->resync_parity)
return;
if (conf->resync_parity == 2)
return;
down(&mddev->recovery_sem);
if (md_do_sync(mddev,NULL)) {
up(&mddev->recovery_sem);
printk("raid5: resync aborted!\n");
return;
}
conf->resync_parity = 0;
up(&mddev->recovery_sem);
printk("raid5: resync finished.\n");
}
static int run (mddev_t *mddev)
{
raid5_conf_t *conf;
......@@ -1416,7 +1386,6 @@ static int run (mddev_t *mddev)
mdk_rdev_t *rdev;
struct disk_info *disk;
struct list_head *tmp;
int start_recovery = 0;
MOD_INC_USE_COUNT;
......@@ -1444,10 +1413,7 @@ static int run (mddev_t *mddev)
atomic_set(&conf->active_stripes, 0);
atomic_set(&conf->preread_active_stripes, 0);
conf->plugged = 0;
conf->plug_tq.sync = 0;
conf->plug_tq.routine = &raid5_unplug_device;
conf->plug_tq.data = conf;
mddev->queue.unplug_fn = raid5_unplug_device;
PRINTK("raid5: run(md%d) called.\n", mdidx(mddev));
......@@ -1571,9 +1537,10 @@ static int run (mddev_t *mddev)
goto abort;
}
if (conf->working_disks != sb->raid_disks) {
printk(KERN_ALERT "raid5: md%d, not all disks are operational -- trying to recover array\n", mdidx(mddev));
start_recovery = 1;
if (conf->failed_disks == 1 &&
!(sb->state & (1<<MD_SB_CLEAN))) {
printk(KERN_ERR "raid5: cannot start dirty degraded array for md%d\n", mdidx(mddev));
goto abort;
}
{
......@@ -1587,10 +1554,11 @@ static int run (mddev_t *mddev)
}
memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
conf->raid_disks * ((sizeof(struct buffer_head) + PAGE_SIZE))) / 1024;
conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
if (grow_stripes(conf, conf->max_nr_stripes)) {
printk(KERN_ERR "raid5: couldn't allocate %dkB for buffers\n", memory);
shrink_stripes(conf);
md_unregister_thread(conf->thread);
goto abort;
} else
printk(KERN_INFO "raid5: allocated %dkB for md%d\n", memory, mdidx(mddev));
......@@ -1615,23 +1583,6 @@ static int run (mddev_t *mddev)
else
printk(KERN_ALERT "raid5: raid level %d set md%d active with %d out of %d devices, algorithm %d\n", conf->level, mdidx(mddev), sb->active_disks, sb->raid_disks, conf->algorithm);
if (!start_recovery && !(sb->state & (1 << MD_SB_CLEAN))) {
const char * name = "raid5syncd";
conf->resync_thread = md_register_thread(raid5syncd, conf,name);
if (!conf->resync_thread) {
printk(KERN_ERR "raid5: couldn't allocate thread for md%d\n", mdidx(mddev));
goto abort;
}
printk("raid5: raid set md%d not clean; reconstructing parity\n", mdidx(mddev));
conf->resync_parity = 1;
md_wakeup_thread(conf->resync_thread);
}
print_raid5_conf(conf);
if (start_recovery)
md_recover_arrays();
print_raid5_conf(conf);
/* Ok, everything is just fine now */
......@@ -1650,48 +1601,12 @@ static int run (mddev_t *mddev)
return -EIO;
}
static int stop_resync (mddev_t *mddev)
{
raid5_conf_t *conf = mddev_to_conf(mddev);
mdk_thread_t *thread = conf->resync_thread;
if (thread) {
if (conf->resync_parity) {
conf->resync_parity = 2;
md_interrupt_thread(thread);
printk(KERN_INFO "raid5: parity resync was not fully finished, restarting next time.\n");
return 1;
}
return 0;
}
return 0;
}
static int restart_resync (mddev_t *mddev)
{
raid5_conf_t *conf = mddev_to_conf(mddev);
if (conf->resync_parity) {
if (!conf->resync_thread) {
MD_BUG();
return 0;
}
printk("raid5: waking up raid5resync.\n");
conf->resync_parity = 1;
md_wakeup_thread(conf->resync_thread);
return 1;
} else
printk("raid5: no restart-resync needed.\n");
return 0;
}
static int stop (mddev_t *mddev)
{
raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
if (conf->resync_thread)
md_unregister_thread(conf->resync_thread);
md_unregister_thread(conf->thread);
shrink_stripes(conf);
free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER);
......@@ -2066,8 +1981,6 @@ static mdk_personality_t raid5_personality=
status: status,
error_handler: error,
diskop: diskop,
stop_resync: stop_resync,
restart_resync: restart_resync,
sync_request: sync_request
};
......
......@@ -106,9 +106,7 @@ MODULE_PARM_DESC(ulangid, "The optional preferred USB Language ID for all device
MODULE_AUTHOR("NAGANO Daisuke <breeze.nagano@nifty.ne.jp>");
MODULE_DESCRIPTION("USB-MIDI driver");
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,14)
MODULE_LICENSE("GPL");
#endif
/* ------------------------------------------------------------------------- */
......
......@@ -74,9 +74,9 @@ static void urb_print (struct urb * urb, char * str, int small)
static inline struct ed *
dma_to_ed (struct ohci_hcd *hc, dma_addr_t ed_dma);
#ifdef OHCI_VERBOSE_DEBUG
/* print non-empty branches of the periodic ed tree */
void ohci_dump_periodic (struct ohci_hcd *ohci, char *label)
static void __attribute__ ((unused))
ohci_dump_periodic (struct ohci_hcd *ohci, char *label)
{
int i, j;
u32 *ed_p;
......@@ -101,7 +101,6 @@ void ohci_dump_periodic (struct ohci_hcd *ohci, char *label)
printk (KERN_DEBUG "%s, ohci %s, empty periodic schedule\n",
label, ohci->hcd.self.bus_name);
}
#endif
static void ohci_dump_intr_mask (char *label, __u32 mask)
{
......@@ -241,6 +240,97 @@ static void ohci_dump (struct ohci_hcd *controller, int verbose)
ohci_dump_roothub (controller, 1);
}
static void ohci_dump_td (char *label, struct td *td)
{
u32 tmp = le32_to_cpup (&td->hwINFO);
dbg ("%s td %p; urb %p index %d; hw next td %08x",
label, td,
td->urb, td->index,
le32_to_cpup (&td->hwNextTD));
if ((tmp & TD_ISO) == 0) {
char *toggle, *pid;
u32 cbp, be;
switch (tmp & TD_T) {
case TD_T_DATA0: toggle = "DATA0"; break;
case TD_T_DATA1: toggle = "DATA1"; break;
case TD_T_TOGGLE: toggle = "(CARRY)"; break;
default: toggle = "(?)"; break;
}
switch (tmp & TD_DP) {
case TD_DP_SETUP: pid = "SETUP"; break;
case TD_DP_IN: pid = "IN"; break;
case TD_DP_OUT: pid = "OUT"; break;
default: pid = "(bad pid)"; break;
}
dbg (" info %08x CC=%x %s DI=%d %s %s", tmp,
TD_CC_GET(tmp), /* EC, */ toggle,
(tmp & TD_DI) >> 21, pid,
(tmp & TD_R) ? "R" : "");
cbp = le32_to_cpup (&td->hwCBP);
be = le32_to_cpup (&td->hwBE);
dbg (" cbp %08x be %08x (len %d)", cbp, be,
cbp ? (be + 1 - cbp) : 0);
} else {
unsigned i;
dbg (" info %08x CC=%x DI=%d START=%04x", tmp,
TD_CC_GET(tmp), /* FC, */
(tmp & TD_DI) >> 21,
tmp & 0x0000ffff);
dbg (" bp0 %08x be %08x",
le32_to_cpup (&td->hwCBP) & ~0x0fff,
le32_to_cpup (&td->hwBE));
for (i = 0; i < MAXPSW; i++) {
dbg (" psw [%d] = %2x", i,
le16_to_cpu (td->hwPSW [i]));
}
}
}
/* caller MUST own hcd spinlock if verbose is set! */
static void __attribute__((unused))
ohci_dump_ed (struct ohci_hcd *ohci, char *label, struct ed *ed, int verbose)
{
u32 tmp = ed->hwINFO;
char *type = "";
dbg ("%s: %s, ed %p state 0x%x type %d; next ed %08x",
ohci->hcd.self.bus_name, label,
ed, ed->state, ed->type,
le32_to_cpup (&ed->hwNextED));
switch (tmp & (ED_IN|ED_OUT)) {
case ED_OUT: type = "-OUT"; break;
case ED_IN: type = "-IN"; break;
/* else from TDs ... control */
}
dbg (" info %08x MAX=%d%s%s%s EP=%d%s DEV=%d", le32_to_cpu (tmp),
0x0fff & (le32_to_cpu (tmp) >> 16),
(tmp & ED_ISO) ? " ISO" : "",
(tmp & ED_SKIP) ? " SKIP" : "",
(tmp & ED_LOWSPEED) ? " LOW" : "",
0x000f & (le32_to_cpu (tmp) >> 7),
type,
0x007f & le32_to_cpu (tmp));
dbg (" tds: head %08x%s%s tail %08x%s",
tmp = le32_to_cpup (&ed->hwHeadP),
(ed->hwHeadP & ED_H) ? " HALT" : "",
(ed->hwHeadP & ED_C) ? " CARRY" : "",
le32_to_cpup (&ed->hwTailP),
verbose ? "" : " (not listing)");
if (verbose) {
struct list_head *tmp;
/* use ed->td_list because HC concurrently modifies
* hwNextTD as it accumulates ed_donelist.
*/
list_for_each (tmp, &ed->td_list) {
struct td *td;
td = list_entry (tmp, struct td, td_list);
ohci_dump_td (" ->", td);
}
}
}
#endif
......@@ -100,7 +100,7 @@
* - lots more testing!!
*/
#define DRIVER_VERSION "2002-Jun-10"
#define DRIVER_VERSION "2002-Jun-15"
#define DRIVER_AUTHOR "Roman Weissgaerber <weissg@vienna.at>, David Brownell"
#define DRIVER_DESC "USB 1.1 'Open' Host Controller (OHCI) Driver"
......@@ -145,8 +145,8 @@ static int ohci_urb_enqueue (
urb_print (urb, "SUB", usb_pipein (pipe));
#endif
/* every endpoint has a ed, locate and fill it */
if (! (ed = ep_add_ed (urb->dev, pipe, urb->interval, 1, mem_flags)))
/* every endpoint has a ed, locate and maybe (re)initialize it */
if (! (ed = ed_get (ohci, urb->dev, pipe, urb->interval)))
return -ENOMEM;
/* for the private part of the URB we need the number of TDs (size) */
......@@ -498,6 +498,7 @@ static void ohci_irq (struct usb_hcd *hcd)
struct ohci_regs *regs = ohci->regs;
int ints;
/* we can eliminate a (slow) readl() if _only_ WDH caused this irq */
if ((ohci->hcca->done_head != 0)
&& ! (le32_to_cpup (&ohci->hcca->done_head) & 0x01)) {
ints = OHCI_INTR_WDH;
......
......@@ -221,6 +221,7 @@ ed_alloc (struct ohci_hcd *hc, int mem_flags)
ed = pci_pool_alloc (hc->ed_cache, mem_flags, &dma);
if (ed) {
memset (ed, 0, sizeof (*ed));
INIT_LIST_HEAD (&ed->td_list);
ed->dma = dma;
/* hash it for later reverse mapping */
if (!hash_add_ed (hc, ed, mem_flags)) {
......
......@@ -131,8 +131,9 @@ static void intr_resub (struct ohci_hcd *hc, struct urb *urb)
/* search for the right branch to insert an interrupt ed into the int tree
* do some load balancing;
* returns the branch and
* sets the interval to interval = 2^integer (ld (interval))
* returns the branch
* FIXME allow for failure, when there's no bandwidth left;
* and consider iso loads too
*/
static int ep_int_balance (struct ohci_hcd *ohci, int interval, int load)
{
......@@ -152,19 +153,6 @@ static int ep_int_balance (struct ohci_hcd *ohci, int interval, int load)
/*-------------------------------------------------------------------------*/
/* 2^int ( ld (inter)) */
static int ep_2_n_interval (int inter)
{
int i;
for (i = 0; ((inter >> i) > 1 ) && (i < 5); i++)
continue;
return 1 << i;
}
/*-------------------------------------------------------------------------*/
/* the int tree is a binary tree
* in order to process it sequentially the indexes of the branches have
* to be mapped the mapping reverses the bits of a word of num_bits length
......@@ -230,8 +218,7 @@ static int ep_link (struct ohci_hcd *ohci, struct ed *edi)
case PIPE_INTERRUPT:
load = ed->intriso.intr_info.int_load;
interval = ep_2_n_interval (ed->intriso.intr_info.int_period);
ed->interval = interval;
interval = ed->interval;
int_branch = ep_int_balance (ohci, interval, load);
ed->intriso.intr_info.int_branch = int_branch;
......@@ -301,6 +288,7 @@ static void periodic_unlink (
* just the link to the ed is unlinked.
* the link from the ed still points to another operational ed or 0
* so the HC can eventually finish the processing of the unlinked ed
* caller guarantees the ED has no active TDs.
*/
static int start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
{
......@@ -387,84 +375,99 @@ static int start_ed_unlink (struct ohci_hcd *ohci, struct ed *ed)
/*-------------------------------------------------------------------------*/
/* (re)init an endpoint; this _should_ be done once at the
* usb_set_configuration command, but the USB stack is a bit stateless
* so we do it at every transaction.
* if the state of the ed is ED_NEW then a dummy td is added and the
* state is changed to ED_UNLINK
* in all other cases the state is left unchanged
* the ed info fields are set even though most of them should
* not change
/* get and maybe (re)init an endpoint. init _should_ be done only as part
* of usb_set_configuration() or usb_set_interface() ... but the USB stack
* isn't very stateful, so we re-init whenever the HC isn't looking.
*/
static struct ed *ep_add_ed (
static struct ed *ed_get (
struct ohci_hcd *ohci,
struct usb_device *udev,
unsigned int pipe,
int interval,
int load,
int mem_flags
int interval
) {
struct ohci_hcd *ohci = hcd_to_ohci (udev->bus->hcpriv);
int is_out = !usb_pipein (pipe);
int type = usb_pipetype (pipe);
int bus_msecs = 0;
struct hcd_dev *dev = (struct hcd_dev *) udev->hcpriv;
struct td *td;
struct ed *ed;
unsigned ep;
unsigned long flags;
spin_lock_irqsave (&ohci->lock, flags);
ep = usb_pipeendpoint (pipe) << 1;
if (!usb_pipecontrol (pipe) && usb_pipeout (pipe))
if (type != PIPE_CONTROL && is_out)
ep |= 1;
if (type == PIPE_INTERRUPT)
bus_msecs = usb_calc_bus_time (udev->speed, !is_out, 0,
usb_maxpacket (udev, pipe, is_out)) / 1000;
spin_lock_irqsave (&ohci->lock, flags);
if (!(ed = dev->ep [ep])) {
ed = ed_alloc (ohci, SLAB_ATOMIC);
if (!ed) {
/* out of memory */
spin_unlock_irqrestore (&ohci->lock, flags);
return NULL;
goto done;
}
dev->ep [ep] = ed;
}
if (ed->state & ED_URB_DEL) {
/* pending unlink request */
spin_unlock_irqrestore (&ohci->lock, flags);
return NULL;
ed = 0;
goto done;
}
if (ed->state == ED_NEW) {
struct td *td;
ed->hwINFO = ED_SKIP;
/* dummy td; end of td list for ed */
td = td_alloc (ohci, SLAB_ATOMIC);
if (!td) {
/* out of memory */
spin_unlock_irqrestore (&ohci->lock, flags);
return NULL;
ed = 0;
goto done;
}
ed->dummy = td;
ed->hwTailP = cpu_to_le32 (td->td_dma);
ed->hwHeadP = ed->hwTailP; /* ED_C, ED_H zeroed */
ed->state = ED_UNLINK;
ed->type = usb_pipetype (pipe);
ed->type = type;
}
// FIXME: don't do this if it's linked to the HC, or without knowing it's
// safe to clobber state/mode info tied to (previous) config/altsetting.
// (but dev0/ep0, used by set_address, must get clobbered)
ed->hwINFO = cpu_to_le32 (usb_pipedevice (pipe)
| usb_pipeendpoint (pipe) << 7
| (usb_pipeisoc (pipe)? 0x8000: 0)
| (usb_pipecontrol (pipe)
? 0: (usb_pipeout (pipe)? 0x800: 0x1000))
| (udev->speed == USB_SPEED_LOW) << 13
| usb_maxpacket (udev, pipe, usb_pipeout (pipe))
<< 16);
if (ed->type == PIPE_INTERRUPT && ed->state == ED_UNLINK) {
ed->intriso.intr_info.int_period = interval;
ed->intriso.intr_info.int_load = load;
/* FIXME: Don't do this without knowing it's safe to clobber this
* state/mode info. Currently the upper layers don't support such
* guarantees; we're lucky changing config/altsetting is rare.
*/
if (ed->state == ED_UNLINK) {
u32 info;
info = usb_pipedevice (pipe);
info |= (ep >> 1) << 7;
info |= usb_maxpacket (udev, pipe, is_out) << 16;
info = cpu_to_le32 (info);
if (udev->speed == USB_SPEED_LOW)
info |= ED_LOWSPEED;
/* control transfers store pids in tds */
if (type != PIPE_CONTROL) {
info |= is_out ? ED_OUT : ED_IN;
if (type == PIPE_ISOCHRONOUS)
info |= ED_ISO;
if (type == PIPE_INTERRUPT) {
ed->intriso.intr_info.int_load = bus_msecs;
if (interval > 32)
interval = 32;
}
}
ed->hwINFO = info;
/* value ignored except on periodic EDs, where
* we know it's already a power of 2
*/
ed->interval = interval;
}
done:
spin_unlock_irqrestore (&ohci->lock, flags);
return ed;
}
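ed_get() packs the endpoint descriptor control word from the pipe: function address in bits 0-6, endpoint number in bits 7-10, direction in bits 11-12 (left to the TDs for control endpoints), ED_LOWSPEED at bit 13, ED_ISO at bit 15, and max packet size in bits 16-26 (the same layout ohci_dump_ed() decodes above). As a worked example, a full-speed bulk OUT endpoint 2 on device address 5 with a 64-byte max packet:

    info  = 5;          /* function address, bits 0-6   */
    info |= 2 << 7;     /* endpoint number,  bits 7-10  */
    info |= 64 << 16;   /* max packet size,  bits 16-26 */
    info  = cpu_to_le32(info);
    info |= ED_OUT;     /* pre-swapped constant, 0x01 << 11 */
    /* no ED_LOWSPEED, no ED_ISO: on a little-endian host info == 0x00400905 */
    ed->hwINFO = info;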
......@@ -736,8 +739,8 @@ static void td_done (struct urb *urb, struct td *td)
urb->iso_frame_desc [td->index].status = cc_to_error [cc];
if (cc != 0)
dbg (" urb %p iso TD %d len %d CC %d",
urb, td->index, dlen, cc);
dbg (" urb %p iso TD %p (%d) len %d CC %d",
urb, td, 1 + td->index, dlen, cc);
/* BULK, INT, CONTROL ... drivers see aggregate length/status,
* except that "setup" bytes aren't counted and "short" transfers
......@@ -776,9 +779,13 @@ static void td_done (struct urb *urb, struct td *td)
- td->data_dma;
}
#ifdef VERBOSE_DEBUG
if (cc != 0)
dbg (" urb %p TD %d CC %d, len=%d",
urb, td->index, cc, urb->actual_length);
dbg (" urb %p TD %p (%d) CC %d, len=%d/%d",
urb, td, 1 + td->index, cc,
urb->actual_length,
urb->transfer_buffer_length);
#endif
}
}
......@@ -812,8 +819,8 @@ static struct td *dl_reverse_done_list (struct ohci_hcd *ohci)
if (urb_priv && ((td_list->index + 1)
< urb_priv->length)) {
#ifdef OHCI_VERBOSE_DEBUG
dbg ("urb %p TD %d of %d, patch ED",
td_list->urb,
dbg ("urb %p TD %p (%d/%d), patch ED",
td_list->urb, td_list,
1 + td_list->index,
urb_priv->length);
#endif
......
......@@ -19,7 +19,7 @@ struct ed {
#define ED_SKIP __constant_cpu_to_le32(1 << 14)
#define ED_LOWSPEED __constant_cpu_to_le32(1 << 13)
#define ED_OUT __constant_cpu_to_le32(0x01 << 11)
#define ED_IN __constant_cpu_to_le32(0x10 << 11)
#define ED_IN __constant_cpu_to_le32(0x02 << 11)
__u32 hwTailP; /* tail of TD list */
__u32 hwHeadP; /* head of TD list */
#define ED_C __constant_cpu_to_le32(0x02) /* toggle carry */
......@@ -30,24 +30,24 @@ struct ed {
dma_addr_t dma; /* addr of ED */
struct ed *ed_prev; /* for non-interrupt EDs */
struct td *dummy;
struct list_head td_list; /* "shadow list" of our TDs */
u8 state; /* ED_{NEW,UNLINK,OPER} */
#define ED_NEW 0x00 /* unused, no dummy td */
#define ED_UNLINK 0x01 /* dummy td, maybe linked to hc */
#define ED_OPER 0x02 /* dummy td, _is_ linked to hc */
#define ED_URB_DEL 0x08 /* for unlinking; masked in */
u8 type; /* PIPE_{BULK,...} */
u8 interval; /* interrupt, isochronous */
u16 interval; /* interrupt, isochronous */
union {
struct intr_info { /* interrupt */
u8 int_period;
u8 int_branch;
u8 int_load;
} intr_info;
u16 last_iso; /* isochronous */
} intriso;
u8 state; /* ED_{NEW,UNLINK,OPER} */
#define ED_NEW 0x00 /* unused, no dummy td */
#define ED_UNLINK 0x01 /* dummy td, maybe linked to hc */
#define ED_OPER 0x02 /* dummy td, _is_ linked to hc */
#define ED_URB_DEL 0x08 /* for unlinking; masked in */
/* HC may see EDs on rm_list until next frame (frame_no == tick) */
u16 tick;
struct ed *ed_rm_list;
......@@ -108,6 +108,8 @@ struct td {
dma_addr_t td_dma; /* addr of this TD */
dma_addr_t data_dma; /* addr of data it points to */
struct list_head td_list; /* "shadow list", TDs on same ED */
} __attribute__ ((aligned(32))); /* c/b/i need 16; only iso needs 32 */
#define TD_MASK ((u32)~0x1f) /* strip hw status in low addr bits */
......
......@@ -221,9 +221,10 @@ struct kaweth_device
struct urb *tx_urb;
struct urb *irq_urb;
struct sk_buff *tx_skb;
__u8 *firmware_buf;
__u8 scratch[KAWETH_SCRATCH_SIZE];
__u8 tx_buf[KAWETH_BUF_SIZE];
__u8 rx_buf[KAWETH_BUF_SIZE];
__u8 intbuffer[INTBUFFERSIZE];
__u16 packet_filter_bitmap;
......@@ -650,11 +651,13 @@ static int kaweth_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
static void kaweth_usb_transmit_complete(struct urb *urb)
{
struct kaweth_device *kaweth = urb->context;
struct sk_buff *skb = kaweth->tx_skb;
if (unlikely(urb->status != 0))
kaweth_dbg("%s: TX status %d.", kaweth->net->name, urb->status);
netif_wake_queue(kaweth->net);
dev_kfree_skb(skb);
}
/****************************************************************
......@@ -663,7 +666,7 @@ static void kaweth_usb_transmit_complete(struct urb *urb)
static int kaweth_start_xmit(struct sk_buff *skb, struct net_device *net)
{
struct kaweth_device *kaweth = net->priv;
int count = skb->len;
char *private_header;
int res;
......@@ -679,15 +682,30 @@ static int kaweth_start_xmit(struct sk_buff *skb, struct net_device *net)
kaweth_async_set_rx_mode(kaweth);
netif_stop_queue(net);
*((__u16 *)kaweth->tx_buf) = cpu_to_le16(skb->len);
/* We now decide whether we can put our special header into the sk_buff */
if (skb_cloned(skb) || skb_headroom(skb) < 2) {
/* no such luck - we make our own */
struct sk_buff *copied_skb;
copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
dev_kfree_skb_any(skb);
skb = copied_skb;
if (!copied_skb) {
kaweth->stats.tx_errors++;
netif_start_queue(net);
spin_unlock(&kaweth->device_lock);
return 0;
}
}
memcpy(kaweth->tx_buf + 2, skb->data, skb->len);
private_header = __skb_push(skb, 2);
*private_header = cpu_to_le16(skb->len);
kaweth->tx_skb = skb;
FILL_BULK_URB(kaweth->tx_urb,
kaweth->dev,
usb_sndbulkpipe(kaweth->dev, 2),
kaweth->tx_buf,
count + 2,
private_header,
skb->len,
kaweth_usb_transmit_complete,
kaweth);
kaweth->end = 0;
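Instead of copying every frame into the driver's tx_buf bounce buffer, the transmit path now tries to prepend its 2-byte length header directly into the sk_buff, copying only when the buffer is shared or has no headroom; the skb is then freed from the URB completion handler rather than unconditionally at the end of start_xmit. A condensed sketch of the headroom pattern, with names taken from the hunk above:

    if (skb_cloned(skb) || skb_headroom(skb) < 2) {
        /* shared data or no spare bytes in front: take a private copy that
         * reserves 2 bytes of headroom, then drop the original              */
        struct sk_buff *copy = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
        dev_kfree_skb_any(skb);
        if (!copy)
            return 0;        /* counted as a tx error, as the hunk does */
        skb = copy;
    }
    private_header = __skb_push(skb, 2);  /* now points 2 bytes before the frame */
    /* the 16-bit frame length is stored there, skb->data feeds the bulk URB,
     * and the skb stays alive until kaweth_usb_transmit_complete() runs      */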
......@@ -699,6 +717,7 @@ static int kaweth_start_xmit(struct sk_buff *skb, struct net_device *net)
kaweth->stats.tx_errors++;
netif_start_queue(net);
dev_kfree_skb(skb);
}
else
{
......@@ -707,8 +726,6 @@ static int kaweth_start_xmit(struct sk_buff *skb, struct net_device *net)
net->trans_start = jiffies;
}
dev_kfree_skb(skb);
spin_unlock(&kaweth->device_lock);
return 0;
......
......@@ -51,12 +51,6 @@
#include <linux/slab.h>
/*
* kernel thread actions
*/
#define US_ACT_COMMAND 1
#define US_ACT_EXIT 5
/***********************************************************************
* Host functions
......@@ -204,7 +198,7 @@ static int device_reset( Scsi_Cmnd *srb )
US_DEBUGP("device_reset() called\n" );
/* if the device was removed, then we're already reset */
if (atomic_read(&us->sm_state) == US_STATE_DETACHED)
if (!test_bit(DEV_ATTACHED, &us->bitflags))
return SUCCESS;
scsi_unlock(srb->host);
......@@ -235,7 +229,7 @@ static int bus_reset( Scsi_Cmnd *srb )
US_DEBUGP("bus_reset() called\n");
/* if the device has been removed, this worked */
if (atomic_read(&us->sm_state) == US_STATE_DETACHED) {
if (!test_bit(DEV_ATTACHED, &us->bitflags)) {
US_DEBUGP("-- device removed already\n");
return SUCCESS;
}
......@@ -337,8 +331,8 @@ static int proc_info (char *buffer, char **start, off_t offset, int length,
/* show the GUID of the device */
SPRINTF(" GUID: " GUID_FORMAT "\n", GUID_ARGS(us->guid));
SPRINTF(" Attached: %s\n", (atomic_read(&us->sm_state) ==
US_STATE_DETACHED) ? "Yes" : "No");
SPRINTF(" Attached: %s\n", (test_bit(DEV_ATTACHED, &us->bitflags)
? "Yes" : "No"));
/*
* Calculate start of next buffer, and return value.
......
......@@ -99,13 +99,6 @@ MODULE_LICENSE("GPL");
static int my_host_number;
/*
* kernel thread actions
*/
#define US_ACT_COMMAND 1
#define US_ACT_EXIT 5
/* The list of structures and the protective lock for them */
struct us_data *us_list;
struct semaphore us_list_semaphore;
......@@ -426,7 +419,7 @@ static int usb_stor_control_thread(void * __us)
down(&(us->dev_semaphore));
/* our device has gone - pretend not ready */
if (atomic_read(&us->device_state) == US_STATE_DETACHED) {
if (!test_bit(DEV_ATTACHED, &us->bitflags)) {
US_DEBUGP("Request is for removed device\n");
/* For REQUEST_SENSE, it's the data. But
* for anything else, it should look like
......@@ -450,7 +443,7 @@ static int usb_stor_control_thread(void * __us)
sizeof(usb_stor_sense_notready));
us->srb->result = CHECK_CONDITION << 1;
}
} else { /* atomic_read(&us->device_state) == STATE_DETACHED */
} else { /* test_bit(DEV_ATTACHED, &us->bitflags) */
/* Handle those devices which need us to fake
* their inquiry data */
......@@ -557,9 +550,8 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
unsigned int flags;
struct us_unusual_dev *unusual_dev;
struct us_data *ss = NULL;
#ifdef CONFIG_USB_STORAGE_SDDR09
int result;
#endif
int new_device = 0;
/* these are temporary copies -- we test on these, then put them
* in the us-data structure
......@@ -570,13 +562,13 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
u8 subclass = 0;
u8 protocol = 0;
/* the altsettting on the interface we're probing that matched our
/* the altsetting on the interface we're probing that matched our
* usb_match_id table
*/
struct usb_interface *intf = dev->actconfig->interface;
struct usb_interface_descriptor *altsetting =
intf[ifnum].altsetting + intf[ifnum].act_altsetting;
US_DEBUGP("act_altsettting is %d\n", intf[ifnum].act_altsetting);
US_DEBUGP("act_altsetting is %d\n", intf[ifnum].act_altsetting);
/* clear the temporary strings */
memset(mf, 0, sizeof(mf));
......@@ -663,7 +655,7 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
return NULL;
}
/* At this point, we're committed to using the device */
/* At this point, we've decided to try to use the device */
usb_get_dev(dev);
/* clear the GUID and fetch the strings */
......@@ -696,7 +688,8 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
*/
ss = us_list;
while ((ss != NULL) &&
((ss->pusb_dev) || !GUID_EQUAL(guid, ss->guid)))
(test_bit(DEV_ATTACHED, &ss->bitflags) ||
!GUID_EQUAL(guid, ss->guid)))
ss = ss->next;
if (ss != NULL) {
......@@ -710,29 +703,23 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
/* establish the connection to the new device upon reconnect */
ss->ifnum = ifnum;
ss->pusb_dev = dev;
atomic_set(&ss->device_state, US_STATE_ATTACHED);
set_bit(DEV_ATTACHED, &ss->bitflags);
/* copy over the endpoint data */
if (ep_in)
ss->ep_in = ep_in->bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK;
if (ep_out)
ss->ep_out = ep_out->bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK;
ss->ep_int = ep_int;
/* allocate an IRQ callback if one is needed */
if ((ss->protocol == US_PR_CBI) && usb_stor_allocate_irq(ss)) {
usb_put_dev(dev);
return NULL;
}
if ((ss->protocol == US_PR_CBI) && usb_stor_allocate_irq(ss))
goto BadDevice;
/* allocate the URB we're going to use */
ss->current_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!ss->current_urb) {
usb_put_dev(dev);
return NULL;
}
if (!ss->current_urb)
goto BadDevice;
/* Re-Initialize the device if it needs it */
if (unusual_dev && unusual_dev->initFunction)
......@@ -752,14 +739,12 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
return NULL;
}
memset(ss, 0, sizeof(struct us_data));
new_device = 1;
/* allocate the URB we're going to use */
ss->current_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!ss->current_urb) {
kfree(ss);
usb_put_dev(dev);
return NULL;
}
if (!ss->current_urb)
goto BadDevice;
/* Initialize the mutexes only when the struct is new */
init_completion(&(ss->notify));
......@@ -776,10 +761,8 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
ss->unusual_dev = unusual_dev;
/* copy over the endpoint data */
if (ep_in)
ss->ep_in = ep_in->bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK;
if (ep_out)
ss->ep_out = ep_out->bEndpointAddress &
USB_ENDPOINT_NUMBER_MASK;
ss->ep_int = ep_int;
......@@ -904,12 +887,8 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
#endif
default:
ss->transport_name = "Unknown";
kfree(ss->current_urb);
kfree(ss);
usb_put_dev(dev);
return NULL;
break;
/* ss->transport_name = "Unknown"; */
goto BadDevice;
}
US_DEBUGP("Transport: %s\n", ss->transport_name);
......@@ -959,22 +938,14 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
#endif
default:
ss->protocol_name = "Unknown";
kfree(ss->current_urb);
kfree(ss);
usb_put_dev(dev);
return NULL;
break;
/* ss->protocol_name = "Unknown"; */
goto BadDevice;
}
US_DEBUGP("Protocol: %s\n", ss->protocol_name);
/* allocate an IRQ callback if one is needed */
if ((ss->protocol == US_PR_CBI) && usb_stor_allocate_irq(ss)) {
kfree(ss->current_urb);
kfree(ss);
usb_put_dev(dev);
return NULL;
}
if ((ss->protocol == US_PR_CBI) && usb_stor_allocate_irq(ss))
goto BadDevice;
/*
* Since this is a new device, we need to generate a scsi
......@@ -1001,16 +972,13 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
/* start up our control thread */
atomic_set(&ss->sm_state, US_STATE_IDLE);
atomic_set(&ss->device_state, US_STATE_ATTACHED);
set_bit(DEV_ATTACHED, &ss->bitflags);
ss->pid = kernel_thread(usb_stor_control_thread, ss,
CLONE_VM);
if (ss->pid < 0) {
printk(KERN_WARNING USB_STORAGE
"Unable to start control thread\n");
kfree(ss->current_urb);
kfree(ss);
usb_put_dev(dev);
return NULL;
goto BadDevice;
}
/* wait for the thread to start */
......@@ -1018,7 +986,17 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
/* now register - our detect function will be called */
ss->htmplt.module = THIS_MODULE;
scsi_register_host(&(ss->htmplt));
result = scsi_register_host(&(ss->htmplt));
if (result) {
printk(KERN_WARNING USB_STORAGE
"Unable to register the scsi host\n");
/* tell the control thread to exit */
ss->action = US_ACT_EXIT;
up(&ss->sema);
wait_for_completion(&ss->notify);
goto BadDevice;
}
/* lock access to the data structures */
down(&us_list_semaphore);
......@@ -1038,6 +1016,31 @@ static void * storage_probe(struct usb_device *dev, unsigned int ifnum,
/* return a pointer for the disconnect function */
return ss;
/* we come here if there are any problems */
BadDevice:
US_DEBUGP("storage_probe() failed\n");
down(&ss->irq_urb_sem);
if (ss->irq_urb) {
usb_unlink_urb(ss->irq_urb);
usb_free_urb(ss->irq_urb);
ss->irq_urb = NULL;
}
up(&ss->irq_urb_sem);
if (ss->current_urb) {
usb_unlink_urb(ss->current_urb);
usb_free_urb(ss->current_urb);
ss->current_urb = NULL;
}
clear_bit(DEV_ATTACHED, &ss->bitflags);
ss->pusb_dev = NULL;
if (new_device)
kfree(ss);
else
up(&ss->dev_semaphore);
usb_put_dev(dev);
return NULL;
}
/* Handle a disconnect event from the USB core */
......@@ -1078,7 +1081,7 @@ static void storage_disconnect(struct usb_device *dev, void *ptr)
/* mark the device as gone */
usb_put_dev(ss->pusb_dev);
ss->pusb_dev = NULL;
atomic_set(&ss->sm_state, US_STATE_DETACHED);
clear_bit(DEV_ATTACHED, &ss->bitflags);
/* unlock access to the device data structure */
up(&(ss->dev_semaphore));
......
......@@ -103,9 +103,10 @@ struct us_unusual_dev {
#define US_FL_SCM_MULT_TARG 0x00000020 /* supports multiple targets */
#define US_FL_FIX_INQUIRY 0x00000040 /* INQUIRY response needs fixing */
/* device attached/detached states */
#define US_STATE_DETACHED 1
#define US_STATE_ATTACHED 2
/* kernel thread actions */
#define US_ACT_COMMAND 1
#define US_ACT_EXIT 5
/* processing state machine states */
#define US_STATE_IDLE 1
......@@ -127,10 +128,9 @@ struct us_data {
/* The device we're working with
* It's important to note:
* (o) you must hold dev_semaphore to change pusb_dev
* (o) device_state should change whenever pusb_dev does
* (o) DEV_ATTACHED in bitflags should change whenever pusb_dev does
*/
struct semaphore dev_semaphore; /* protect pusb_dev */
atomic_t device_state; /* attached or detached */
struct usb_device *pusb_dev; /* this usb_device */
unsigned int flags; /* from filter initially */
......@@ -174,6 +174,7 @@ struct us_data {
struct semaphore ip_waitq; /* for CBI interrupts */
unsigned long bitflags; /* single-bit flags: */
#define IP_WANTED 1 /* is an IRQ expected? */
#define DEV_ATTACHED 2 /* is the dev. attached?*/
/* interrupt communications data */
struct semaphore irq_urb_sem; /* to protect irq_urb */
......
This diff is collapsed.
......@@ -10,32 +10,13 @@
#include <linux/mm.h>
#include <asm/uaccess.h>
#define POLL_INLINE_BYTES 256
#define FAST_SELECT_MAX 128
#define FAST_POLL_MAX 128
#define POLL_INLINE_ENTRIES (1+(POLL_INLINE_BYTES / sizeof(struct poll_table_entry)))
struct poll_table_entry {
struct file * filp;
wait_queue_t wait;
wait_queue_head_t * wait_address;
};
struct poll_table_page {
struct poll_table_page * next;
struct poll_table_entry * entry;
struct poll_table_entry entries[0];
};
struct poll_table_page;
typedef struct poll_table_struct {
int error;
struct poll_table_page * table;
struct poll_table_page inline_page;
struct poll_table_entry inline_table[POLL_INLINE_ENTRIES];
} poll_table;
#define POLL_INLINE_TABLE_LEN (sizeof(poll_table) - offsetof(poll_table, inline_page))
extern void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p);
static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
......@@ -49,7 +30,6 @@ static inline void poll_initwait(poll_table* pt)
pt->error = 0;
pt->table = NULL;
}
extern void poll_freewait(poll_table* pt);
......@@ -69,6 +49,27 @@ typedef struct {
#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))
/*
* We do a VERIFY_WRITE here even though we are only reading this time:
* we'll write to it eventually..
*
* Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
*/
static inline
int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
{
nr = FDS_BYTES(nr);
if (ufdset) {
int error;
error = verify_area(VERIFY_WRITE, ufdset, nr);
if (!error && __copy_from_user(fdset, ufdset, nr))
error = -EFAULT;
return error;
}
memset(fdset, 0, nr);
return 0;
}
static inline
void set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
{
......@@ -76,6 +77,12 @@ void set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
__copy_to_user(ufdset, fdset, FDS_BYTES(nr));
}
static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
memset(fdset, 0, FDS_BYTES(nr));
}
extern int do_select(int n, fd_set_bits *fds, long *timeout);
#endif /* KERNEL */
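These helpers move the fd_set copy-in/copy-out logic into the header as inlines: nr is a descriptor count converted to bytes with FDS_BYTES(), and a NULL user pointer simply yields an all-zero kernel bitmap. A minimal usage sketch, assuming a caller such as the select() path that has already allocated FDS_BYTES(n) bytes of kernel memory; n, user_inp, in_bits and res_bits are illustrative names, not part of this header:

    int err = get_fd_set(n, user_inp, in_bits);   /* verify_area + __copy_from_user */
    if (err)
        return err;
    zero_fd_set(n, res_bits);                     /* start with an empty result set */
    /* ... do_select() sets bits in res_bits for ready descriptors ... */
    set_fd_set(n, user_inp, res_bits);            /* copy the result back out       */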
......
......@@ -63,8 +63,6 @@
extern int md_size[MAX_MD_DEVS];
extern struct hd_struct md_hd_struct[MAX_MD_DEVS];
extern void add_mddev_mapping (mddev_t *mddev, kdev_t dev, void *data);
extern void del_mddev_mapping (mddev_t *mddev, kdev_t dev);
extern char * partition_name (kdev_t dev);
extern inline char * bdev_partition_name (struct block_device *bdev)
{
......@@ -77,14 +75,9 @@ extern mdk_thread_t * md_register_thread (void (*run) (void *data),
extern void md_unregister_thread (mdk_thread_t *thread);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_interrupt_thread (mdk_thread_t *thread);
extern int md_update_sb (mddev_t *mddev);
extern int md_do_sync(mddev_t *mddev, mdp_disk_t *spare);
extern void md_update_sb (mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_sync_acct(kdev_t dev, unsigned long nr_sectors);
extern void md_recover_arrays (void);
extern int md_check_ordering (mddev_t *mddev);
extern int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x);
extern int md_error (mddev_t *mddev, struct block_device *bdev);
extern int md_run_setup(void);
......
......@@ -64,24 +64,6 @@ typedef struct mdk_rdev_s mdk_rdev_t;
#define MAX_MD_DEVS (1<<MINORBITS) /* Max number of md dev */
/*
* Maps a kdev to an mddev/subdev. How 'data' is handled is up to
* the personality. (eg. HSM uses this to identify individual LVs)
*/
typedef struct dev_mapping_s {
mddev_t *mddev;
void *data;
} dev_mapping_t;
extern dev_mapping_t mddev_map [MAX_MD_DEVS];
static inline mddev_t * kdev_to_mddev (kdev_t dev)
{
if (major(dev) != MD_MAJOR)
BUG();
return mddev_map[minor(dev)].mddev;
}
/*
* options passed in raidrun:
*/
......@@ -196,31 +178,38 @@ struct mddev_s
mdk_personality_t *pers;
int __minor;
mdp_super_t *sb;
int nb_dev;
struct list_head disks;
int sb_dirty;
mdu_param_t param;
int ro;
struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
unsigned long curr_resync; /* blocks scheduled */
unsigned long resync_mark; /* a recent timestamp */
unsigned long resync_mark_cnt;/* blocks written at resync_mark */
char *name;
/* recovery_running is 0 for no recovery/resync,
* 1 for active recovery
* 2 for active resync
* -error for an error (e.g. -EINTR)
* it can only be set > 0 under reconfig_sem
*/
int recovery_running;
int in_sync; /* know to not need resync */
struct semaphore reconfig_sem;
struct semaphore recovery_sem;
struct semaphore resync_sem;
atomic_t active;
mdp_disk_t *spare;
atomic_t recovery_active; /* blocks scheduled, but not written */
wait_queue_head_t recovery_wait;
request_queue_t queue; /* for plugging ... */
struct list_head all_mddevs;
};
struct mdk_personality_s
{
char *name;
int (*make_request)(mddev_t *mddev, int rw, struct bio *bio);
int (*make_request)(request_queue_t *q, struct bio *bio);
int (*run)(mddev_t *mddev);
int (*stop)(mddev_t *mddev);
int (*status)(char *page, mddev_t *mddev);
......@@ -237,9 +226,6 @@ struct mdk_personality_s
* SPARE_ACTIVE expects such a change)
*/
int (*diskop) (mddev_t *mddev, mdp_disk_t **descriptor, int state);
int (*stop_resync)(mddev_t *mddev);
int (*restart_resync)(mddev_t *mddev);
int (*sync_request)(mddev_t *mddev, sector_t sector_nr, int go_faster);
};
......@@ -279,13 +265,6 @@ extern mdp_disk_t *get_spare(mddev_t *mddev);
#define ITERATE_RDEV(mddev,rdev,tmp) \
ITERATE_RDEV_GENERIC((mddev)->disks,same_set,rdev,tmp)
/*
* Same as above, but assumes that the device has rdev->desc_nr numbered
* from 0 to mddev->nb_dev, and iterates through rdevs in ascending order.
*/
#define ITERATE_RDEV_ORDERED(mddev,rdev,i) \
for (i = 0; rdev = find_rdev_nr(mddev, i), i < mddev->nb_dev; i++)
/*
* Iterates through all 'RAID managed disks'
......@@ -299,26 +278,6 @@ extern mdp_disk_t *get_spare(mddev_t *mddev);
#define ITERATE_RDEV_PENDING(rdev,tmp) \
ITERATE_RDEV_GENERIC(pending_raid_disks,pending,rdev,tmp)
/*
* iterates through all used mddevs in the system.
*/
#define ITERATE_MDDEV(mddev,tmp) \
\
for (tmp = all_mddevs.next; \
mddev = list_entry(tmp, mddev_t, all_mddevs), \
tmp = tmp->next, tmp->prev != &all_mddevs \
; )
static inline int lock_mddev (mddev_t * mddev)
{
return down_interruptible(&mddev->reconfig_sem);
}
static inline void unlock_mddev (mddev_t * mddev)
{
up(&mddev->reconfig_sem);
}
#define xchg_values(x,y) do { __typeof__(x) __tmp = x; \
x = y; y = __tmp; } while (0)
......
......@@ -33,8 +33,7 @@ struct r1_private_data_s {
int working_disks;
int last_used;
sector_t next_seq_sect;
mdk_thread_t *thread, *resync_thread;
int resync_mirrors;
mdk_thread_t *thread;
mirror_info_t *spare;
spinlock_t device_lock;
......
......@@ -177,7 +177,7 @@ struct stripe_head {
* is put on a "delayed" queue until there are no stripes currently
* in a pre-read phase. Further, if the "delayed" queue is empty when
* a stripe is put on it then we "plug" the queue and do not process it
* until an unplg call is made. (the tq_disk list is run).
* until an unplug call is made. (blk_run_queues is run).
*
* When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
* it to the count of prereading stripes.
......@@ -205,12 +205,11 @@ struct disk_info {
struct raid5_private_data {
struct stripe_head **stripe_hashtbl;
mddev_t *mddev;
mdk_thread_t *thread, *resync_thread;
mdk_thread_t *thread;
struct disk_info disks[MD_SB_DISKS];
struct disk_info *spare;
int chunk_size, level, algorithm;
int raid_disks, working_disks, failed_disks;
int resync_parity;
int max_nr_stripes;
struct list_head handle_list; /* stripes needing handling */
......@@ -229,9 +228,6 @@ struct raid5_private_data {
* waiting for 25% to be free
*/
spinlock_t device_lock;
int plugged;
struct tq_struct plug_tq;
};
typedef struct raid5_private_data raid5_conf_t;
......