Commit 356cebea authored by Jens Axboe

[BLOCK] Kill blk_attempt_remerge()

It's a broken interface, it's done way too late. And apparently it triggers
slab problems in recent kernels as well (most likely after the generic dispatch
code was merged). So kill it, ide-cd is the only user of it.
Signed-off-by: Jens Axboe <axboe@suse.de>
parent 5a57be8d
......@@ -2734,30 +2734,6 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
return 0;
}
/**
 * blk_attempt_remerge - attempt to remerge active head with next request
 * @q: The &request_queue_t belonging to the device
 * @rq: The head request (usually)
 *
 * Description:
 * For head-active devices, the queue can easily be unplugged so quickly
 * that proper merging is not done on the front request. This may hurt
 * performance greatly for some devices. The block layer cannot safely
 * do merging on that first request for these queues, but the driver can
 * call this function and make it happen any way. Only the driver knows
 * when it is safe to do so.
 *
 * NOTE(review): removed by this commit — the driver calls this far too
 * late for merging to be done safely, and it reportedly triggers slab
 * corruption with the generic dispatch code. ide-cd was the sole caller.
 **/
void blk_attempt_remerge(request_queue_t *q, struct request *rq)
{
	unsigned long flags;
	/* merging touches queue state, so take queue_lock with IRQs off */
	spin_lock_irqsave(q->queue_lock, flags);
	/* try to merge @rq with the request queued behind it */
	attempt_back_merge(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(blk_attempt_remerge);
static void init_request_from_bio(struct request *req, struct bio *bio)
{
req->flags |= REQ_CMD;
......
......@@ -1332,8 +1332,6 @@ static ide_startstop_t cdrom_start_read (ide_drive_t *drive, unsigned int block)
if (cdrom_read_from_buffer(drive))
return ide_stopped;
blk_attempt_remerge(drive->queue, rq);
/* Clear the local sector buffer. */
info->nsectors_buffered = 0;
......@@ -1874,14 +1872,6 @@ static ide_startstop_t cdrom_start_write(ide_drive_t *drive, struct request *rq)
return ide_stopped;
}
/*
* for dvd-ram and such media, it's a really big deal to get
* big writes all the time. so scour the queue and attempt to
* remerge requests, often the plugging will not have had time
* to do this properly
*/
blk_attempt_remerge(drive->queue, rq);
info->nsectors_buffered = 0;
/* use dma, if possible. we don't need to check more, since we
......
......@@ -592,7 +592,6 @@ extern void generic_make_request(struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(request_queue_t *, struct request *);
extern void blk_end_sync_rq(struct request *rq, int error);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
extern void blk_requeue_request(request_queue_t *, struct request *);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment