Commit 50e73a4a authored by Linus Torvalds

Merge tag 'for-linus-20190809' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Revert of a bcache patch that caused an oops for some (Coly)

 - ata rb532 unused warning fix (Gustavo)

 - AoE kernel crash fix (He)

 - Error handling fixup for blkdev_get() (Jan)

 - libata read/write translation and SFF PIO fix (me)

 - Use after free and error handling fix for O_DIRECT fragments. There's
   still a nowait + sync oddity in there, we'll nail that start of next
   week. If all else fails, I'll queue a revert of the NOWAIT change.
   (me)

 - Loop GFP_KERNEL -> GFP_NOIO deadlock fix (Mikulas)

 - Two BFQ regression fixes that caused crashes (Paolo)

* tag 'for-linus-20190809' of git://git.kernel.dk/linux-block:
  bcache: Revert "bcache: use sysfs_match_string() instead of __sysfs_match_string()"
  loop: set PF_MEMALLOC_NOIO for the worker thread
  bdev: Fixup error handling in blkdev_get()
  block, bfq: handle NULL return value by bfq_init_rq()
  block, bfq: move update of waker and woken list to queue freeing
  block, bfq: reset last_completed_rq_bfqq if the pointed queue is freed
  block: aoe: Fix kernel crash due to atomic sleep when exiting
  libata: add SG safety checks in SFF pio transfers
  libata: have ata_scsi_rw_xlat() fail invalid passthrough requests
  block: fix O_DIRECT error handling for bio fragments
  ata: rb532_cf: Fix unused variable warning in rb532_pata_driver_probe
parents 461d2815 20621fed
@@ -1924,12 +1924,13 @@ static void bfq_add_request(struct request *rq)
 	 * confirmed no later than during the next
 	 * I/O-plugging interval for bfqq.
 	 */
-	if (!bfq_bfqq_has_short_ttime(bfqq) &&
+	if (bfqd->last_completed_rq_bfqq &&
+	    !bfq_bfqq_has_short_ttime(bfqq) &&
 	    ktime_get_ns() - bfqd->last_completion <
 	    200 * NSEC_PER_USEC) {
 		if (bfqd->last_completed_rq_bfqq != bfqq &&
 		    bfqd->last_completed_rq_bfqq !=
 		    bfqq->waker_bfqq) {
 			/*
 			 * First synchronization detected with
 			 * a candidate waker queue, or with a
@@ -2250,9 +2251,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
 		   blk_rq_pos(container_of(rb_prev(&req->rb_node),
 					   struct request, rb_node))) {
 		struct bfq_queue *bfqq = bfq_init_rq(req);
-		struct bfq_data *bfqd = bfqq->bfqd;
+		struct bfq_data *bfqd;
 		struct request *prev, *next_rq;

+		if (!bfqq)
+			return;
+
+		bfqd = bfqq->bfqd;
+
 		/* Reposition request in its sort_list */
 		elv_rb_del(&bfqq->sort_list, req);
 		elv_rb_add(&bfqq->sort_list, req);
@@ -2299,6 +2305,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
 	struct bfq_queue *bfqq = bfq_init_rq(rq),
 			 *next_bfqq = bfq_init_rq(next);

+	if (!bfqq)
+		return;
+
 	/*
 	 * If next and rq belong to the same bfq_queue and next is older
 	 * than rq, then reposition rq in the fifo (by substituting next
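
Both hunks above, and the bfq_insert_request hunk further down, apply the same guard: bfq_init_rq() can return NULL (for example for requests with no bfq_queue attached), so each hook bails out instead of dereferencing the result. As a standalone illustration of the idiom, here is a minimal runnable userspace sketch; the names (lookup_queue, table) are hypothetical, not kernel code:

    #include <stdio.h>
    #include <stddef.h>

    struct queue { int weight; };

    /* Stand-in for bfq_init_rq(): may legitimately return NULL. */
    static struct queue *lookup_queue(struct queue *table[], int id)
    {
            return (id >= 0 && id < 4) ? table[id] : NULL;
    }

    int main(void)
    {
            struct queue q0 = { .weight = 1 };
            struct queue *table[4] = { &q0, NULL, NULL, NULL };
            struct queue *q = lookup_queue(table, 2);

            if (!q) {               /* the guard the hunks above add */
                    puts("no queue attached, skipping");
                    return 0;
            }
            q->weight++;            /* only reached with a valid queue */
            return 0;
    }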
@@ -4764,6 +4773,8 @@ static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx)
  */
 void bfq_put_queue(struct bfq_queue *bfqq)
 {
+	struct bfq_queue *item;
+	struct hlist_node *n;
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 	struct bfq_group *bfqg = bfqq_group(bfqq);
 #endif
@@ -4808,6 +4819,36 @@ void bfq_put_queue(struct bfq_queue *bfqq)
 			bfqq->bfqd->burst_size--;
 	}

+	/*
+	 * bfqq does not exist any longer, so it cannot be woken by
+	 * any other queue, and cannot wake any other queue. Then bfqq
+	 * must be removed from the woken list of its possible waker
+	 * queue, and all queues in the woken list of bfqq must stop
+	 * having a waker queue. Strictly speaking, these updates
+	 * should be performed when bfqq remains with no I/O source
+	 * attached to it, which happens before bfqq gets freed. In
+	 * particular, this happens when the last process associated
+	 * with bfqq exits or gets associated with a different
+	 * queue. However, both events lead to bfqq being freed soon,
+	 * and dangling references would come out only after bfqq gets
+	 * freed. So these updates are done here, as a simple and safe
+	 * way to handle all cases.
+	 */
+	/* remove bfqq from woken list */
+	if (!hlist_unhashed(&bfqq->woken_list_node))
+		hlist_del_init(&bfqq->woken_list_node);
+
+	/* reset waker for all queues in woken list */
+	hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
+				  woken_list_node) {
+		item->waker_bfqq = NULL;
+		bfq_clear_bfqq_has_waker(item);
+		hlist_del_init(&item->woken_list_node);
+	}
+
+	if (bfqq->bfqd && bfqq->bfqd->last_completed_rq_bfqq == bfqq)
+		bfqq->bfqd->last_completed_rq_bfqq = NULL;
+
 	kmem_cache_free(bfq_pool, bfqq);
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 	bfqg_and_blkg_put(bfqg);
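
The woken-list walk added above unlinks entries while iterating, which is exactly what the _safe iterator variants exist for: the cursor for the next node is saved before the current node is removed. A runnable userspace miniature of the same idea, with a plain singly linked list standing in for the kernel's hlist (names are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            int id;
            struct node *next;
    };

    int main(void)
    {
            /* build the list 3 -> 2 -> 1 */
            struct node *head = NULL;
            for (int i = 1; i <= 3; i++) {
                    struct node *nd = malloc(sizeof(*nd));
                    nd->id = i;
                    nd->next = head;
                    head = nd;
            }

            /* "safe" traversal: capture next before freeing the current
             * node, which is what hlist_for_each_entry_safe does with
             * its extra cursor argument. */
            struct node *pos = head, *n;
            while (pos) {
                    n = pos->next;          /* saved before pos is gone */
                    printf("unlinking %d\n", pos->id);
                    free(pos);
                    pos = n;
            }
            return 0;
    }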
@@ -4835,9 +4876,6 @@ static void bfq_put_cooperator(struct bfq_queue *bfqq)

 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
-	struct bfq_queue *item;
-	struct hlist_node *n;
-
 	if (bfqq == bfqd->in_service_queue) {
 		__bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
 		bfq_schedule_dispatch(bfqd);
@@ -4847,18 +4885,6 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)

 	bfq_put_cooperator(bfqq);

-	/* remove bfqq from woken list */
-	if (!hlist_unhashed(&bfqq->woken_list_node))
-		hlist_del_init(&bfqq->woken_list_node);
-
-	/* reset waker for all queues in woken list */
-	hlist_for_each_entry_safe(item, n, &bfqq->woken_list,
-				  woken_list_node) {
-		item->waker_bfqq = NULL;
-		bfq_clear_bfqq_has_waker(item);
-		hlist_del_init(&item->woken_list_node);
-	}
-
 	bfq_put_queue(bfqq); /* release process reference */
 }
@@ -5436,12 +5462,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,

 	spin_lock_irq(&bfqd->lock);
 	bfqq = bfq_init_rq(rq);
-	if (at_head || blk_rq_is_passthrough(rq)) {
+	if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
 		if (at_head)
 			list_add(&rq->queuelist, &bfqd->dispatch);
 		else
 			list_add_tail(&rq->queuelist, &bfqd->dispatch);
-	} else { /* bfqq is assumed to be non null here */
+	} else {
 		idle_timer_disabled = __bfq_insert_request(bfqd, rq);
 		/*
 		 * Update bfqq, because, if a queue merge has occurred
...
@@ -1786,6 +1786,21 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
 	return 1;
 }

+static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
+{
+	struct request *rq = scmd->request;
+	u32 req_blocks;
+
+	if (!blk_rq_is_passthrough(rq))
+		return true;
+
+	req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
+	if (n_blocks > req_blocks)
+		return false;
+
+	return true;
+}
+
 /**
  *	ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
  *	@qc: Storage for translated ATA taskfile

@@ -1830,6 +1845,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 		scsi_10_lba_len(cdb, &block, &n_block);
 		if (cdb[1] & (1 << 3))
 			tf_flags |= ATA_TFLAG_FUA;
+		if (!ata_check_nblocks(scmd, n_block))
+			goto invalid_fld;
 		break;
 	case READ_6:
 	case WRITE_6:

@@ -1844,6 +1861,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 		 */
 		if (!n_block)
 			n_block = 256;
+		if (!ata_check_nblocks(scmd, n_block))
+			goto invalid_fld;
 		break;
 	case READ_16:
 	case WRITE_16:

@@ -1854,6 +1873,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 		scsi_16_lba_len(cdb, &block, &n_block);
 		if (cdb[1] & (1 << 3))
 			tf_flags |= ATA_TFLAG_FUA;
+		if (!ata_check_nblocks(scmd, n_block))
+			goto invalid_fld;
 		break;
 	default:
 		DPRINTK("no-byte command\n");
...
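The new helper is pure arithmetic: a passthrough request carries blk_rq_bytes(rq) bytes of buffer, so the block count taken from the CDB must not exceed blk_rq_bytes(rq) / sector_size, or the transfer would run past the end of the buffer. A standalone runnable sketch of that bound, using hypothetical names and values rather than the kernel helpers:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the bound ata_check_nblocks() enforces: the CDB may
     * not name more blocks than the request's buffer can hold. */
    static bool check_nblocks(uint32_t req_bytes, uint32_t sector_size,
                              uint32_t n_blocks)
    {
            uint32_t req_blocks = req_bytes / sector_size;

            return n_blocks <= req_blocks;
    }

    int main(void)
    {
            /* 4 KiB buffer, 512-byte sectors: at most 8 blocks fit */
            printf("8 blocks: %s\n", check_nblocks(4096, 512, 8) ? "ok" : "rejected");
            printf("9 blocks: %s\n", check_nblocks(4096, 512, 9) ? "ok" : "rejected");
            return 0;
    }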
@@ -658,6 +658,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	unsigned int offset;
 	unsigned char *buf;

+	if (!qc->cursg) {
+		qc->curbytes = qc->nbytes;
+		return;
+	}
 	if (qc->curbytes == qc->nbytes - qc->sect_size)
 		ap->hsm_task_state = HSM_ST_LAST;

@@ -683,6 +687,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	if (qc->cursg_ofs == qc->cursg->length) {
 		qc->cursg = sg_next(qc->cursg);
+		if (!qc->cursg)
+			ap->hsm_task_state = HSM_ST_LAST;
 		qc->cursg_ofs = 0;
 	}
 }
...
@@ -158,7 +158,6 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
 static int rb532_pata_driver_remove(struct platform_device *pdev)
 {
 	struct ata_host *ah = platform_get_drvdata(pdev);
-	struct rb532_cf_info *info = ah->private_data;

 	ata_host_detach(ah);
...
@@ -323,10 +323,14 @@ flush(const char __user *str, size_t cnt, int exiting)
 	}
 	flush_scheduled_work();

-	/* pass one: without sleeping, do aoedev_downdev */
+	/* pass one: do aoedev_downdev, which might sleep */
+restart1:
 	spin_lock_irqsave(&devlist_lock, flags);
 	for (d = devlist; d; d = d->next) {
 		spin_lock(&d->lock);
+		if (d->flags & DEVFL_TKILL)
+			goto cont;
+
 		if (exiting) {
 			/* unconditionally take each device down */
 		} else if (specified) {

@@ -338,8 +342,11 @@ flush(const char __user *str, size_t cnt, int exiting)
 		|| d->ref)
 			goto cont;

+		spin_unlock(&d->lock);
+		spin_unlock_irqrestore(&devlist_lock, flags);
 		aoedev_downdev(d);
 		d->flags |= DEVFL_TKILL;
+		goto restart1;
 cont:
 		spin_unlock(&d->lock);
 	}

@@ -348,7 +355,7 @@ flush(const char __user *str, size_t cnt, int exiting)
 	/* pass two: call freedev, which might sleep,
 	 * for aoedevs marked with DEVFL_TKILL
 	 */
-restart:
+restart2:
 	spin_lock_irqsave(&devlist_lock, flags);
 	for (d = devlist; d; d = d->next) {
 		spin_lock(&d->lock);

@@ -357,7 +364,7 @@ flush(const char __user *str, size_t cnt, int exiting)
 			spin_unlock(&d->lock);
 			spin_unlock_irqrestore(&devlist_lock, flags);
 			freedev(d);
-			goto restart;
+			goto restart2;
 		}
 		spin_unlock(&d->lock);
 	}
...
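The shape of the aoe fix is a classic one: a function that may sleep cannot be called under a spinlock, so the scan drops both locks, does the blocking work, and restarts from the head of the list, using the DEVFL_TKILL flag to skip devices already handled. A runnable userspace miniature of that restart pattern, with a pthread mutex standing in for the spinlocks and hypothetical names; for simplicity it marks the device before dropping the lock, a detail the kernel patch orders differently:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NDEV 3

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool killed[NDEV];           /* plays the role of DEVFL_TKILL */

    static void blocking_downdev(int i) /* may sleep; called unlocked */
    {
            printf("taking device %d down\n", i);
    }

    int main(void)
    {
    restart:
            pthread_mutex_lock(&list_lock);
            for (int i = 0; i < NDEV; i++) {
                    if (killed[i])              /* already handled, skip */
                            continue;
                    killed[i] = true;           /* mark before dropping lock */
                    pthread_mutex_unlock(&list_lock);
                    blocking_downdev(i);        /* safe: no lock held */
                    goto restart;               /* list may have changed */
            }
            pthread_mutex_unlock(&list_lock);
            return 0;
    }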
@@ -885,7 +885,7 @@ static void loop_unprepare_queue(struct loop_device *lo)

 static int loop_kthread_worker_fn(void *worker_ptr)
 {
-	current->flags |= PF_LESS_THROTTLE;
+	current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
 	return kthread_worker_fn(worker_ptr);
 }
...
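PF_MEMALLOC_NOIO makes every allocation performed by the marked task behave as if it had been requested with GFP_NOIO, so memory reclaim can never recurse into the block I/O that the loop worker itself must service to make progress. For scoped rather than per-thread use, the kernel also provides memalloc_noio_save()/memalloc_noio_restore(); the following kernel-style sketch is illustrative only (it is not from this commit and is not standalone userspace code):

    /* Illustrative kernel-style sketch: constrain a region to GFP_NOIO
     * without touching every allocation site inside it. */
    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    static void *alloc_in_io_path(size_t len)
    {
            unsigned int noio_flags;
            void *p;

            noio_flags = memalloc_noio_save(); /* allocs now imply GFP_NOIO */
            p = kmalloc(len, GFP_KERNEL);      /* effectively GFP_NOIO here */
            memalloc_noio_restore(noio_flags);
            return p;
    }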
@@ -23,24 +23,28 @@ static const char * const bch_cache_modes[] = {
 	"writethrough",
 	"writeback",
 	"writearound",
-	"none"
+	"none",
+	NULL
 };

 /* Default is 0 ("auto") */
 static const char * const bch_stop_on_failure_modes[] = {
 	"auto",
-	"always"
+	"always",
+	NULL
 };

 static const char * const cache_replacement_policies[] = {
 	"lru",
 	"fifo",
-	"random"
+	"random",
+	NULL
 };

 static const char * const error_actions[] = {
 	"unregister",
-	"panic"
+	"panic",
+	NULL
 };

 write_attribute(attach);

@@ -338,7 +342,7 @@ STORE(__cached_dev)
 	}

 	if (attr == &sysfs_cache_mode) {
-		v = sysfs_match_string(bch_cache_modes, buf);
+		v = __sysfs_match_string(bch_cache_modes, -1, buf);
 		if (v < 0)
 			return v;

@@ -349,7 +353,7 @@ STORE(__cached_dev)
 	}

 	if (attr == &sysfs_stop_when_cache_set_failed) {
-		v = sysfs_match_string(bch_stop_on_failure_modes, buf);
+		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
 		if (v < 0)
 			return v;

@@ -816,7 +820,7 @@ STORE(__bch_cache_set)
 		0, UINT_MAX);

 	if (attr == &sysfs_errors) {
-		v = sysfs_match_string(error_actions, buf);
+		v = __sysfs_match_string(error_actions, -1, buf);
 		if (v < 0)
 			return v;

@@ -1088,7 +1092,7 @@ STORE(__bch_cache)
 	}

 	if (attr == &sysfs_cache_replacement_policy) {
-		v = sysfs_match_string(cache_replacement_policies, buf);
+		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
 		if (v < 0)
 			return v;
...
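The revert works because __sysfs_match_string() called with a length of -1 keeps scanning until it reaches a NULL entry, which is why the NULL sentinels have to come back into the arrays. A runnable userspace re-creation of that matching loop (simplified: exact strcmp, no newline trimming, and -1 in place of -EINVAL):

    #include <stdio.h>
    #include <string.h>

    /* Simplified model of __sysfs_match_string(array, -1, str):
     * with n == -1 the scan is bounded only by the NULL sentinel. */
    static int match_string_nullterm(const char *const array[], const char *str)
    {
            for (int i = 0; array[i]; i++)      /* NULL entry terminates */
                    if (!strcmp(array[i], str))
                            return i;
            return -1;                          /* kernel returns -EINVAL */
    }

    int main(void)
    {
            static const char *const cache_modes[] = {
                    "writethrough", "writeback", "writearound", "none",
                    NULL    /* without this, the loop runs off the end */
            };

            printf("%d\n", match_string_nullterm(cache_modes, "writeback")); /* 1 */
            printf("%d\n", match_string_nullterm(cache_modes, "bogus"));     /* -1 */
            return 0;
    }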
@@ -349,7 +349,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	loff_t pos = iocb->ki_pos;
 	blk_qc_t qc = BLK_QC_T_NONE;
 	gfp_t gfp;
-	ssize_t ret;
+	int ret;

 	if ((pos | iov_iter_alignment(iter)) &
 	    (bdev_logical_block_size(bdev) - 1))

@@ -386,8 +386,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	ret = 0;
 	for (;;) {
-		int err;
-
 		bio_set_dev(bio, bdev);
 		bio->bi_iter.bi_sector = pos >> 9;
 		bio->bi_write_hint = iocb->ki_hint;

@@ -395,10 +393,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 		bio->bi_end_io = blkdev_bio_end_io;
 		bio->bi_ioprio = iocb->ki_ioprio;

-		err = bio_iov_iter_get_pages(bio, iter);
-		if (unlikely(err)) {
-			if (!ret)
-				ret = err;
+		ret = bio_iov_iter_get_pages(bio, iter);
+		if (unlikely(ret)) {
 			bio->bi_status = BLK_STS_IOERR;
 			bio_endio(bio);
 			break;

@@ -421,7 +417,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 		if (nowait)
 			bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);

-		dio->size += bio->bi_iter.bi_size;
 		pos += bio->bi_iter.bi_size;

 		nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);

@@ -433,13 +428,13 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 				polled = true;
 			}

+			dio->size += bio->bi_iter.bi_size;
 			qc = submit_bio(bio);
 			if (qc == BLK_QC_T_EAGAIN) {
-				if (!ret)
-					ret = -EAGAIN;
+				dio->size -= bio->bi_iter.bi_size;
+				ret = -EAGAIN;
 				goto error;
 			}
-			ret = dio->size;

 			if (polled)
 				WRITE_ONCE(iocb->ki_cookie, qc);

@@ -460,18 +455,17 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			atomic_inc(&dio->ref);
 		}

+		dio->size += bio->bi_iter.bi_size;
 		qc = submit_bio(bio);
 		if (qc == BLK_QC_T_EAGAIN) {
-			if (!ret)
-				ret = -EAGAIN;
+			dio->size -= bio->bi_iter.bi_size;
+			ret = -EAGAIN;
 			goto error;
 		}
-		ret = dio->size;

 		bio = bio_alloc(gfp, nr_pages);
 		if (!bio) {
-			if (!ret)
-				ret = -EAGAIN;
+			ret = -EAGAIN;
 			goto error;
 		}
 	}

@@ -496,6 +490,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 out:
 	if (!ret)
 		ret = blk_status_to_errno(dio->bio.bi_status);
+	if (likely(!ret))
+		ret = dio->size;

 	bio_put(&dio->bio);
 	return ret;

@@ -1754,7 +1750,10 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
 		/* finish claiming */
 		mutex_lock(&bdev->bd_mutex);
-		bd_finish_claiming(bdev, whole, holder);
+		if (!res)
+			bd_finish_claiming(bdev, whole, holder);
+		else
+			bd_abort_claiming(bdev, whole, holder);
 		/*
 		 * Block event polling for write claims if requested.  Any
 		 * write holder makes the write_holder state stick until
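The heart of the O_DIRECT fix above is accounting discipline: dio->size is bumped only when a bio is actually handed to submit_bio(), rolled back if submission returns BLK_QC_T_EAGAIN, and the byte count is folded into the return value exactly once, at the end, instead of after every submission. A runnable miniature of that discipline, with hypothetical names and a faked failing submission:

    #include <stdbool.h>
    #include <stdio.h>

    /* Miniature of the dio->size discipline in the hunks above:
     * account a chunk only around a submission that may be rejected. */
    struct dio { long size; };

    static bool submit(int i)           /* stand-in for submit_bio() */
    {
            return i != 1;              /* pretend chunk 1 hits -EAGAIN */
    }

    int main(void)
    {
            struct dio dio = { 0 };
            long chunk[] = { 4096, 4096, 4096 };
            int ret = 0;

            for (int i = 0; i < 3; i++) {
                    dio.size += chunk[i];         /* count it... */
                    if (!submit(i)) {
                            dio.size -= chunk[i]; /* ...roll back on EAGAIN */
                            ret = -11;            /* -EAGAIN */
                            break;
                    }
            }
            if (!ret)
                    ret = (int)dio.size;          /* success: bytes moved */
            printf("ret=%d size=%ld\n", ret, dio.size);
            return 0;
    }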