Commit ded080c8 authored by Ilya Dryomov

rbd: don't move requests to the running list on errors

The running list is supposed to contain requests that are pinning the
exclusive lock, i.e. those that must be flushed before the exclusive
lock is released.  When wake_lock_waiters() is called to handle an
error, requests on the acquiring list are failed with that error and
no flushing takes place.  Briefly moving them to the running list is
not only pointless but also harmful: if the exclusive lock gets
acquired before all of their state machines are scheduled and go
through rbd_lock_del_request(), we trigger

    rbd_assert(list_empty(&rbd_dev->running_list));

in rbd_try_acquire_lock().

Cc: stable@vger.kernel.org
Fixes: 637cd060 ("rbd: new exclusive lock wait/wake code")
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Dongsheng Yang <dongsheng.yang@easystack.cn>
parent cd30e8bd
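
For illustration only, here is a minimal user-space C sketch of the pre-fix
failure mode. It is not the kernel code: the node type, push(),
wake_lock_waiters_buggy() and try_acquire_lock() are simplified hypothetical
stand-ins for the rbd structures in drivers/block/rbd.c. It models how
parking error-failed waiters on the running list leaves stale entries that
trip the running-list-empty assertion on the next acquire attempt.

/* User-space model only -- all names are simplified stand-ins,
 * not the actual rbd code. */
#include <assert.h>
#include <stdio.h>

struct node { struct node *next; int id; };

/* stand-ins for rbd_dev->acquiring_list and rbd_dev->running_list */
static struct node *acquiring, *running;

static void push(struct node **list, struct node *n)
{
	n->next = *list;
	*list = n;
}

/* models the pre-fix wake_lock_waiters(): every waiter is moved to
 * the running list, even when result is an error and nothing will
 * ever be flushed */
static void wake_lock_waiters_buggy(int result)
{
	while (acquiring) {
		struct node *n = acquiring;

		acquiring = n->next;
		printf("req %d scheduled with result %d\n", n->id, result);
		push(&running, n);	/* harmful on the error path */
	}
}

/* models the assertion in rbd_try_acquire_lock() */
static void try_acquire_lock(void)
{
	assert(running == NULL);	/* trips if failed reqs linger */
}

int main(void)
{
	struct node req = { .next = NULL, .id = 1 };

	push(&acquiring, &req);
	wake_lock_waiters_buggy(-110);	/* e.g. -ETIMEDOUT */
	/* the failed request's state machine has not gone through
	 * rbd_lock_del_request() yet when the lock is re-acquired: */
	try_acquire_lock();		/* assertion failure, as in the bug */
	return 0;
}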
@@ -3452,14 +3452,15 @@ static bool rbd_lock_add_request(struct rbd_img_request *img_req)
 static void rbd_lock_del_request(struct rbd_img_request *img_req)
 {
 	struct rbd_device *rbd_dev = img_req->rbd_dev;
-	bool need_wakeup;
+	bool need_wakeup = false;
 
 	lockdep_assert_held(&rbd_dev->lock_rwsem);
 	spin_lock(&rbd_dev->lock_lists_lock);
-	rbd_assert(!list_empty(&img_req->lock_item));
-	list_del_init(&img_req->lock_item);
-	need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
-		       list_empty(&rbd_dev->running_list));
+	if (!list_empty(&img_req->lock_item)) {
+		list_del_init(&img_req->lock_item);
+		need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+			       list_empty(&rbd_dev->running_list));
+	}
 	spin_unlock(&rbd_dev->lock_lists_lock);
 	if (need_wakeup)
 		complete(&rbd_dev->releasing_wait);
@@ -3842,14 +3843,19 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
 		return;
 	}
 
-	list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
+	while (!list_empty(&rbd_dev->acquiring_list)) {
+		img_req = list_first_entry(&rbd_dev->acquiring_list,
+					   struct rbd_img_request, lock_item);
 		mutex_lock(&img_req->state_mutex);
 		rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
+		if (!result)
+			list_move_tail(&img_req->lock_item,
+				       &rbd_dev->running_list);
+		else
+			list_del_init(&img_req->lock_item);
 		rbd_img_schedule(img_req, result);
 		mutex_unlock(&img_req->state_mutex);
 	}
 
-	list_splice_tail_init(&rbd_dev->acquiring_list,
-			      &rbd_dev->running_list);
 }
 
 static bool locker_equal(const struct ceph_locker *lhs,
...
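
A note on the reworked loop: the fix unlinks entries from acquiring_list
inside the loop body, and list_for_each_entry() reads the current entry's
next pointer only after the body has run, so it is not safe under such
deletion. Draining the list head with while (!list_empty()) plus
list_first_entry() is. Below is a minimal user-space sketch of that
delete-safe pattern, with hand-rolled stand-ins for the <linux/list.h>
helpers (simplified, not the kernel implementations).

#include <stddef.h>
#include <stdio.h>

/* Hand-rolled equivalents of the kernel list helpers used by the fix. */
struct list_head { struct list_head *prev, *next; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

static void list_move_tail(struct list_head *n, struct list_head *h)
{
	list_del_init(n);	/* simplified vs. the kernel version */
	list_add_tail(n, h);
}

struct img_request { struct list_head lock_item; int id; };

int main(void)
{
	struct list_head acquiring, running;
	struct img_request reqs[3];
	int result = -110;	/* pretend acquisition failed, e.g. -ETIMEDOUT */
	int i;

	INIT_LIST_HEAD(&acquiring);
	INIT_LIST_HEAD(&running);
	for (i = 0; i < 3; i++) {
		reqs[i].id = i;
		list_add_tail(&reqs[i].lock_item, &acquiring);
	}

	/* Delete-safe drain, mirroring the fixed wake_lock_waiters():
	 * the current entry is unlinked before the next one is looked up. */
	while (!list_empty(&acquiring)) {
		struct img_request *img_req =
		    list_first_entry(&acquiring, struct img_request, lock_item);

		if (!result)
			list_move_tail(&img_req->lock_item, &running);
		else
			list_del_init(&img_req->lock_item);	/* error path */
		printf("req %d scheduled with result %d\n",
		       img_req->id, result);
	}
	return 0;
}

Note that list_del_init() leaves the node self-linked, which is also why the
reworked rbd_lock_del_request() in the first hunk can simply skip requests
whose lock_item is already empty instead of asserting on it.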