Commit bbead745 authored by Ilya Dryomov

rbd: ignore unlock errors

Currently the lock_state is set to UNLOCKED (preventing further I/O),
but RELEASED_LOCK notification isn't sent.  Be consistent with userspace
and treat ceph_cls_unlock() errors as the image is unlocked.
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Jason Dillaman <dillaman@redhat.com>
parent 5769ed0c
@@ -3097,7 +3097,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
 /*
  * lock_rwsem must be held for write
  */
-static int rbd_unlock(struct rbd_device *rbd_dev)
+static void rbd_unlock(struct rbd_device *rbd_dev)
 {
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
 	char cookie[32];
@@ -3105,19 +3105,16 @@ static int rbd_unlock(struct rbd_device *rbd_dev)
 
 	WARN_ON(!__rbd_is_lock_owner(rbd_dev));
 
-	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
 	format_lock_cookie(rbd_dev, cookie);
 	ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
 			      RBD_LOCK_NAME, cookie);
-	if (ret && ret != -ENOENT) {
-		rbd_warn(rbd_dev, "cls_unlock failed: %d", ret);
-		return ret;
-	}
+	if (ret && ret != -ENOENT)
+		rbd_warn(rbd_dev, "failed to unlock: %d", ret);
 
+	/* treat errors as the image is unlocked */
+	rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
 	rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
 	queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
-	return 0;
 }
static int __rbd_notify_op_lock(struct rbd_device *rbd_dev, static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
@@ -3490,16 +3487,15 @@ static bool rbd_release_lock(struct rbd_device *rbd_dev)
 	if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
 		return false;
 
-	if (!rbd_unlock(rbd_dev))
-		/*
-		 * Give others a chance to grab the lock - we would re-acquire
-		 * almost immediately if we got new IO during ceph_osdc_sync()
-		 * otherwise.  We need to ack our own notifications, so this
-		 * lock_dwork will be requeued from rbd_wait_state_locked()
-		 * after wake_requests() in rbd_handle_released_lock().
-		 */
-		cancel_delayed_work(&rbd_dev->lock_dwork);
+	rbd_unlock(rbd_dev);
+	/*
+	 * Give others a chance to grab the lock - we would re-acquire
+	 * almost immediately if we got new IO during ceph_osdc_sync()
+	 * otherwise.  We need to ack our own notifications, so this
+	 * lock_dwork will be requeued from rbd_wait_state_locked()
+	 * after wake_requests() in rbd_handle_released_lock().
+	 */
+	cancel_delayed_work(&rbd_dev->lock_dwork);
 	return true;
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment