Commit 9a1050ad authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'ceph-for-5.1-rc2' of git://github.com/ceph/ceph-client

Pull ceph fixes from Ilya Dryomov:
 "A follow up for the new alloc_size logic and a blacklisting fix,
  marked for stable"

* tag 'ceph-for-5.1-rc2' of git://github.com/ceph/ceph-client:
  rbd: drop wait_for_latest_osdmap()
  libceph: wait for latest osdmap in ceph_monc_blacklist_add()
  rbd: set io_min, io_opt and discard_granularity to alloc_size
parents a5ed1e96 9d4a227f
...@@ -833,7 +833,7 @@ static int parse_rbd_opts_token(char *c, void *private) ...@@ -833,7 +833,7 @@ static int parse_rbd_opts_token(char *c, void *private)
pctx->opts->queue_depth = intval; pctx->opts->queue_depth = intval;
break; break;
case Opt_alloc_size: case Opt_alloc_size:
if (intval < 1) { if (intval < SECTOR_SIZE) {
pr_err("alloc_size out of range\n"); pr_err("alloc_size out of range\n");
return -EINVAL; return -EINVAL;
} }
...@@ -924,23 +924,6 @@ static void rbd_put_client(struct rbd_client *rbdc) ...@@ -924,23 +924,6 @@ static void rbd_put_client(struct rbd_client *rbdc)
kref_put(&rbdc->kref, rbd_client_release); kref_put(&rbdc->kref, rbd_client_release);
} }
/*
 * Make sure the client's osdmap is at least as new as the monitors'
 * latest published epoch, requesting and waiting for a newer map if
 * necessary (bounded by the mount timeout).
 *
 * Returns 0 on success or a negative error code.
 */
static int wait_for_latest_osdmap(struct ceph_client *client)
{
	u64 latest;
	int err;

	err = ceph_monc_get_version(&client->monc, "osdmap", &latest);
	if (err)
		return err;

	/* nothing to do if our map is already current */
	if (client->osdc.osdmap->epoch >= latest)
		return 0;

	ceph_osdc_maybe_request_map(&client->osdc);
	return ceph_monc_wait_osdmap(&client->monc, latest,
				     client->options->mount_timeout);
}
/* /*
* Get a ceph client with specific addr and configuration, if one does * Get a ceph client with specific addr and configuration, if one does
* not exist create it. Either way, ceph_opts is consumed by this * not exist create it. Either way, ceph_opts is consumed by this
...@@ -960,7 +943,8 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts) ...@@ -960,7 +943,8 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
* Using an existing client. Make sure ->pg_pools is up to * Using an existing client. Make sure ->pg_pools is up to
* date before we look up the pool id in do_rbd_add(). * date before we look up the pool id in do_rbd_add().
*/ */
ret = wait_for_latest_osdmap(rbdc->client); ret = ceph_wait_for_latest_osdmap(rbdc->client,
rbdc->client->options->mount_timeout);
if (ret) { if (ret) {
rbd_warn(NULL, "failed to get latest osdmap: %d", ret); rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
rbd_put_client(rbdc); rbd_put_client(rbdc);
...@@ -4203,12 +4187,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) ...@@ -4203,12 +4187,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
q->limits.max_sectors = queue_max_hw_sectors(q); q->limits.max_sectors = queue_max_hw_sectors(q);
blk_queue_max_segments(q, USHRT_MAX); blk_queue_max_segments(q, USHRT_MAX);
blk_queue_max_segment_size(q, UINT_MAX); blk_queue_max_segment_size(q, UINT_MAX);
blk_queue_io_min(q, objset_bytes); blk_queue_io_min(q, rbd_dev->opts->alloc_size);
blk_queue_io_opt(q, objset_bytes); blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
if (rbd_dev->opts->trim) { if (rbd_dev->opts->trim) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
q->limits.discard_granularity = objset_bytes; q->limits.discard_granularity = rbd_dev->opts->alloc_size;
blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
} }
......
...@@ -294,6 +294,8 @@ extern void ceph_destroy_client(struct ceph_client *client); ...@@ -294,6 +294,8 @@ extern void ceph_destroy_client(struct ceph_client *client);
extern int __ceph_open_session(struct ceph_client *client, extern int __ceph_open_session(struct ceph_client *client,
unsigned long started); unsigned long started);
extern int ceph_open_session(struct ceph_client *client); extern int ceph_open_session(struct ceph_client *client);
int ceph_wait_for_latest_osdmap(struct ceph_client *client,
unsigned long timeout);
/* pagevec.c */ /* pagevec.c */
extern void ceph_release_page_vector(struct page **pages, int num_pages); extern void ceph_release_page_vector(struct page **pages, int num_pages);
......
...@@ -738,7 +738,6 @@ int __ceph_open_session(struct ceph_client *client, unsigned long started) ...@@ -738,7 +738,6 @@ int __ceph_open_session(struct ceph_client *client, unsigned long started)
} }
EXPORT_SYMBOL(__ceph_open_session); EXPORT_SYMBOL(__ceph_open_session);
int ceph_open_session(struct ceph_client *client) int ceph_open_session(struct ceph_client *client)
{ {
int ret; int ret;
...@@ -754,6 +753,23 @@ int ceph_open_session(struct ceph_client *client) ...@@ -754,6 +753,23 @@ int ceph_open_session(struct ceph_client *client)
} }
EXPORT_SYMBOL(ceph_open_session); EXPORT_SYMBOL(ceph_open_session);
/*
 * Wait (up to @timeout) until the client's osdmap epoch has caught up
 * with the monitors' newest published "osdmap" version.  If the local
 * map is already current, return immediately.
 *
 * Returns 0 on success or a negative error code.
 */
int ceph_wait_for_latest_osdmap(struct ceph_client *client,
				unsigned long timeout)
{
	u64 newest;
	int err = ceph_monc_get_version(&client->monc, "osdmap", &newest);

	if (err)
		return err;

	/* already have the newest map — no need to wait */
	if (client->osdc.osdmap->epoch >= newest)
		return 0;

	ceph_osdc_maybe_request_map(&client->osdc);
	return ceph_monc_wait_osdmap(&client->monc, newest, timeout);
}
EXPORT_SYMBOL(ceph_wait_for_latest_osdmap);
static int __init init_ceph_lib(void) static int __init init_ceph_lib(void)
{ {
......
...@@ -922,6 +922,15 @@ int ceph_monc_blacklist_add(struct ceph_mon_client *monc, ...@@ -922,6 +922,15 @@ int ceph_monc_blacklist_add(struct ceph_mon_client *monc,
mutex_unlock(&monc->mutex); mutex_unlock(&monc->mutex);
ret = wait_generic_request(req); ret = wait_generic_request(req);
if (!ret)
/*
* Make sure we have the osdmap that includes the blacklist
* entry. This is needed to ensure that the OSDs pick up the
* new blacklist before processing any future requests from
* this client.
*/
ret = ceph_wait_for_latest_osdmap(monc->client, 0);
out: out:
put_generic_request(req); put_generic_request(req);
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment