Commit bb873b53 authored by Ilya Dryomov

libceph: switch to calc_target(), part 2

The crux of this is getting rid of ceph_osdc_build_request(), so that
MOSDOp can be encoded not before but after calc_target() calculates the
actual target.  Encoding now happens within ceph_osdc_start_request().

Also nuked is the accompanying bunch of pointers into the encoded
buffer that was used to update fields on each send - instead, the
entire front is re-encoded.  If we want to support target->name_len !=
base->name_len in the future, there is no other way, because oid is
surrounded by other fields in the encoded buffer.

Encoding OSD ops and adding data items to the request message were
mixed together in osd_req_encode_op().  While we want to re-encode OSD
ops, we don't want to add duplicate data items to the message when
resending, so all calls to ceph_osdc_msg_data_add() are factored out
into a new setup_request_data().
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
parent a66dd383
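
For orientation, here is a minimal before/after sketch of the caller-side pattern this commit establishes, assembled from the hunks below; the identifiers (osdc, req, snapc, offset, mtime, snap_id) are illustrative placeholders rather than code from any one caller:

	/* Before: the front was encoded up front, via ceph_osdc_build_request(). */
	ceph_osdc_build_request(req, offset, snapc, snap_id, &mtime);
	ret = ceph_osdc_start_request(osdc, req, false);

	/*
	 * After: the submitter only records its inputs on the request;
	 * encoding happens inside ceph_osdc_start_request(), once
	 * calc_target() has picked the actual target.
	 */
	req->r_data_offset = offset;	/* was the "off" argument */
	req->r_snapc = snapc;		/* writes: snap context */
	req->r_mtime = mtime;		/* writes: modification time */
	req->r_snapid = snap_id;	/* reads: snap to read, CEPH_NOSNAP o/w */
	ret = ceph_osdc_start_request(osdc, req, false);
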
@@ -1896,27 +1896,17 @@ static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
 {
 	struct rbd_img_request *img_request = obj_request->img_request;
 	struct ceph_osd_request *osd_req = obj_request->osd_req;
-	u64 snap_id;
-
-	rbd_assert(osd_req != NULL);
 
-	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
-	ceph_osdc_build_request(osd_req, obj_request->offset,
-			NULL, snap_id, NULL);
+	if (img_request)
+		osd_req->r_snapid = img_request->snap_id;
 }
 
 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
 {
-	struct rbd_img_request *img_request = obj_request->img_request;
 	struct ceph_osd_request *osd_req = obj_request->osd_req;
-	struct ceph_snap_context *snapc;
-	struct timespec mtime = CURRENT_TIME;
 
-	rbd_assert(osd_req != NULL);
-
-	snapc = img_request ? img_request->snapc : NULL;
-	ceph_osdc_build_request(osd_req, obj_request->offset,
-			snapc, CEPH_NOSNAP, &mtime);
+	osd_req->r_mtime = CURRENT_TIME;
+	osd_req->r_data_offset = obj_request->offset;
 }
 
 /*
...
@@ -376,8 +376,6 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 	req->r_callback = finish_read;
 	req->r_inode = inode;
 
-	ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
-
 	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
 	ret = ceph_osdc_start_request(osdc, req, false);
 	if (ret < 0)
@@ -1063,10 +1061,7 @@ static int ceph_writepages_start(struct address_space *mapping,
 			pages = NULL;
 		}
 
-		vino = ceph_vino(inode);
-		ceph_osdc_build_request(req, offset, snapc, vino.snap,
-					&inode->i_mtime);
-
+		req->r_mtime = inode->i_mtime;
 		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
 		BUG_ON(rc);
 		req = NULL;
@@ -1614,7 +1609,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 			goto out;
 	}
 
-	ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
+	req->r_mtime = inode->i_mtime;
 	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
 	if (!err)
 		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1657,7 +1652,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 			goto out_put;
 	}
 
-	ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
+	req->r_mtime = inode->i_mtime;
 	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
 	if (!err)
 		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1790,12 +1785,9 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
 	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
 				     0, false, true);
-	ceph_osdc_build_request(rd_req, 0, NULL, CEPH_NOSNAP,
-				&ci->vfs_inode.i_mtime);
 	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
 
-	ceph_osdc_build_request(wr_req, 0, NULL, CEPH_NOSNAP,
-				&ci->vfs_inode.i_mtime);
+	wr_req->r_mtime = ci->vfs_inode.i_mtime;
 	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
 
 	if (!err)
...
@@ -727,8 +727,8 @@ static void ceph_aio_retry_work(struct work_struct *work)
 	req->r_ops[0] = orig_req->r_ops[0];
 	osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
-	ceph_osdc_build_request(req, req->r_ops[0].extent.offset,
-				snapc, CEPH_NOSNAP, &aio_req->mtime);
+	req->r_mtime = aio_req->mtime;
+	req->r_data_offset = req->r_ops[0].extent.offset;
 
 	ceph_osdc_put_request(orig_req);
@@ -882,14 +882,12 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 						 (pos+len) | (PAGE_SIZE - 1));
 
 			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
+			req->r_mtime = mtime;
 		}
 
 		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
 						 false, false);
-
-		ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
-
 		if (aio_req) {
 			aio_req->total_len += len;
 			aio_req->num_reqs++;
@@ -1074,9 +1072,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
 	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 					false, true);
 
-	/* BUG_ON(vino.snap != CEPH_NOSNAP); */
-	ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
-
+	req->r_mtime = mtime;
 	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
 	if (!ret)
 		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1532,9 +1528,7 @@ static int ceph_zero_partial_object(struct inode *inode,
 			goto out;
 	}
 
-	ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
-				&inode->i_mtime);
-
+	req->r_mtime = inode->i_mtime;
 	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
 	if (!ret) {
 		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
...
@@ -104,7 +104,7 @@ struct ceph_osd_req_op {
 			struct ceph_osd_data response_data;
 			__u8 class_len;
 			__u8 method_len;
-			__u8 argc;
+			u32 indata_len;
 		} cls;
 		struct {
 			u64 cookie;
@@ -162,14 +162,6 @@ struct ceph_osd_request {
 	/* request osd ops array  */
 	unsigned int		r_num_ops;
 
-	/* these are updated on each send */
-	__le32           *r_request_osdmap_epoch;
-	__le32           *r_request_flags;
-	__le64           *r_request_pool;
-	void             *r_request_pgid;
-	__le32           *r_request_attempts;
-	struct ceph_eversion *r_request_reassert_version;
-
 	int               r_result;
 	int               r_got_reply;
 	int		  r_linger;
@@ -180,16 +172,22 @@ struct ceph_osd_request {
 	struct completion r_completion, r_safe_completion;
 	ceph_osdc_callback_t r_callback;
 	ceph_osdc_unsafe_callback_t r_unsafe_callback;
-	struct ceph_eversion r_reassert_version;
 	struct list_head  r_unsafe_item;
 
 	struct inode *r_inode;         	      /* for use by callbacks */
 	void *r_priv;			      /* ditto */
 
-	u64               r_snapid;
-	unsigned long     r_stamp;            /* send OR check time */
-
-	struct ceph_snap_context *r_snapc;    /* snap context for writes */
+	/* set by submitter */
+	u64 r_snapid;                         /* for reads, CEPH_NOSNAP o/w */
+	struct ceph_snap_context *r_snapc;    /* for writes */
+	struct timespec r_mtime;              /* ditto */
+	u64 r_data_offset;                    /* ditto */
+
+	/* internal */
+	unsigned long r_stamp;                /* jiffies, send or check time */
+	int r_attempts;
+	struct ceph_eversion r_replay_version; /* aka reassert_version */
+	u32 r_last_force_resend;
 
 	struct ceph_osd_req_op r_ops[];
 };
@@ -334,11 +332,6 @@ extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *
 					       gfp_t gfp_flags);
 int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp);
 
-extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
-				    struct ceph_snap_context *snapc,
-				    u64 snap_id,
-				    struct timespec *mtime);
-
 extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
 				      struct ceph_file_layout *layout,
 				      struct ceph_vino vino,
...
@@ -394,6 +394,13 @@ enum {
 	CEPH_OSD_FLAG_SKIPRWLOCKS =   0x10000,  /* skip rw locks */
 	CEPH_OSD_FLAG_IGNORE_OVERLAY = 0x20000, /* ignore pool overlay */
 	CEPH_OSD_FLAG_FLUSH =         0x40000,  /* this is part of flush */
+	CEPH_OSD_FLAG_MAP_SNAP_CLONE = 0x80000, /* map snap direct to clone id */
+	CEPH_OSD_FLAG_ENFORCE_SNAPC = 0x100000, /* use snapc provided even if
+						   pool uses pool snaps */
+	CEPH_OSD_FLAG_REDIRECTED   = 0x200000,  /* op has been redirected */
+	CEPH_OSD_FLAG_KNOWN_REDIR  = 0x400000,  /* redirect bit is authoritative */
+	CEPH_OSD_FLAG_FULL_TRY     = 0x800000,  /* try op despite full flag */
+	CEPH_OSD_FLAG_FULL_FORCE  = 0x1000000,  /* force op despite full flag */
 };
 
 enum {
...
@@ -145,6 +145,43 @@ static int monc_show(struct seq_file *s, void *p)
 	return 0;
 }
 
+static void dump_target(struct seq_file *s, struct ceph_osd_request_target *t)
+{
+	int i;
+
+	seq_printf(s, "osd%d\t%llu.%x\t[", t->osd, t->pgid.pool, t->pgid.seed);
+	for (i = 0; i < t->up.size; i++)
+		seq_printf(s, "%s%d", (!i ? "" : ","), t->up.osds[i]);
+	seq_printf(s, "]/%d\t[", t->up.primary);
+	for (i = 0; i < t->acting.size; i++)
+		seq_printf(s, "%s%d", (!i ? "" : ","), t->acting.osds[i]);
+	seq_printf(s, "]/%d\t%*pE\t0x%x", t->acting.primary,
+		   t->target_oid.name_len, t->target_oid.name, t->flags);
+	if (t->paused)
+		seq_puts(s, "\tP");
+}
+
+static void dump_request(struct seq_file *s, struct ceph_osd_request *req)
+{
+	int i;
+
+	seq_printf(s, "%llu\t", req->r_tid);
+	dump_target(s, &req->r_t);
+
+	seq_printf(s, "\t%d\t%u'%llu", req->r_attempts,
+		   le32_to_cpu(req->r_replay_version.epoch),
+		   le64_to_cpu(req->r_replay_version.version));
+
+	for (i = 0; i < req->r_num_ops; i++) {
+		struct ceph_osd_req_op *op = &req->r_ops[i];
+
+		seq_printf(s, "%s%s", (i == 0 ? "\t" : ","),
+			   ceph_osd_op_name(op->op));
+	}
+
+	seq_putc(s, '\n');
+}
+
 static int osdc_show(struct seq_file *s, void *pp)
 {
 	struct ceph_client *client = s->private;
@@ -154,32 +191,10 @@ static int osdc_show(struct seq_file *s, void *pp)
 	mutex_lock(&osdc->request_mutex);
 	for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
 		struct ceph_osd_request *req;
-		unsigned int i;
-		int opcode;
 
 		req = rb_entry(p, struct ceph_osd_request, r_node);
-		seq_printf(s, "%lld\tosd%d\t%lld.%x\t", req->r_tid,
-			   req->r_osd ? req->r_osd->o_osd : -1,
-			   req->r_t.pgid.pool, req->r_t.pgid.seed);
-		seq_printf(s, "%*pE", req->r_base_oid.name_len,
-			   req->r_base_oid.name);
-
-		if (req->r_reassert_version.epoch)
-			seq_printf(s, "\t%u'%llu",
-			   (unsigned int)le32_to_cpu(req->r_reassert_version.epoch),
-			   le64_to_cpu(req->r_reassert_version.version));
-		else
-			seq_printf(s, "\t");
-
-		for (i = 0; i < req->r_num_ops; i++) {
-			opcode = req->r_ops[i].op;
-			seq_printf(s, "%s%s", (i == 0 ? "\t" : ","),
-				   ceph_osd_op_name(opcode));
-		}
-
-		seq_printf(s, "\n");
+		dump_request(s, req);
 	}
 	mutex_unlock(&osdc->request_mutex);
 	return 0;
...
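
With the new dump_request()/dump_target() helpers, each line of the osdc debugfs file is tab-separated: tid, target osd, pgid, up set/primary, acting set/primary, target oid, flags, attempts, replay version, and op names. A hypothetical example line (all values invented for illustration) might look like:

	1226	osd1	2.d2f4c9c8	[1,0,2]/1	[1,0,2]/1	rbd_data.100a6b8b4567.0000000000000000	0x24	0	0'0	write
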
This diff is collapsed.