Commit dbad185d authored by Sage Weil

ceph: drop src address(es) from message header [new protocol feature]

The CEPH_FEATURE_NOSRCADDR protocol feature avoids putting the full source
address in each message header (twice).  This patch switches the client to
the new scheme, and _requires_ this feature on the server.  The server
will support both the old and new schemes.  That means an old client will
work with a new server, but a new client will not work with an old server.
Signed-off-by: Sage Weil <sage@newdream.net>
parent a5ee751c
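
To make the compatibility rule in the message concrete, here is a minimal userspace sketch (not kernel code; check_required_features() and main() are illustrative names only) of how a required-feature check like the one process_connect() gains below plays out: the client masks the server's advertised feature bits against its required set and refuses the connection if anything is missing. The bit values are the ones introduced by this patch.

/* Illustrative sketch only -- not the kernel implementation. */
#include <stdio.h>
#include <stdint.h>

#define CEPH_FEATURE_UID        1
#define CEPH_FEATURE_NOSRCADDR  2

/* what the new client requires from its peer */
#define CEPH_FEATURE_REQUIRED_CLIENT  CEPH_FEATURE_NOSRCADDR

/* hypothetical helper: returns 0 if the peer is acceptable, -1 otherwise */
static int check_required_features(uint64_t server_feat)
{
	uint64_t req_feat = CEPH_FEATURE_REQUIRED_CLIENT;
	uint64_t missing = req_feat & ~server_feat;

	if (missing) {
		fprintf(stderr, "missing required features: %llx\n",
			(unsigned long long)missing);
		return -1; /* the real client fails the protocol here */
	}
	return 0;
}

int main(void)
{
	/* old server: advertises no feature bits -> rejected */
	printf("old server -> %d\n", check_required_features(0));
	/* new server: advertises UID|NOSRCADDR -> accepted */
	printf("new server -> %d\n",
	       check_required_features(CEPH_FEATURE_UID | CEPH_FEATURE_NOSRCADDR));
	return 0;
}

The server side keeps both schemes, which is why its SUPPORTED masks below include NOSRCADDR while its REQUIRED masks do not.
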
@@ -53,8 +53,17 @@
 /*
  * feature bits
  */
-#define CEPH_FEATURE_SUPPORTED 0
-#define CEPH_FEATURE_REQUIRED 0
+#define CEPH_FEATURE_UID 1
+#define CEPH_FEATURE_NOSRCADDR 2
+#define CEPH_FEATURE_SUPPORTED_MON CEPH_FEATURE_UID|CEPH_FEATURE_NOSRCADDR
+#define CEPH_FEATURE_REQUIRED_MON CEPH_FEATURE_UID
+#define CEPH_FEATURE_SUPPORTED_MDS CEPH_FEATURE_UID|CEPH_FEATURE_NOSRCADDR
+#define CEPH_FEATURE_REQUIRED_MDS CEPH_FEATURE_UID
+#define CEPH_FEATURE_SUPPORTED_OSD CEPH_FEATURE_UID|CEPH_FEATURE_NOSRCADDR
+#define CEPH_FEATURE_REQUIRED_OSD CEPH_FEATURE_UID
+#define CEPH_FEATURE_SUPPORTED_CLIENT CEPH_FEATURE_NOSRCADDR
+#define CEPH_FEATURE_REQUIRED_CLIENT CEPH_FEATURE_NOSRCADDR
 /*
@@ -663,7 +663,7 @@ static void prepare_write_connect(struct ceph_messenger *msgr,
 	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
 	     con->connect_seq, global_seq, proto);
-	con->out_connect.features = CEPH_FEATURE_SUPPORTED;
+	con->out_connect.features = CEPH_FEATURE_SUPPORTED_CLIENT;
 	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
 	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
 	con->out_connect.global_seq = cpu_to_le32(global_seq);
@@ -1126,8 +1126,8 @@ static void fail_protocol(struct ceph_connection *con)
 static int process_connect(struct ceph_connection *con)
 {
-	u64 sup_feat = CEPH_FEATURE_SUPPORTED;
-	u64 req_feat = CEPH_FEATURE_REQUIRED;
+	u64 sup_feat = CEPH_FEATURE_SUPPORTED_CLIENT;
+	u64 req_feat = CEPH_FEATURE_REQUIRED_CLIENT;
 	u64 server_feat = le64_to_cpu(con->in_reply.features);
 	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
@@ -1514,14 +1514,14 @@ static void process_message(struct ceph_connection *con)
 	/* if first message, set peer_name */
 	if (con->peer_name.type == 0)
-		con->peer_name = msg->hdr.src.name;
+		con->peer_name = msg->hdr.src;
 	con->in_seq++;
 	mutex_unlock(&con->mutex);
 	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
 	     msg, le64_to_cpu(msg->hdr.seq),
-	     ENTITY_NAME(msg->hdr.src.name),
+	     ENTITY_NAME(msg->hdr.src),
 	     le16_to_cpu(msg->hdr.type),
 	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
 	     le32_to_cpu(msg->hdr.front_len),
@@ -1987,9 +1987,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
 	}
 	/* set src+dst */
-	msg->hdr.src.name = con->msgr->inst.name;
-	msg->hdr.src.addr = con->msgr->my_enc_addr;
-	msg->hdr.orig_src = msg->hdr.src;
+	msg->hdr.src = con->msgr->inst.name;
 	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
@@ -120,7 +120,7 @@ struct ceph_msg_connect_reply {
 /*
  * message header
  */
-struct ceph_msg_header {
+struct ceph_msg_header_old {
 	__le64 seq; /* message seq# for this session */
 	__le64 tid; /* transaction id */
 	__le16 type; /* message type */
@@ -138,6 +138,24 @@ struct ceph_msg_header {
 	__le32 crc; /* header crc32c */
 } __attribute__ ((packed));
+struct ceph_msg_header {
+	__le64 seq; /* message seq# for this session */
+	__le64 tid; /* transaction id */
+	__le16 type; /* message type */
+	__le16 priority; /* priority. higher value == higher priority */
+	__le16 version; /* version of message encoding */
+	__le32 front_len; /* bytes in main payload */
+	__le32 middle_len;/* bytes in middle payload */
+	__le32 data_len; /* bytes of data payload */
+	__le16 data_off; /* sender: include full offset;
+			    receiver: mask against ~PAGE_MASK */
+	struct ceph_entity_name src;
+	__le32 reserved;
+	__le32 crc; /* header crc32c */
+} __attribute__ ((packed));
 #define CEPH_MSG_PRIO_LOW 64
 #define CEPH_MSG_PRIO_DEFAULT 127
 #define CEPH_MSG_PRIO_HIGH 196
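
For scale, the replacement header in the last hunk packs down to 53 bytes. Below is a standalone sketch (not from the commit) that mirrors its layout with host fixed-width types; the kernel's __le fields are little-endian on the wire, and ceph_entity_name is assumed here to be the packed type/num pair from the ceph headers.

/* Sketch only: mirrors the new ceph_msg_header with host types. */
#include <stdio.h>
#include <stdint.h>

struct entity_name {           /* assumed layout of ceph_entity_name */
	uint8_t  type;
	uint64_t num;
} __attribute__ ((packed));

struct msg_header_new {        /* same field order as the new struct above */
	uint64_t seq;
	uint64_t tid;
	uint16_t type;
	uint16_t priority;
	uint16_t version;
	uint32_t front_len;
	uint32_t middle_len;
	uint32_t data_len;
	uint16_t data_off;
	struct entity_name src;
	uint32_t reserved;
	uint32_t crc;
} __attribute__ ((packed));

int main(void)
{
	/* 8+8+2+2+2+4+4+4+2+9+4+4 = 53 bytes per message header */
	printf("new header: %zu bytes\n", sizeof(struct msg_header_new));
	return 0;
}

The old header carried the sender's full address alongside the name, and again in orig_src (per the commit message and the ceph_con_send hunk); that is the per-message overhead NOSRCADDR removes.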