Commit 086a6f68 authored by Vitaly Kuznetsov, committed by Greg Kroah-Hartman

Drivers: hv: vss: switch to using the hvutil_device_state state machine

Switch to using the hvutil_device_state state machine instead of vss_transaction.active.

State transitions are:
-> HVUTIL_DEVICE_INIT when driver loads or on device release
-> HVUTIL_READY if the handshake was successful
-> HVUTIL_HOSTMSG_RECEIVED when there is a non-negotiation message from the host
-> HVUTIL_USERSPACE_REQ after we send the message to the userspace daemon
   -> HVUTIL_USERSPACE_RECV after/if the userspace daemon has replied
-> HVUTIL_READY after we respond to the host
-> HVUTIL_DEVICE_DYING on driver unload
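
For reference, a minimal sketch of the state enum this patch relies on (the real
definition lives in the hv_utils driver headers, not in this patch; the
declaration order matters because the driver classifies states with < and >
comparisons against HVUTIL_READY):

enum hvutil_device_state {
	HVUTIL_DEVICE_INIT = 0,  /* driver is loaded, daemon not registered */
	HVUTIL_READY,            /* userspace daemon registered, no transaction */
	HVUTIL_HOSTMSG_RECEIVED, /* non-negotiation message received from the host */
	HVUTIL_USERSPACE_REQ,    /* message forwarded to the userspace daemon */
	HVUTIL_USERSPACE_RECV,   /* reply received from the userspace daemon */
	HVUTIL_DEVICE_DYING,     /* driver unload in progress */
};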

In hv_vss_onchannelcallback(), process ICMSGTYPE_NEGOTIATE messages even when
the userspace daemon is disconnected; otherwise the host may conclude that we
don't support VSS and disable the service completely.
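
With the state machine this falls out of the deferral check at the top of the
callback: only an in-flight transaction (a state above HVUTIL_READY) defers
channel processing, so the unregistered state (HVUTIL_DEVICE_INIT) no longer
blocks negotiation. A simplified sketch, assuming the existing callback stashes
the channel context for later polling:

	if (vss_transaction.state > HVUTIL_READY) {
		/* Transaction in flight: revisit this channel once it completes. */
		vss_transaction.vss_context = context;
		return;
	}
	/* Otherwise the packet is read and ICMSGTYPE_NEGOTIATE is answered
	 * here even before the daemon has registered. */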

Unfortunately, there is no good way to figure out that the userspace daemon
has died (unless we start treating all timeouts as such), so add protection
against processing new VSS_OP_REGISTER messages while we are in the middle of a
transaction (the HVUTIL_USERSPACE_REQ or HVUTIL_USERSPACE_RECV state).
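
A condensed view of that guard in vss_cn_callback(), lifted from the hunk
below (not a drop-in snippet on its own):

	/*
	 * Ignore a (re-)registration attempt while a transaction is in
	 * flight; a daemon restarting mid-transaction would otherwise
	 * confuse the state machine.
	 */
	if (vss_transaction.state > HVUTIL_READY &&
	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER)
		return;
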
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Tested-by: Alex Ng <alexng@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 97bf16cd
@@ -33,16 +33,21 @@
 #define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000))
 
 /*
- * Global state maintained for transaction that is being processed.
- * Note that only one transaction can be active at any point in time.
+ * Global state maintained for transaction that is being processed. For a class
+ * of integration services, including the "VSS service", the specified protocol
+ * is a "request/response" protocol which means that there can only be single
+ * outstanding transaction from the host at any given point in time. We use
+ * this to simplify memory management in this driver - we cache and process
+ * only one message at a time.
  *
- * This state is set when we receive a request from the host; we
- * cleanup this state when the transaction is completed - when we respond
- * to the host with the key value.
+ * While the request/response protocol is guaranteed by the host, we further
+ * ensure this by serializing packet processing in this driver - we do not
+ * read additional packets from the VMBUs until the current packet is fully
+ * handled.
  */
 
 static struct {
-	bool active; /* transaction status - active or not */
+	int state;   /* hvutil_device_state */
 	int recv_len; /* number of bytes received. */
 	struct vmbus_channel *recv_channel; /* chn we got the request */
 	u64 recv_req_id; /* request ID. */
@@ -75,6 +80,10 @@ static void vss_timeout_func(struct work_struct *dummy)
 	pr_warn("VSS: timeout waiting for daemon to reply\n");
 	vss_respond_to_host(HV_E_FAIL);
 
+	/* Transaction is finished, reset the state. */
+	if (vss_transaction.state > HVUTIL_READY)
+		vss_transaction.state = HVUTIL_READY;
+
 	hv_poll_channel(vss_transaction.vss_context,
 			hv_vss_onchannelcallback);
 }
@@ -86,15 +95,32 @@ vss_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
 	vss_msg = (struct hv_vss_msg *)msg->data;
 
-	if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER) {
+	/*
+	 * Don't process registration messages if we're in the middle of
+	 * a transaction processing.
+	 */
+	if (vss_transaction.state > HVUTIL_READY &&
+	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER)
+		return;
+
+	if (vss_transaction.state == HVUTIL_DEVICE_INIT &&
+	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER) {
 		pr_info("VSS daemon registered\n");
-		vss_transaction.active = false;
+		vss_transaction.state = HVUTIL_READY;
+	} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
+		vss_transaction.state = HVUTIL_USERSPACE_RECV;
+		if (cancel_delayed_work_sync(&vss_timeout_work)) {
+			vss_respond_to_host(vss_msg->error);
+			/* Transaction is finished, reset the state. */
+			vss_transaction.state = HVUTIL_READY;
+			hv_poll_channel(vss_transaction.vss_context,
+					hv_vss_onchannelcallback);
+		}
+	} else {
+		/* This is a spurious call! */
+		pr_warn("VSS: Transaction not active\n");
+		return;
 	}
-	if (cancel_delayed_work_sync(&vss_timeout_work))
-		vss_respond_to_host(vss_msg->error);
-	hv_poll_channel(vss_transaction.vss_context,
-			hv_vss_onchannelcallback);
 }
@@ -105,6 +131,10 @@ static void vss_send_op(struct work_struct *dummy)
 	struct cn_msg *msg;
 	struct hv_vss_msg *vss_msg;
 
+	/* The transaction state is wrong. */
+	if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED)
+		return;
+
 	msg = kzalloc(sizeof(*msg) + sizeof(*vss_msg), GFP_ATOMIC);
 	if (!msg)
 		return;
@@ -117,12 +147,16 @@ static void vss_send_op(struct work_struct *dummy)
 	vss_msg->vss_hdr.operation = op;
 	msg->len = sizeof(struct hv_vss_msg);
 
+	vss_transaction.state = HVUTIL_USERSPACE_REQ;
 	rc = cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
 	if (rc) {
 		pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
-		if (cancel_delayed_work_sync(&vss_timeout_work))
+		if (cancel_delayed_work_sync(&vss_timeout_work)) {
 			vss_respond_to_host(HV_E_FAIL);
+			vss_transaction.state = HVUTIL_READY;
+		}
 	}
 	kfree(msg);
 
 	return;
@@ -140,17 +174,6 @@ vss_respond_to_host(int error)
 	struct vmbus_channel *channel;
 	u64 req_id;
 
-	/*
-	 * If a transaction is not active; log and return.
-	 */
-	if (!vss_transaction.active) {
-		/*
-		 * This is a spurious call!
-		 */
-		pr_warn("VSS: Transaction not active\n");
-		return;
-	}
-
 	/*
 	 * Copy the global state for completing the transaction. Note that
 	 * only one transaction can be active at a time.
@@ -159,7 +182,6 @@ vss_respond_to_host(int error)
 	buf_len = vss_transaction.recv_len;
 	channel = vss_transaction.recv_channel;
 	req_id = vss_transaction.recv_req_id;
-	vss_transaction.active = false;
 
 	icmsghdrp = (struct icmsg_hdr *)
 			&recv_buffer[sizeof(struct vmbuspipe_hdr)];
@@ -196,7 +218,7 @@ void hv_vss_onchannelcallback(void *context)
 	struct icmsg_hdr *icmsghdrp;
 	struct icmsg_negotiate *negop = NULL;
 
-	if (vss_transaction.active) {
+	if (vss_transaction.state > HVUTIL_READY) {
 		/*
 		 * We will defer processing this callback once
 		 * the current transaction is complete.
@@ -230,7 +252,6 @@ void hv_vss_onchannelcallback(void *context)
 		vss_transaction.recv_len = recvlen;
 		vss_transaction.recv_channel = channel;
 		vss_transaction.recv_req_id = requestid;
-		vss_transaction.active = true;
 		vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
 
 		switch (vss_msg->vss_hdr.operation) {
@@ -247,6 +268,12 @@
 		 */
 		case VSS_OP_FREEZE:
 		case VSS_OP_THAW:
+			if (vss_transaction.state < HVUTIL_READY) {
+				/* Userspace is not registered yet */
+				vss_respond_to_host(HV_E_FAIL);
+				return;
+			}
+			vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
 			schedule_work(&vss_send_op_work);
 			schedule_delayed_work(&vss_timeout_work,
 						VSS_USERSPACE_TIMEOUT);
@@ -297,12 +324,14 @@ hv_vss_init(struct hv_util_service *srv)
 	 * Defer processing channel callbacks until the daemon
 	 * has registered.
 	 */
-	vss_transaction.active = true;
+	vss_transaction.state = HVUTIL_DEVICE_INIT;
 	return 0;
 }
 
 void hv_vss_deinit(void)
 {
+	vss_transaction.state = HVUTIL_DEVICE_DYING;
 	cn_del_callback(&vss_id);
 	cancel_delayed_work_sync(&vss_timeout_work);
 	cancel_work_sync(&vss_send_op_work);