Commit e04df98a authored by Bjorn Andersson, committed by David S. Miller

net: qrtr: Remove receive worker

Rather than enqueuing messages and scheduling a worker to deliver them
to the individual sockets, we can now, thanks to the previous work, move
this delivery directly into the endpoint callback.

This saves us a context switch per incoming message and removes the
window in which an opportunistic suspend can happen between a message
arriving from the endpoint and it ending up in the socket's receive
buffer.
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f16a4b26
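
For orientation before the diff: a condensed before/after sketch of the
receive path, paraphrased from the hunks below. It is a sketch, not a
compilable excerpt — the header parsing, locking and the err exit around
these lines are elided, and the local variables are assumed from the
patch context.

	/* Old path (removed): qrtr_endpoint_post() queued the skb and woke
	 * a worker; qrtr_node_rx_work() later dequeued it and delivered it
	 * to the destination socket.
	 */
	skb_queue_tail(&node->rx_queue, skb);
	schedule_work(&node->work);

	/* New path: deliver inline from the endpoint callback, saving the
	 * worker context switch.
	 */
	qrtr_node_assign(node, cb->src_node);
	if (cb->type == QRTR_TYPE_RESUME_TX) {
		qrtr_tx_resume(node, skb);		/* flow-control resume-tx handling */
	} else {
		ipc = qrtr_port_lookup(cb->dst_port);	/* takes a port reference */
		if (!ipc)
			goto err;			/* err exit frees the skb */
		if (sock_queue_rcv_skb(&ipc->sk, skb))
			goto err;
		qrtr_port_put(ipc);
	}

One behavioral difference worth noting: the removed worker dropped the
skb with kfree_skb() when the port lookup or sock_queue_rcv_skb()
failed, whereas the inline path takes the existing err exit of
qrtr_endpoint_post() (not shown in the hunks), so the endpoint driver
now sees an error return in those cases.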
net/qrtr/qrtr.c

@@ -119,7 +119,6 @@ static DEFINE_MUTEX(qrtr_port_lock);
  * @qrtr_tx_flow: tree of qrtr_tx_flow, keyed by node << 32 | port
  * @qrtr_tx_lock: lock for qrtr_tx_flow inserts
  * @rx_queue: receive queue
- * @work: scheduled work struct for recv work
  * @item: list item for broadcast list
  */
 struct qrtr_node {
@@ -132,7 +131,6 @@ struct qrtr_node {
 	struct mutex qrtr_tx_lock; /* for qrtr_tx_flow */
 
 	struct sk_buff_head rx_queue;
-	struct work_struct work;
 	struct list_head item;
 };
 
@@ -157,6 +155,8 @@ static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 			      int type, struct sockaddr_qrtr *from,
 			      struct sockaddr_qrtr *to);
+static struct qrtr_sock *qrtr_port_lookup(int port);
+static void qrtr_port_put(struct qrtr_sock *ipc);
 
 /* Release node resources and free the node.
  *
@@ -178,7 +178,6 @@ static void __qrtr_node_release(struct kref *kref)
 	list_del(&node->item);
 	mutex_unlock(&qrtr_node_lock);
 
-	cancel_work_sync(&node->work);
 	skb_queue_purge(&node->rx_queue);
 
 	/* Free tx flow counters */
@@ -422,6 +421,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 	struct qrtr_node *node = ep->node;
 	const struct qrtr_hdr_v1 *v1;
 	const struct qrtr_hdr_v2 *v2;
+	struct qrtr_sock *ipc;
 	struct sk_buff *skb;
 	struct qrtr_cb *cb;
 	unsigned int size;
@@ -486,8 +486,20 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 
 	skb_put_data(skb, data + hdrlen, size);
 
-	skb_queue_tail(&node->rx_queue, skb);
-	schedule_work(&node->work);
+	qrtr_node_assign(node, cb->src_node);
+
+	if (cb->type == QRTR_TYPE_RESUME_TX) {
+		qrtr_tx_resume(node, skb);
+	} else {
+		ipc = qrtr_port_lookup(cb->dst_port);
+		if (!ipc)
+			goto err;
+
+		if (sock_queue_rcv_skb(&ipc->sk, skb))
+			goto err;
+
+		qrtr_port_put(ipc);
+	}
 
 	return 0;
 
@@ -522,40 +534,6 @@ static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt)
 	return skb;
 }
 
-static struct qrtr_sock *qrtr_port_lookup(int port);
-static void qrtr_port_put(struct qrtr_sock *ipc);
-
-/* Handle and route a received packet.
- *
- * This will auto-reply with resume-tx packet as necessary.
- */
-static void qrtr_node_rx_work(struct work_struct *work)
-{
-	struct qrtr_node *node = container_of(work, struct qrtr_node, work);
-	struct sk_buff *skb;
-
-	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
-		struct qrtr_sock *ipc;
-		struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
-
-		qrtr_node_assign(node, cb->src_node);
-
-		if (cb->type == QRTR_TYPE_RESUME_TX) {
-			qrtr_tx_resume(node, skb);
-		} else {
-			ipc = qrtr_port_lookup(cb->dst_port);
-			if (!ipc) {
-				kfree_skb(skb);
-			} else {
-				if (sock_queue_rcv_skb(&ipc->sk, skb))
-					kfree_skb(skb);
-
-				qrtr_port_put(ipc);
-			}
-		}
-	}
-}
-
 /**
  * qrtr_endpoint_register() - register a new endpoint
  * @ep: endpoint to register
@@ -575,7 +553,6 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
 	if (!node)
 		return -ENOMEM;
 
-	INIT_WORK(&node->work, qrtr_node_rx_work);
 	kref_init(&node->ref);
 	mutex_init(&node->ep_lock);
 	skb_queue_head_init(&node->rx_queue);