Commit 04f482fa authored by Patrick McHardy, committed by David S. Miller

connector: convert to synchronous netlink message processing

Commits 01a16b21 (netlink: kill eff_cap from struct netlink_skb_parms)
and c53fa1ed (netlink: kill loginuid/sessionid/sid members from struct
netlink_skb_parms) removed members from struct netlink_skb_parms that
depend on the current context; as a result, all netlink users are now
required to do synchronous message processing.

connector, however, queues received messages and processes them in a
work queue, which is no longer valid. This patch converts connector to
synchronous message processing by invoking the registered callback
handler directly from the netlink receive function.

In order to avoid invoking the callback with connector locks held, a
reference count is added to struct cn_callback_entry; the reference is
taken when a matching callback entry is found on the device's queue_list
and released after the callback handler has been invoked.
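
In condensed form, the new receive path looks like this (comments are
editorial; the full hunks appear in the diff below):

    /* Look up the callback entry under the queue lock and pin it. */
    spin_lock_bh(&dev->cbdev->queue_lock);
    list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
            if (cn_cb_equal(&i->id.id, &msg->id)) {
                    atomic_inc(&i->refcnt);   /* pin the entry */
                    cbq = i;
                    break;
            }
    }
    spin_unlock_bh(&dev->cbdev->queue_lock);

    if (cbq != NULL) {
            cbq->callback(msg, nsp);          /* runs without connector locks */
            kfree_skb(skb);
            cn_queue_release_callback(cbq);   /* drop ref; frees on last put */
            err = 0;
    }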
Signed-off-by: Patrick McHardy <kaber@trash.net>
Acked-by: Evgeniy Polyakov <zbr@ioremap.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e2666f84
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -31,24 +31,9 @@
 #include <linux/connector.h>
 #include <linux/delay.h>
 
-void cn_queue_wrapper(struct work_struct *work)
-{
-        struct cn_callback_entry *cbq =
-                container_of(work, struct cn_callback_entry, work);
-        struct cn_callback_data *d = &cbq->data;
-        struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(d->skb));
-        struct netlink_skb_parms *nsp = &NETLINK_CB(d->skb);
-
-        d->callback(msg, nsp);
-
-        kfree_skb(d->skb);
-        d->skb = NULL;
-
-        kfree(d->free);
-}
-
 static struct cn_callback_entry *
-cn_queue_alloc_callback_entry(const char *name, struct cb_id *id,
+cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
+                              struct cb_id *id,
                               void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
 {
         struct cn_callback_entry *cbq;
@@ -59,17 +44,23 @@ cn_queue_alloc_callback_entry(const char *name, struct cb_id *id,
                 return NULL;
         }
 
+        atomic_set(&cbq->refcnt, 1);
+
+        atomic_inc(&dev->refcnt);
+        cbq->pdev = dev;
+
         snprintf(cbq->id.name, sizeof(cbq->id.name), "%s", name);
         memcpy(&cbq->id.id, id, sizeof(struct cb_id));
-        cbq->data.callback = callback;
-        INIT_WORK(&cbq->work, &cn_queue_wrapper);
+        cbq->callback = callback;
+
         return cbq;
 }
 
-static void cn_queue_free_callback(struct cn_callback_entry *cbq)
+void cn_queue_release_callback(struct cn_callback_entry *cbq)
 {
-        flush_workqueue(cbq->pdev->cn_queue);
+        if (!atomic_dec_and_test(&cbq->refcnt))
+                return;
 
+        atomic_dec(&cbq->pdev->refcnt);
         kfree(cbq);
 }
 
@@ -85,13 +76,10 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
         struct cn_callback_entry *cbq, *__cbq;
         int found = 0;
 
-        cbq = cn_queue_alloc_callback_entry(name, id, callback);
+        cbq = cn_queue_alloc_callback_entry(dev, name, id, callback);
         if (!cbq)
                 return -ENOMEM;
 
-        atomic_inc(&dev->refcnt);
-        cbq->pdev = dev;
-
         spin_lock_bh(&dev->queue_lock);
         list_for_each_entry(__cbq, &dev->queue_list, callback_entry) {
                 if (cn_cb_equal(&__cbq->id.id, id)) {
@@ -104,8 +92,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
         spin_unlock_bh(&dev->queue_lock);
 
         if (found) {
-                cn_queue_free_callback(cbq);
-                atomic_dec(&dev->refcnt);
+                cn_queue_release_callback(cbq);
                 return -EINVAL;
         }
 
@@ -130,10 +117,8 @@ void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
         }
         spin_unlock_bh(&dev->queue_lock);
 
-        if (found) {
-                cn_queue_free_callback(cbq);
-                atomic_dec(&dev->refcnt);
-        }
+        if (found)
+                cn_queue_release_callback(cbq);
 }
 
 struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
@@ -151,12 +136,6 @@ struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *nls)
 
         dev->nls = nls;
 
-        dev->cn_queue = alloc_ordered_workqueue(dev->name, 0);
-        if (!dev->cn_queue) {
-                kfree(dev);
-                return NULL;
-        }
-
         return dev;
 }
 
@@ -164,9 +143,6 @@ void cn_queue_free_dev(struct cn_queue_dev *dev)
 {
         struct cn_callback_entry *cbq, *n;
 
-        flush_workqueue(dev->cn_queue);
-        destroy_workqueue(dev->cn_queue);
-
         spin_lock_bh(&dev->queue_lock);
         list_for_each_entry_safe(cbq, n, &dev->queue_list, callback_entry)
                 list_del(&cbq->callback_entry);
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -122,51 +122,28 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
  */
 static int cn_call_callback(struct sk_buff *skb)
 {
-        struct cn_callback_entry *__cbq, *__new_cbq;
+        struct cn_callback_entry *i, *cbq = NULL;
         struct cn_dev *dev = &cdev;
         struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
+        struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
         int err = -ENODEV;
 
         spin_lock_bh(&dev->cbdev->queue_lock);
-        list_for_each_entry(__cbq, &dev->cbdev->queue_list, callback_entry) {
-                if (cn_cb_equal(&__cbq->id.id, &msg->id)) {
-                        if (likely(!work_pending(&__cbq->work) &&
-                                   __cbq->data.skb == NULL)) {
-                                __cbq->data.skb = skb;
-
-                                if (queue_work(dev->cbdev->cn_queue,
-                                               &__cbq->work))
-                                        err = 0;
-                                else
-                                        err = -EINVAL;
-                        } else {
-                                struct cn_callback_data *d;
-
-                                err = -ENOMEM;
-                                __new_cbq = kzalloc(sizeof(struct cn_callback_entry), GFP_ATOMIC);
-                                if (__new_cbq) {
-                                        d = &__new_cbq->data;
-                                        d->skb = skb;
-                                        d->callback = __cbq->data.callback;
-                                        d->free = __new_cbq;
-
-                                        INIT_WORK(&__new_cbq->work,
-                                                  &cn_queue_wrapper);
-
-                                        if (queue_work(dev->cbdev->cn_queue,
-                                                       &__new_cbq->work))
-                                                err = 0;
-                                        else {
-                                                kfree(__new_cbq);
-                                                err = -EINVAL;
-                                        }
-                                }
-                        }
-
+        list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
+                if (cn_cb_equal(&i->id.id, &msg->id)) {
+                        atomic_inc(&i->refcnt);
+                        cbq = i;
                         break;
                 }
         }
         spin_unlock_bh(&dev->cbdev->queue_lock);
 
+        if (cbq != NULL) {
+                cbq->callback(msg, nsp);
+                kfree_skb(skb);
+                cn_queue_release_callback(cbq);
+                err = 0;
+        }
+
         return err;
 }
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -88,8 +88,6 @@ struct cn_queue_dev {
         atomic_t refcnt;
         unsigned char name[CN_CBQ_NAMELEN];
 
-        struct workqueue_struct *cn_queue;
-
         struct list_head queue_list;
         spinlock_t queue_lock;
 
@@ -101,20 +99,13 @@ struct cn_callback_id {
         struct cb_id id;
 };
 
-struct cn_callback_data {
-        struct sk_buff *skb;
-        void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
-
-        void *free;
-};
-
 struct cn_callback_entry {
         struct list_head callback_entry;
-        struct work_struct work;
+        atomic_t refcnt;
         struct cn_queue_dev *pdev;
 
         struct cn_callback_id id;
-        struct cn_callback_data data;
+        void (*callback) (struct cn_msg *, struct netlink_skb_parms *);
 
         u32 seq, group;
 };
 
@@ -138,13 +129,12 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
                           struct cb_id *id,
                           void (*callback)(struct cn_msg *, struct netlink_skb_parms *));
 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
+void cn_queue_release_callback(struct cn_callback_entry *);
 
 struct cn_queue_dev *cn_queue_alloc_dev(const char *name, struct sock *);
 void cn_queue_free_dev(struct cn_queue_dev *dev);
 
 int cn_cb_equal(struct cb_id *, struct cb_id *);
 
-void cn_queue_wrapper(struct work_struct *work);
-
 #endif /* __KERNEL__ */
 #endif /* __CONNECTOR_H */
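
For context, here is a minimal, hypothetical example (not part of this
patch) of a module registering a connector callback after this change.
Because the callback is now invoked synchronously from the netlink
receive path rather than from a work queue, it must not block for long.
The cb_id values and names below are illustrative only:

    #include <linux/module.h>
    #include <linux/connector.h>

    /* Hypothetical example id; real users reserve their own idx/val. */
    static struct cb_id my_cb_id = { .idx = CN_NETLINK_USERS + 3, .val = 0x1 };

    static void my_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
    {
            /* Runs directly from cn_call_callback(); the callback entry
             * is refcount-pinned for the duration of this call. */
            pr_info("connector msg: seq %u, ack %u, len %u\n",
                    msg->seq, msg->ack, (unsigned int)msg->len);
    }

    static int __init my_init(void)
    {
            return cn_add_callback(&my_cb_id, "my_cn_example", my_cn_callback);
    }

    static void __exit my_exit(void)
    {
            cn_del_callback(&my_cb_id);
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");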