Commit fe24371d authored by Alexander Aring's avatar Alexander Aring Committed by Marcel Holtmann

mac802154: tx: remove kmalloc in xmit hotpath

This patch removes the kmalloc allocation for workqueue data. This patch
replaces the kmalloc and uses the control block of skb. The control block
has enough space and isn't used by any other layer in this case.
Signed-off-by: Alexander Aring <alex.aring@gmail.com>
Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
parent 50c6fb99
......@@ -30,7 +30,7 @@
/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
* packets through the workqueue.
*/
struct xmit_work {
struct wpan_xmit_cb {
struct sk_buff *skb;
struct work_struct work;
struct ieee802154_local *local;
......@@ -38,50 +38,54 @@ struct xmit_work {
u8 page;
};
/* Map an skb's control block onto our per-skb transmit state.
 * The compile-time check guarantees the state actually fits in skb->cb.
 */
static inline struct wpan_xmit_cb *wpan_xmit_cb(const struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct wpan_xmit_cb) > sizeof(skb->cb));

	return (struct wpan_xmit_cb *)skb->cb;
}
static void mac802154_xmit_worker(struct work_struct *work)
{
struct xmit_work *xw = container_of(work, struct xmit_work, work);
struct wpan_xmit_cb *cb = container_of(work, struct wpan_xmit_cb, work);
struct ieee802154_sub_if_data *sdata;
int res;
mutex_lock(&xw->local->phy->pib_lock);
if (xw->local->phy->current_channel != xw->chan ||
xw->local->phy->current_page != xw->page) {
res = xw->local->ops->set_channel(&xw->local->hw,
xw->page,
xw->chan);
mutex_lock(&cb->local->phy->pib_lock);
if (cb->local->phy->current_channel != cb->chan ||
cb->local->phy->current_page != cb->page) {
res = cb->local->ops->set_channel(&cb->local->hw, cb->page,
cb->chan);
if (res) {
pr_debug("set_channel failed\n");
goto out;
}
xw->local->phy->current_channel = xw->chan;
xw->local->phy->current_page = xw->page;
cb->local->phy->current_channel = cb->chan;
cb->local->phy->current_page = cb->page;
}
res = xw->local->ops->xmit(&xw->local->hw, xw->skb);
res = cb->local->ops->xmit(&cb->local->hw, cb->skb);
if (res)
pr_debug("transmission failed\n");
out:
mutex_unlock(&xw->local->phy->pib_lock);
mutex_unlock(&cb->local->phy->pib_lock);
/* Restart the netif queue on each sub_if_data object. */
rcu_read_lock();
list_for_each_entry_rcu(sdata, &xw->local->interfaces, list)
list_for_each_entry_rcu(sdata, &cb->local->interfaces, list)
netif_wake_queue(sdata->dev);
rcu_read_unlock();
dev_kfree_skb(xw->skb);
kfree(xw);
dev_kfree_skb(cb->skb);
}
static netdev_tx_t mac802154_tx(struct ieee802154_local *local,
struct sk_buff *skb, u8 page, u8 chan)
{
struct xmit_work *work;
struct ieee802154_sub_if_data *sdata;
struct wpan_xmit_cb *cb = wpan_xmit_cb(skb);
if (!(local->phy->channels_supported[page] & (1 << chan))) {
WARN_ON(1);
......@@ -101,25 +105,19 @@ static netdev_tx_t mac802154_tx(struct ieee802154_local *local,
if (skb_cow_head(skb, local->hw.extra_tx_headroom))
goto err_tx;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
kfree_skb(skb);
return NETDEV_TX_BUSY;
}
/* Stop the netif queue on each sub_if_data object. */
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list)
netif_stop_queue(sdata->dev);
rcu_read_unlock();
INIT_WORK(&work->work, mac802154_xmit_worker);
work->skb = skb;
work->local = local;
work->page = page;
work->chan = chan;
INIT_WORK(&cb->work, mac802154_xmit_worker);
cb->skb = skb;
cb->local = local;
cb->page = page;
cb->chan = chan;
queue_work(local->workqueue, &work->work);
queue_work(local->workqueue, &cb->work);
return NETDEV_TX_OK;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment