Commit 46d55fce authored by Sergey Matyukevich, committed by Kalle Valo

qtnfmac: send EAPOL frames via control path

Send EAPOL frames via the control path so that they can be treated
differently from normal data frames: EAPOLs are transmitted with higher
priority and with aggregation and encryption disabled. Since all devices
benefit from sending EAPOL frames via the high-priority path, move this
functionality from chip-specific code into common code.
Signed-off-by: Igor Mitsyanko <igor.mitsyanko.os@quantenna.com>
Signed-off-by: Sergey Matyukevich <sergey.matyukevich.os@quantenna.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
parent 97aef03c
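
For orientation, the following is a minimal, self-contained sketch of the pattern this patch introduces: the transmit handler checks for EAPOL (802.1X, ETH_P_PAE) frames and hands them to a dedicated worker instead of the regular data ring. The structure and helper names used here (eapol_ctx, eapol_tx_work_fn, example_start_xmit, send_via_control_path, example_data_tx) are illustrative assumptions, not qtnfmac symbols, and the driver-private context is assumed to be reachable via netdev_priv().

/*
 * Minimal, self-contained sketch of the producer/consumer pattern this
 * patch introduces.  All names here are illustrative, not qtnfmac symbols.
 */
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct eapol_ctx {
        /* Assumed to be set up elsewhere with skb_queue_head_init(),
         * INIT_WORK() and a workqueue from alloc_workqueue(). */
        struct sk_buff_head hi_pri_queue;   /* EAPOL frames awaiting tx */
        struct work_struct hi_pri_work;     /* drains the queue */
        struct workqueue_struct *wq;        /* dedicated workqueue */
};

/* Placeholder for the driver's control-path transmit; a real driver would
 * push the frame to its firmware/control interface here. */
static void send_via_control_path(struct sk_buff *skb)
{
        dev_kfree_skb(skb);
}

/* Placeholder for the regular data-path transmit. */
static netdev_tx_t example_data_tx(struct net_device *ndev, struct sk_buff *skb)
{
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

/* Consumer: runs in process context and sends each queued EAPOL frame
 * over the control path instead of the data ring. */
static void eapol_tx_work_fn(struct work_struct *work)
{
        struct eapol_ctx *ctx = container_of(work, struct eapol_ctx, hi_pri_work);
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&ctx->hi_pri_queue)) != NULL)
                send_via_control_path(skb);
}

/* Producer: the ndo_start_xmit handler checks the Ethernet protocol and
 * queues EAPOL frames for the worker; everything else stays on the
 * normal data path. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct eapol_ctx *ctx = netdev_priv(ndev);

        if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
                skb_queue_tail(&ctx->hi_pri_queue, skb);
                queue_work(ctx->wq, &ctx->hi_pri_work);
                return NETDEV_TX_OK;
        }

        return example_data_tx(ndev, skb);
}

Allocation of the backing high-priority workqueue is sketched after the diff below.
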
@@ -67,6 +67,14 @@ static int qtnf_netdev_close(struct net_device *ndev)
         return 0;
 }
 
+static void qtnf_packet_send_hi_pri(struct sk_buff *skb)
+{
+        struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
+
+        skb_queue_tail(&vif->high_pri_tx_queue, skb);
+        queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
+}
+
 /* Netdev handler for data transmission.
  */
 static netdev_tx_t
@@ -107,6 +115,12 @@ qtnf_netdev_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         /* tx path is enabled: reset vif timeout */
         vif->cons_tx_timeout_cnt = 0;
 
+        if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
+                qtnf_packet_send_hi_pri(skb);
+                qtnf_update_tx_stats(ndev, skb);
+                return NETDEV_TX_OK;
+        }
+
         return qtnf_bus_data_tx(mac->bus, skb);
 }
 
@@ -841,15 +855,6 @@ void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
 }
 EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
 
-void qtnf_packet_send_hi_pri(struct sk_buff *skb)
-{
-        struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
-
-        skb_queue_tail(&vif->high_pri_tx_queue, skb);
-        queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
-}
-EXPORT_SYMBOL_GPL(qtnf_packet_send_hi_pri);
-
 struct dentry *qtnf_get_debugfs_dir(void)
 {
         return qtnf_debugfs_dir;
@@ -152,7 +152,6 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev);
 
 void qtnf_netdev_updown(struct net_device *ndev, bool up);
 void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted);
-void qtnf_packet_send_hi_pri(struct sk_buff *skb);
 struct dentry *qtnf_get_debugfs_dir(void);
 
 static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
@@ -357,7 +357,6 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         pcie_priv->pcie_irq_count = 0;
         pcie_priv->tx_reclaim_done = 0;
         pcie_priv->tx_reclaim_req = 0;
-        pcie_priv->tx_eapol = 0;
 
         pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
         if (!pcie_priv->workqueue) {
@@ -63,7 +63,6 @@ struct qtnf_pcie_bus_priv {
         u32 tx_done_count;
         u32 tx_reclaim_done;
         u32 tx_reclaim_req;
-        u32 tx_eapol;
 
         u8 msi_enabled;
         u8 tx_stopped;
@@ -509,13 +509,6 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
         int len;
         int i;
 
-        if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
-                qtnf_packet_send_hi_pri(skb);
-                qtnf_update_tx_stats(skb->dev, skb);
-                priv->tx_eapol++;
-                return NETDEV_TX_OK;
-        }
-
         spin_lock_irqsave(&priv->tx_lock, flags);
 
         if (!qtnf_tx_queue_ready(ts)) {
@@ -779,7 +772,6 @@ static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
         seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
         seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
         seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
-        seq_printf(s, "tx_eapol(%u)\n", priv->tx_eapol);
         seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
         seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
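
The hunks above queue work onto bus->hprio_workqueue but do not show how that workqueue is created. A dedicated high-priority workqueue is typically allocated with alloc_workqueue() and the WQ_HIGHPRI flag. The sketch below illustrates that setup under assumed names (example_bus, example_hprio_init, example_hprio_deinit); it is not the driver's actual initialization code.

/*
 * Illustrative only: allocating and tearing down a dedicated high-priority
 * workqueue such as the hprio_workqueue referenced in the hunks above.
 * The example_bus structure and function names are hypothetical.
 */
#include <linux/errno.h>
#include <linux/workqueue.h>

struct example_bus {
        struct workqueue_struct *hprio_workqueue;
};

static int example_hprio_init(struct example_bus *bus)
{
        /* WQ_HIGHPRI places work items on a high-priority worker pool, so
         * queued EAPOL frames are serviced ahead of regular work items. */
        bus->hprio_workqueue = alloc_workqueue("example_hprio", WQ_HIGHPRI, 0);
        if (!bus->hprio_workqueue)
                return -ENOMEM;

        return 0;
}

static void example_hprio_deinit(struct example_bus *bus)
{
        if (bus->hprio_workqueue) {
                flush_workqueue(bus->hprio_workqueue);
                destroy_workqueue(bus->hprio_workqueue);
                bus->hprio_workqueue = NULL;
        }
}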