Commit 25d43f18 authored by Intiyaz Basha's avatar Intiyaz Basha Committed by David S. Miller

liquidio: moved update_txq_status to lio_core.c

Move the update_txq_status() helper, duplicated in the PF and VF drivers, into lio_core.c so both drivers share a single common copy.
Signed-off-by: Intiyaz Basha <intiyaz.basha@cavium.com>
Signed-off-by: Felix Manlunas <felix.manlunas@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e65a8ccb
...@@ -364,3 +364,36 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev) ...@@ -364,3 +364,36 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev)
destroy_workqueue(lio->rxq_status_wq.wq); destroy_workqueue(lio->rxq_status_wq.wq);
} }
} }
/* Runs in interrupt context: re-enable a stopped tx queue once its
 * instruction queue has drained enough to accept new packets again.
 */
void lio_update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
	struct net_device *netdev = oct->props[iq->ifidx].netdev;
	struct lio *lio;

	/* The first IQ does not have a netdev associated with it. */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);

	if (!netif_is_multiqueue(netdev)) {
		/* Single-queue device: wake the one tx queue if it was
		 * stopped, the link is up, and the IQ has room.
		 */
		if (netif_queue_stopped(netdev) &&
		    lio->linfo.link.s.link_up &&
		    !octnet_iq_is_full(oct, lio->txq)) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
						  tx_restart, 1);
			netif_wake_queue(netdev);
		}
		return;
	}

	/* Multiqueue device: wake only the subqueue mapped to this IQ. */
	if (__netif_subqueue_stopped(netdev, iq->q_index) &&
	    lio->linfo.link.s.link_up &&
	    !octnet_iq_is_full(oct, iq_num)) {
		netif_wake_subqueue(netdev, iq->q_index);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
					  tx_restart, 1);
	}
}
...@@ -906,39 +906,6 @@ static inline void update_link_status(struct net_device *netdev, ...@@ -906,39 +906,6 @@ static inline void update_link_status(struct net_device *netdev,
} }
} }
/* Runs in interrupt context. Restarts a stopped tx queue when the
 * matching instruction queue is no longer full and the link is up.
 */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
	struct net_device *netdev = oct->props[iq->ifidx].netdev;
	struct lio *lio;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);

	if (netif_is_multiqueue(netdev)) {
		/* Wake only the subqueue that maps to this IQ. */
		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
		    lio->linfo.link.s.link_up &&
		    !octnet_iq_is_full(oct, iq_num)) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
						  tx_restart, 1);
			netif_wake_subqueue(netdev, iq->q_index);
		}
		return;
	}

	/* Single-queue path. */
	if (netif_queue_stopped(netdev) &&
	    lio->linfo.link.s.link_up &&
	    !octnet_iq_is_full(oct, lio->txq)) {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
					  lio->txq, tx_restart, 1);
		netif_wake_queue(netdev);
	}
}
static static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{ {
...@@ -2518,7 +2485,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) ...@@ -2518,7 +2485,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
/* Update iq read-index rather than waiting for next interrupt. /* Update iq read-index rather than waiting for next interrupt.
* Return back if tx_done is false. * Return back if tx_done is false.
*/ */
update_txq_status(oct, iq_no); lio_update_txq_status(oct, iq_no);
} else { } else {
dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
__func__, iq_no); __func__, iq_no);
......
...@@ -647,30 +647,6 @@ static void update_link_status(struct net_device *netdev, ...@@ -647,30 +647,6 @@ static void update_link_status(struct net_device *netdev,
} }
} }
/* Wakes a stopped tx queue once the corresponding instruction queue
 * has room again and the link is up.
 */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
	struct net_device *netdev;
	struct lio *lio;

	netdev = oct->props[iq->ifidx].netdev;

	/* Guard against a missing netdev before GET_LIO() dereferences it.
	 * The sibling copies of this helper document that the first IQ
	 * does not have a netdev associated with it; without this check
	 * that case would dereference a NULL pointer.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (netif_is_multiqueue(netdev)) {
		/* Wake only the subqueue mapped to this IQ. */
		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
		    lio->linfo.link.s.link_up &&
		    (!octnet_iq_is_full(oct, iq_num))) {
			netif_wake_subqueue(netdev, iq->q_index);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
						  tx_restart, 1);
		}
	} else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up &&
		   (!octnet_iq_is_full(oct, lio->txq))) {
		/* Single-queue device: wake the lone tx queue. */
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
					  lio->txq, tx_restart, 1);
		netif_wake_queue(netdev);
	}
}
static static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{ {
...@@ -1608,7 +1584,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) ...@@ -1608,7 +1584,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
/* Update iq read-index rather than waiting for next interrupt. /* Update iq read-index rather than waiting for next interrupt.
* Return back if tx_done is false. * Return back if tx_done is false.
*/ */
update_txq_status(oct, iq_no); lio_update_txq_status(oct, iq_no);
} else { } else {
dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
__func__, iq_no); __func__, iq_no);
......
...@@ -474,4 +474,5 @@ static inline int wait_for_pending_requests(struct octeon_device *oct) ...@@ -474,4 +474,5 @@ static inline int wait_for_pending_requests(struct octeon_device *oct)
return 0; return 0;
} }
void lio_update_txq_status(struct octeon_device *oct, int iq_num);
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment