Commit 6714d478 authored by Paolo Abeni

Merge branch 'r8169-use-new-macros-from-netdev_queues-h'

Heiner Kallweit says:

====================
r8169: use new macros from netdev_queues.h

Add one missing subqueue version of the macros, and use the new macros
in r8169 to simplify the code.
====================

Link: https://lore.kernel.org/r/7147a001-3d9c-a48d-d398-a94c666aa65b@gmail.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 3684a23b 1a31ae00
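
For orientation before the diff: a minimal sketch, not taken from this commit, of how a driver is expected to pair the two subqueue helpers. Everything prefixed foo_ (the ring struct, the slot counter, the threshold values) is hypothetical; only the macro names and argument order come from netdev_queues.h and mirror the r8169 changes below.

#include <linux/netdevice.h>
#include <net/netdev_queues.h>

/* Hypothetical ring state; a real driver keeps its own producer/consumer
 * indices (cf. cur_tx/dirty_tx in r8169).
 */
struct foo_ring {
	unsigned int cur_tx;	/* producer index, advanced by xmit */
	unsigned int dirty_tx;	/* consumer index, advanced by tx completion */
	unsigned int num_desc;	/* ring size */
};

static unsigned int foo_tx_slots_avail(struct foo_ring *r)
{
	return READ_ONCE(r->dirty_tx) + r->num_desc - READ_ONCE(r->cur_tx);
}

/* Xmit side: after reserving descriptors, let the macro stop the subqueue
 * when fewer than stop_thrs slots remain, or re-wake it if the completion
 * path freed enough slots in the meantime.
 */
static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev,
			    struct foo_ring *r)
{
	unsigned int frags = skb_shinfo(skb)->nr_frags;

	/* ... map the skb and fill frags + 1 descriptors here ... */
	WRITE_ONCE(r->cur_tx, r->cur_tx + frags + 1);

	/* The return value says whether the queue was left running;
	 * r8169 uses it to decide whether to ring the doorbell.
	 */
	netif_subqueue_maybe_stop(dev, 0, foo_tx_slots_avail(r),
				  MAX_SKB_FRAGS + 1,		/* stop threshold */
				  2 * (MAX_SKB_FRAGS + 1));	/* start threshold */
	return NETDEV_TX_OK;
}

/* Completion side: publish the new consumer index, then report the BQL
 * completion and wake the subqueue in one call.
 */
static void foo_tx_complete(struct net_device *dev, struct foo_ring *r,
			    unsigned int pkts, unsigned int bytes,
			    unsigned int dirty_tx)
{
	WRITE_ONCE(r->dirty_tx, dirty_tx);
	netif_subqueue_completed_wake(dev, 0, pkts, bytes,
				      foo_tx_slots_avail(r),
				      2 * (MAX_SKB_FRAGS + 1));
}

The helpers in netdev_queues.h contain the producer/consumer memory barriers and, on the completion side, the BQL completed-queue accounting, which is why the open-coded smp_wmb()/smp_mb__after_atomic()/smp_store_mb() calls and the separate netdev_completed_queue() call disappear from r8169 below.
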
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -30,6 +30,7 @@
 #include <linux/ipv6.h>
 #include <asm/unaligned.h>
 #include <net/ip6_checksum.h>
+#include <net/netdev_queues.h>
 
 #include "r8169.h"
 #include "r8169_firmware.h"
@@ -68,6 +69,8 @@
 #define NUM_RX_DESC 256 /* Number of Rx descriptor registers */
 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
+#define R8169_TX_STOP_THRS (MAX_SKB_FRAGS + 1)
+#define R8169_TX_START_THRS (2 * R8169_TX_STOP_THRS)
 
 #define OCP_STD_PHY_BASE 0xa400
@@ -4162,13 +4165,9 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
 	return true;
 }
 
-static bool rtl_tx_slots_avail(struct rtl8169_private *tp)
+static unsigned int rtl_tx_slots_avail(struct rtl8169_private *tp)
 {
-	unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC
-					- READ_ONCE(tp->cur_tx);
-
-	/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
-	return slots_avail > MAX_SKB_FRAGS;
+	return READ_ONCE(tp->dirty_tx) + NUM_TX_DESC - READ_ONCE(tp->cur_tx);
 }
 
 /* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
@@ -4245,27 +4244,10 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
 	WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1);
 
-	stop_queue = !rtl_tx_slots_avail(tp);
-	if (unlikely(stop_queue)) {
-		/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
-		 * not miss a ring update when it notices a stopped queue.
-		 */
-		smp_wmb();
-		netif_stop_queue(dev);
-		/* Sync with rtl_tx:
-		 * - publish queue status and cur_tx ring index (write barrier)
-		 * - refresh dirty_tx ring index (read barrier).
-		 * May the current thread have a pessimistic view of the ring
-		 * status and forget to wake up queue, a racing rtl_tx thread
-		 * can't.
-		 */
-		smp_mb__after_atomic();
-		if (rtl_tx_slots_avail(tp))
-			netif_start_queue(dev);
-		door_bell = true;
-	}
-
-	if (door_bell)
+	stop_queue = !netif_subqueue_maybe_stop(dev, 0, rtl_tx_slots_avail(tp),
+						R8169_TX_STOP_THRS,
+						R8169_TX_START_THRS);
+	if (door_bell || stop_queue)
 		rtl8169_doorbell(tp);
 
 	return NETDEV_TX_OK;
@@ -4389,19 +4371,12 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
 	}
 
 	if (tp->dirty_tx != dirty_tx) {
-		netdev_completed_queue(dev, pkts_compl, bytes_compl);
 		dev_sw_netstats_tx_add(dev, pkts_compl, bytes_compl);
+		WRITE_ONCE(tp->dirty_tx, dirty_tx);
 
-		/* Sync with rtl8169_start_xmit:
-		 * - publish dirty_tx ring index (write barrier)
-		 * - refresh cur_tx ring index and queue status (read barrier)
-		 * May the current thread miss the stopped queue condition,
-		 * a racing xmit thread can only have a right view of the
-		 * ring status.
-		 */
-		smp_store_mb(tp->dirty_tx, dirty_tx);
-		if (netif_queue_stopped(dev) && rtl_tx_slots_avail(tp))
-			netif_wake_queue(dev);
+		netif_subqueue_completed_wake(dev, 0, pkts_compl, bytes_compl,
+					      rtl_tx_slots_avail(tp),
+					      R8169_TX_START_THRS);
 		/*
 		 * 8168 hack: TxPoll requests are lost when the Tx packets are
 		 * too close. Let's kick an extra TxPoll request when a burst
...

--- a/include/net/netdev_queues.h
+++ b/include/net/netdev_queues.h
@@ -160,4 +160,14 @@ netdev_txq_completed_mb(struct netdev_queue *dev_queue,
 		netif_txq_maybe_stop(txq, get_desc, stop_thrs, start_thrs); \
 	})
 
+#define netif_subqueue_completed_wake(dev, idx, pkts, bytes, \
+				      get_desc, start_thrs) \
+	({ \
+		struct netdev_queue *txq; \
+ \
+		txq = netdev_get_tx_queue(dev, idx); \
+		netif_txq_completed_wake(txq, pkts, bytes, \
+					 get_desc, start_thrs); \
+	})
+
 #endif