Commit 572a9d7b authored by Patrick McHardy, committed by David S. Miller

net: allow to propagate errors through ->ndo_hard_start_xmit()

Currently the ->ndo_hard_start_xmit() callbacks are only permitted to return
one of the NETDEV_TX codes. This prevents any kind of error propagation from
virtual devices, such as queue congestion of the underlying device in the
case of layered devices, or unreachability in the case of tunnels.

This patch changes the NET_XMIT codes to avoid clashes with the NETDEV_TX
codes and changes the two callers of dev_hard_start_xmit() to expect either
errno codes, NET_XMIT codes or NETDEV_TX codes as the return value.

In the case of qdisc_restart(), all non-NETDEV_TX codes are mapped to
NETDEV_TX_OK, since no error propagation is possible when qdiscs are used.
In the case of dev_queue_xmit(), the error is propagated upwards.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9ea2bdab
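
To illustrate what the new contract permits (a sketch only, not part of this
commit; the mytunnel_* names are hypothetical), a tunnel driver could now
report unreachability to its caller instead of silently dropping:

/* Sketch assuming the post-patch rules; mytunnel_* are hypothetical helpers. */
static netdev_tx_t mytunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (!mytunnel_route_ok(skb, dev)) {	/* hypothetical route lookup */
		dev->stats.tx_carrier_errors++;
		kfree_skb(skb);		/* an error return implies the skb was consumed */
		return -ENETUNREACH;	/* propagated by dev_queue_xmit() on a
					 * synchronous (virtual) device; mapped to
					 * NETDEV_TX_OK when a qdisc is in the path */
	}
	return mytunnel_do_xmit(skb, dev);	/* hypothetical transmit helper */
}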
include/linux/netdevice.h
@@ -63,27 +63,48 @@ struct wireless_dev;
 #define HAVE_FREE_NETDEV		/* free_netdev() */
 #define HAVE_NETDEV_PRIV		/* netdev_priv() */
 
-#define NET_XMIT_SUCCESS	0
-#define NET_XMIT_DROP		1	/* skb dropped			*/
-#define NET_XMIT_CN		2	/* congestion notification	*/
-#define NET_XMIT_POLICED	3	/* skb is shot by police	*/
-#define NET_XMIT_MASK		0xFFFF	/* qdisc flags in net/sch_generic.h */
+/*
+ * Transmit return codes: transmit return codes originate from three different
+ * namespaces:
+ *
+ * - qdisc return codes
+ * - driver transmit return codes
+ * - errno values
+ *
+ * Drivers are allowed to return any one of those in their hard_start_xmit()
+ * function. Real network devices commonly used with qdiscs should only return
+ * the driver transmit return codes though - when qdiscs are used, the actual
+ * transmission happens asynchronously, so the value is not propagated to
+ * higher layers. Virtual network devices transmit synchronously, in this case
+ * the driver transmit return codes are consumed by dev_queue_xmit(), all
+ * others are propagated to higher layers.
+ */
+
+/* qdisc ->enqueue() return codes. */
+#define NET_XMIT_SUCCESS	0x00
+#define NET_XMIT_DROP		0x10	/* skb dropped			*/
+#define NET_XMIT_CN		0x20	/* congestion notification	*/
+#define NET_XMIT_POLICED	0x30	/* skb is shot by police	*/
+#define NET_XMIT_MASK		0xf0	/* qdisc flags in net/sch_generic.h */
 
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS		0	/* keep 'em coming, baby */
 #define NET_RX_DROP		1	/* packet dropped */
 
 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
  * indicates that the device will soon be dropping packets, or already drops
  * some packets of the same priority; prompting us to send less aggressively. */
-#define net_xmit_eval(e)	((e) == NET_XMIT_CN? 0 : (e))
+#define net_xmit_eval(e)	((e) == NET_XMIT_CN ? 0 : (e))
 #define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)
 
 /* Driver transmit return codes */
+#define NETDEV_TX_MASK		0xf
+
 enum netdev_tx {
-	NETDEV_TX_OK = 0,	/* driver took care of packet */
-	NETDEV_TX_BUSY,		/* driver tx path was busy*/
-	NETDEV_TX_LOCKED = -1,	/* driver tx lock was already taken */
+	__NETDEV_TX_MIN	 = INT_MIN,	/* make sure enum is signed */
+	NETDEV_TX_OK	 = 0,		/* driver took care of packet */
+	NETDEV_TX_BUSY	 = 1,		/* driver tx path was busy*/
+	NETDEV_TX_LOCKED = 2,		/* driver tx lock was already taken */
 };
 typedef enum netdev_tx netdev_tx_t;
...
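
A quick way to see that the three namespaces no longer clash (an illustrative
classifier, not part of the commit): errnos are negative, the qdisc codes live
in bits 4-7 (NET_XMIT_MASK == 0xf0), the driver codes live in bits 0-3
(NETDEV_TX_MASK == 0xf), and both success codes are 0, so a single signed int
can be decoded unambiguously:

/* Illustrative only; uses just the constants defined above. */
static const char *xmit_code_class(int rc)
{
	if (rc < 0)
		return "errno";		/* e.g. -ENETUNREACH from a tunnel */
	if (rc & NET_XMIT_MASK)
		return "qdisc";		/* e.g. NET_XMIT_CN == 0x20 */
	if (rc & NETDEV_TX_MASK)
		return "driver";	/* e.g. NETDEV_TX_BUSY == 1 */
	return "success";		/* NET_XMIT_SUCCESS == NETDEV_TX_OK == 0 */
}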
net/core/dev.c
@@ -1757,7 +1757,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			struct netdev_queue *txq)
 {
 	const struct net_device_ops *ops = dev->netdev_ops;
-	int rc;
+	int rc = NETDEV_TX_OK;
 
 	if (likely(!skb->next)) {
 		if (!list_empty(&ptype_all))
@@ -1805,6 +1805,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		nskb->next = NULL;
 		rc = ops->ndo_start_xmit(nskb, dev);
 		if (unlikely(rc != NETDEV_TX_OK)) {
+			if (rc & ~NETDEV_TX_MASK)
+				goto out_kfree_gso_skb;
 			nskb->next = skb->next;
 			skb->next = nskb;
 			return rc;
@@ -1814,11 +1816,12 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 		return NETDEV_TX_BUSY;
 	} while (skb->next);
 
-	skb->destructor = DEV_GSO_CB(skb)->destructor;
+out_kfree_gso_skb:
+	if (likely(skb->next == NULL))
+		skb->destructor = DEV_GSO_CB(skb)->destructor;
 
 out_kfree_skb:
 	kfree_skb(skb);
-	return NETDEV_TX_OK;
+	return rc;
 }
 
 static u32 skb_tx_hashrnd;
@@ -1906,6 +1909,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	return rc;
 }
 
+static inline bool dev_xmit_complete(int rc)
+{
+	/* successful transmission */
+	if (rc == NETDEV_TX_OK)
+		return true;
+
+	/* error while transmitting, driver consumed skb */
+	if (rc < 0)
+		return true;
+
+	/* error while queueing to a different device, driver consumed skb */
+	if (rc & NET_XMIT_MASK)
+		return true;
+
+	return false;
+}
+
 /**
  *	dev_queue_xmit - transmit a buffer
  *	@skb: buffer to transmit
@@ -2003,8 +2023,8 @@ int dev_queue_xmit(struct sk_buff *skb)
 			HARD_TX_LOCK(dev, txq, cpu);
 
 			if (!netif_tx_queue_stopped(txq)) {
-				rc = NET_XMIT_SUCCESS;
-				if (!dev_hard_start_xmit(skb, dev, txq)) {
+				rc = dev_hard_start_xmit(skb, dev, txq);
+				if (dev_xmit_complete(rc)) {
 					HARD_TX_UNLOCK(dev, txq);
 					goto out;
 				}
...
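
Worked examples of the new helper's decisions, derived directly from the
dev_xmit_complete() logic above (illustrative values, not part of the commit):

/*
 * dev_xmit_complete(NETDEV_TX_OK)    -> true   (packet transmitted)
 * dev_xmit_complete(-ENETUNREACH)    -> true   (error; driver consumed the skb)
 * dev_xmit_complete(NET_XMIT_CN)     -> true   (queued to another device; skb consumed)
 * dev_xmit_complete(NETDEV_TX_BUSY)  -> false  (skb not consumed; caller must requeue)
 */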
net/sched/sch_generic.c
@@ -120,8 +120,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_tx_queue_stopped(txq) &&
-	    !netif_tx_queue_frozen(txq))
+	    !netif_tx_queue_frozen(txq)) {
 		ret = dev_hard_start_xmit(skb, dev, txq);
+
+		/* an error implies that the skb was consumed */
+		if (ret < 0)
+			ret = NETDEV_TX_OK;
+		/* all NET_XMIT codes map to NETDEV_TX_OK */
+		ret &= ~NET_XMIT_MASK;
+	}
 	HARD_TX_UNLOCK(dev, txq);
 
 	spin_lock(root_lock);
...
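
The masking in sch_direct_xmit() collapses everything back into the NETDEV_TX
namespace, since qdisc_restart() cannot propagate errors anyway. Tracing a few
values through it (illustrative, not part of the commit):

/*
 * ret == -ENETUNREACH   ->  NETDEV_TX_OK    (ret < 0: the skb was consumed)
 * ret == NET_XMIT_CN    ->  NETDEV_TX_OK    (0x20 & ~0xf0 == 0)
 * ret == NETDEV_TX_BUSY ->  NETDEV_TX_BUSY  (the low nibble survives the mask)
 *
 * so the qdisc layer continues to see only NETDEV_TX codes.
 */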