Commit c1b28847 authored by David S. Miller's avatar David S. Miller

Merge branch 'net-cleanup-skb_tx_hash'

Alexander Duyck says:

====================
Clean up users of skb_tx_hash and __skb_tx_hash

I am in the process of doing some work to try and enable macvlan Tx queue
selection without using ndo_select_queue. As a part of that I will likely
need to make changes to skb_tx_hash. As such this is a clean up or refactor
of the two spots where the function has been used. In both cases it didn't
really seem like the function was being used correctly so I have updated
both code paths to not make use of the function.

My current development environment doesn't have an mlx4 or OPA vnic
available so the changes to those have been build tested only.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents f9065284 1b837d48
...@@ -443,17 +443,16 @@ static u8 opa_vnic_get_rc(struct __opa_veswport_info *info, ...@@ -443,17 +443,16 @@ static u8 opa_vnic_get_rc(struct __opa_veswport_info *info,
} }
/* opa_vnic_calc_entropy - calculate the packet entropy */ /* opa_vnic_calc_entropy - calculate the packet entropy */
u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb) u8 opa_vnic_calc_entropy(struct sk_buff *skb)
{ {
u16 hash16; u32 hash = skb_get_hash(skb);
/* /* store XOR of all bytes in lower 8 bits */
* Get flow based 16-bit hash and then XOR the upper and lower bytes hash ^= hash >> 8;
* to get the entropy. hash ^= hash >> 16;
* __skb_tx_hash limits qcount to 16 bits. Hence, get 15-bit hash.
*/ /* return lower 8 bits as entropy */
hash16 = __skb_tx_hash(adapter->netdev, skb, BIT(15)); return (u8)(hash & 0xFF);
return (u8)((hash16 >> 8) ^ (hash16 & 0xff));
} }
/* opa_vnic_get_def_port - get default port based on entropy */ /* opa_vnic_get_def_port - get default port based on entropy */
...@@ -490,7 +489,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb) ...@@ -490,7 +489,7 @@ void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
hdr = skb_push(skb, OPA_VNIC_HDR_LEN); hdr = skb_push(skb, OPA_VNIC_HDR_LEN);
entropy = opa_vnic_calc_entropy(adapter, skb); entropy = opa_vnic_calc_entropy(skb);
def_port = opa_vnic_get_def_port(adapter, entropy); def_port = opa_vnic_get_def_port(adapter, entropy);
len = opa_vnic_wire_length(skb); len = opa_vnic_wire_length(skb);
dlid = opa_vnic_get_dlid(adapter, skb, def_port); dlid = opa_vnic_get_dlid(adapter, skb, def_port);
......
...@@ -299,7 +299,7 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev, ...@@ -299,7 +299,7 @@ struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter); void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter);
void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb); void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb); u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb);
u8 opa_vnic_calc_entropy(struct opa_vnic_adapter *adapter, struct sk_buff *skb); u8 opa_vnic_calc_entropy(struct sk_buff *skb);
void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter); void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter);
void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter); void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter);
void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter, void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter,
......
...@@ -104,7 +104,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb, ...@@ -104,7 +104,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
/* pass entropy and vl as metadata in skb */ /* pass entropy and vl as metadata in skb */
mdata = skb_push(skb, sizeof(*mdata)); mdata = skb_push(skb, sizeof(*mdata));
mdata->entropy = opa_vnic_calc_entropy(adapter, skb); mdata->entropy = opa_vnic_calc_entropy(skb);
mdata->vl = opa_vnic_get_vl(adapter, skb); mdata->vl = opa_vnic_get_vl(adapter, skb);
rc = adapter->rn_ops->ndo_select_queue(netdev, skb, rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
accel_priv, fallback); accel_priv, fallback);
......
...@@ -694,7 +694,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, ...@@ -694,7 +694,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
u16 rings_p_up = priv->num_tx_rings_p_up; u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev)) if (netdev_get_num_tc(dev))
return skb_tx_hash(dev, skb); return fallback(dev, skb);
return fallback(dev, skb) % rings_p_up; return fallback(dev, skb) % rings_p_up;
} }
......
...@@ -3213,19 +3213,6 @@ static inline int netif_set_xps_queue(struct net_device *dev, ...@@ -3213,19 +3213,6 @@ static inline int netif_set_xps_queue(struct net_device *dev,
} }
#endif #endif
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
unsigned int num_tx_queues);
/*
* Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
* as a distribution range limit for the returned value.
*/
static inline u16 skb_tx_hash(const struct net_device *dev,
struct sk_buff *skb)
{
return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}
/** /**
* netif_is_multiqueue - test if device has multiple transmit queues * netif_is_multiqueue - test if device has multiple transmit queues
* @dev: network device * @dev: network device
......
...@@ -2615,17 +2615,16 @@ EXPORT_SYMBOL(netif_device_attach); ...@@ -2615,17 +2615,16 @@ EXPORT_SYMBOL(netif_device_attach);
* Returns a Tx hash based on the given packet descriptor a Tx queues' number * Returns a Tx hash based on the given packet descriptor a Tx queues' number
* to be used as a distribution range. * to be used as a distribution range.
*/ */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, static u16 skb_tx_hash(const struct net_device *dev, struct sk_buff *skb)
unsigned int num_tx_queues)
{ {
u32 hash; u32 hash;
u16 qoffset = 0; u16 qoffset = 0;
u16 qcount = num_tx_queues; u16 qcount = dev->real_num_tx_queues;
if (skb_rx_queue_recorded(skb)) { if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb); hash = skb_get_rx_queue(skb);
while (unlikely(hash >= num_tx_queues)) while (unlikely(hash >= qcount))
hash -= num_tx_queues; hash -= qcount;
return hash; return hash;
} }
...@@ -2638,7 +2637,6 @@ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, ...@@ -2638,7 +2637,6 @@ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
} }
EXPORT_SYMBOL(__skb_tx_hash);
static void skb_warn_bad_offload(const struct sk_buff *skb) static void skb_warn_bad_offload(const struct sk_buff *skb)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment