Commit dd79cf7d authored by David S. Miller's avatar David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2016-07-14

This series contains fixes to i40e and ixgbe.

Alex fixes issues found in i40e_rx_checksum() which was broken, where the
checksum was being returned valid when it was not.

Kiran fixes a bug, found when a cable is abruptly removed, which caused a
panic.  The fix sets the VSI broadcast promiscuous mode during the VSI add
sequence and prevents adding a MAC filter if the specified MAC address is
broadcast.

Paolo Abeni fixes a bug by returning the actual work done, capped to
weight - 1, since the core doesn't allow returning the full budget when
the driver modifies the NAPI status.

Guilherme Piccoli fixes an issue where the q_vector initialization
routine sets the affinity_mask of a q_vector based on the v_idx value.
This means a loop iterates on v_idx, which is an incremental value, and
the cpumask is created based on this value.  This is a problem in
systems with multiple logical CPUs per core (like in SMT scenarios).
Changed the way q_vector's affinity_mask is created to resolve the issue.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents c961e877 7f6c5539
...@@ -1344,6 +1344,13 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, ...@@ -1344,6 +1344,13 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
if (!vsi || !macaddr) if (!vsi || !macaddr)
return NULL; return NULL;
/* Do not allow broadcast filter to be added since broadcast filter
* is added as part of add VSI for any newly created VSI except
* FDIR VSI
*/
if (is_broadcast_ether_addr(macaddr))
return NULL;
f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev); f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
if (!f) { if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC); f = kzalloc(sizeof(*f), GFP_ATOMIC);
...@@ -2151,18 +2158,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) ...@@ -2151,18 +2158,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
aq_ret, pf->hw.aq.asq_last_status); aq_ret, pf->hw.aq.asq_last_status);
} }
} }
aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
vsi->seid,
cur_promisc, NULL);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
pf->hw.aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set brdcast promisc failed, err %s, aq_err %s\n",
i40e_stat_str(&pf->hw, aq_ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
} }
out: out:
/* if something went wrong then set the changed flag so we try again */ /* if something went wrong then set the changed flag so we try again */
...@@ -7726,10 +7721,11 @@ static int i40e_init_msix(struct i40e_pf *pf) ...@@ -7726,10 +7721,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured * @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct * @v_idx: index of the vector in the vsi struct
* @cpu: cpu to be used on affinity_mask
* *
* We allocate one q_vector. If allocation fails we return -ENOMEM. * We allocate one q_vector. If allocation fails we return -ENOMEM.
**/ **/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{ {
struct i40e_q_vector *q_vector; struct i40e_q_vector *q_vector;
...@@ -7740,7 +7736,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) ...@@ -7740,7 +7736,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
q_vector->vsi = vsi; q_vector->vsi = vsi;
q_vector->v_idx = v_idx; q_vector->v_idx = v_idx;
cpumask_set_cpu(v_idx, &q_vector->affinity_mask); cpumask_set_cpu(cpu, &q_vector->affinity_mask);
if (vsi->netdev) if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi, netif_napi_add(vsi->netdev, &q_vector->napi,
i40e_napi_poll, NAPI_POLL_WEIGHT); i40e_napi_poll, NAPI_POLL_WEIGHT);
...@@ -7764,8 +7761,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx) ...@@ -7764,8 +7761,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{ {
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
int v_idx, num_q_vectors; int err, v_idx, num_q_vectors, current_cpu;
int err;
/* if not MSIX, give the one vector only to the LAN VSI */ /* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED) if (pf->flags & I40E_FLAG_MSIX_ENABLED)
...@@ -7775,10 +7771,15 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) ...@@ -7775,10 +7771,15 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
else else
return -EINVAL; return -EINVAL;
current_cpu = cpumask_first(cpu_online_mask);
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) { for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
err = i40e_vsi_alloc_q_vector(vsi, v_idx); err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
if (err) if (err)
goto err_out; goto err_out;
current_cpu = cpumask_next(current_cpu, cpu_online_mask);
if (unlikely(current_cpu >= nr_cpu_ids))
current_cpu = cpumask_first(cpu_online_mask);
} }
return 0; return 0;
...@@ -9224,6 +9225,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi) ...@@ -9224,6 +9225,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
static int i40e_add_vsi(struct i40e_vsi *vsi) static int i40e_add_vsi(struct i40e_vsi *vsi)
{ {
int ret = -ENODEV; int ret = -ENODEV;
i40e_status aq_ret = 0;
u8 laa_macaddr[ETH_ALEN]; u8 laa_macaddr[ETH_ALEN];
bool found_laa_mac_filter = false; bool found_laa_mac_filter = false;
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
...@@ -9413,6 +9415,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ...@@ -9413,6 +9415,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->seid = ctxt.seid; vsi->seid = ctxt.seid;
vsi->id = ctxt.vsi_number; vsi->id = ctxt.vsi_number;
} }
/* Except FDIR VSI, for all other VSIs set the broadcast filter */
if (vsi->type != I40E_VSI_FDIR) {
aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
if (aq_ret) {
ret = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set brdcast promisc failed, err %s, aq_err %s\n",
i40e_stat_str(hw, aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
spin_lock_bh(&vsi->mac_filter_list_lock); spin_lock_bh(&vsi->mac_filter_list_lock);
/* If macvlan filters already exist, force them to get loaded */ /* If macvlan filters already exist, force them to get loaded */
......
...@@ -1280,8 +1280,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -1280,8 +1280,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
union i40e_rx_desc *rx_desc) union i40e_rx_desc *rx_desc)
{ {
struct i40e_rx_ptype_decoded decoded; struct i40e_rx_ptype_decoded decoded;
bool ipv4, ipv6, tunnel = false;
u32 rx_error, rx_status; u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype; u8 ptype;
u64 qword; u64 qword;
...@@ -1336,19 +1336,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -1336,19 +1336,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return; return;
/* The hardware supported by this driver does not validate outer /* If there is an outer header present that might contain a checksum
* checksums for tunneled VXLAN or GENEVE frames. I don't agree * we need to bump the checksum level by 1 to reflect the fact that
* with it but the specification states that you "MAY validate", it * we are indicating we validated the inner checksum.
* doesn't make it a hard requirement so if we have validated the
* inner checksum report CHECKSUM_UNNECESSARY.
*/ */
if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP | if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
I40E_RX_PTYPE_INNER_PROT_UDP | skb->csum_level = 1;
I40E_RX_PTYPE_INNER_PROT_SCTP))
tunnel = true; /* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case I40E_RX_PTYPE_INNER_PROT_TCP:
case I40E_RX_PTYPE_INNER_PROT_UDP:
case I40E_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = tunnel ? 1 : 0; /* fall through */
default:
break;
}
return; return;
......
...@@ -752,8 +752,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -752,8 +752,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
union i40e_rx_desc *rx_desc) union i40e_rx_desc *rx_desc)
{ {
struct i40e_rx_ptype_decoded decoded; struct i40e_rx_ptype_decoded decoded;
bool ipv4, ipv6, tunnel = false;
u32 rx_error, rx_status; u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype; u8 ptype;
u64 qword; u64 qword;
...@@ -808,19 +808,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, ...@@ -808,19 +808,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return; return;
/* The hardware supported by this driver does not validate outer /* If there is an outer header present that might contain a checksum
* checksums for tunneled VXLAN or GENEVE frames. I don't agree * we need to bump the checksum level by 1 to reflect the fact that
* with it but the specification states that you "MAY validate", it * we are indicating we validated the inner checksum.
* doesn't make it a hard requirement so if we have validated the
* inner checksum report CHECKSUM_UNNECESSARY.
*/ */
if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP | if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
I40E_RX_PTYPE_INNER_PROT_UDP | skb->csum_level = 1;
I40E_RX_PTYPE_INNER_PROT_SCTP))
tunnel = true; /* Only report checksum unnecessary for TCP, UDP, or SCTP */
switch (decoded.inner_prot) {
case I40E_RX_PTYPE_INNER_PROT_TCP:
case I40E_RX_PTYPE_INNER_PROT_UDP:
case I40E_RX_PTYPE_INNER_PROT_SCTP:
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = tunnel ? 1 : 0; /* fall through */
default:
break;
}
return; return;
......
...@@ -2887,7 +2887,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget) ...@@ -2887,7 +2887,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
if (!test_bit(__IXGBE_DOWN, &adapter->state)) if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx)); ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
return 0; return min(work_done, budget - 1);
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment