Commit 8863002c authored by David S. Miller

Merge branch 'mellanox-net-fixes'

Or Gerlitz says:

====================
Mellanox NIC driver update, Nov 12, 2015

A few small mlx5 and mlx4 fixes from the team, done over
net commit c5a37883 ("Merge branch 'akpm' (patches from Andrew)").

Eran's patch needs to go to 4.2 and 4.3 stable kernels.

Tariq's patch needs to go to 4.3 stable too.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 340c78e5 d49c2197
@@ -892,9 +892,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
 		dev->caps.port_mask[i] = dev->caps.port_type[i];
 		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
-		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
-						    &dev->caps.gid_table_len[i],
-						    &dev->caps.pkey_table_len[i]))
+		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
+						      &dev->caps.gid_table_len[i],
+						      &dev->caps.pkey_table_len[i]);
+		if (err)
 			goto err_mem;
 	}
@@ -906,6 +907,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
 			 dev->caps.uar_page_size * dev->caps.num_uars,
 			 (unsigned long long)
 			 pci_resource_len(dev->persist->pdev, 2));
+		err = -ENOMEM;
 		goto err_mem;
 	}
...
@@ -4952,26 +4952,41 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
 	struct res_counter *counter;
 	struct res_counter *tmp;
 	int err;
-	int index;
+	int *counters_arr = NULL;
+	int i, j;
 
 	err = move_all_busy(dev, slave, RES_COUNTER);
 	if (err)
 		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
 			  slave);
 
-	spin_lock_irq(mlx4_tlock(dev));
-	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
-		if (counter->com.owner == slave) {
-			index = counter->com.res_id;
-			rb_erase(&counter->com.node,
-				 &tracker->res_tree[RES_COUNTER]);
-			list_del(&counter->com.list);
-			kfree(counter);
-			__mlx4_counter_free(dev, index);
-			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+	counters_arr = kmalloc_array(dev->caps.max_counters,
+				     sizeof(*counters_arr), GFP_KERNEL);
+	if (!counters_arr)
+		return;
+
+	do {
+		i = 0;
+		j = 0;
+		spin_lock_irq(mlx4_tlock(dev));
+		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
+			if (counter->com.owner == slave) {
+				counters_arr[i++] = counter->com.res_id;
+				rb_erase(&counter->com.node,
+					 &tracker->res_tree[RES_COUNTER]);
+				list_del(&counter->com.list);
+				kfree(counter);
+			}
 		}
-	}
-	spin_unlock_irq(mlx4_tlock(dev));
+		spin_unlock_irq(mlx4_tlock(dev));
+
+		while (j < i) {
+			__mlx4_counter_free(dev, counters_arr[j++]);
+			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
+		}
+	} while (i);
+
+	kfree(counters_arr);
 }
 
 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
...
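Editorial note on the rem_slave_counters() change above: counter IDs are now only recorded while mlx4_tlock is held, and __mlx4_counter_free()/mlx4_release_resource() run after the spinlock is dropped, presumably because those calls may block and must not run in atomic context. Below is a minimal userspace sketch of the same collect-then-release pattern; every name in it is hypothetical, with a pthread spinlock standing in for mlx4_tlock and printf standing in for the blocking release.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_RES 128

struct res {
	int id;
	struct res *next;
};

static pthread_spinlock_t lock;
static struct res *res_list;	/* list protected by 'lock' */

/* Stand-in for a release routine that may block and therefore
 * must not be called while the spinlock is held. */
static void slow_free(int id)
{
	printf("releasing resource %d\n", id);
}

static void rem_all_resources(void)
{
	int *ids = malloc(MAX_RES * sizeof(*ids));
	int i, j;

	if (!ids)
		return;

	do {
		i = 0;
		j = 0;

		/* Phase 1: under the lock, unlink entries and record
		 * their IDs; the blocking release is deferred. */
		pthread_spin_lock(&lock);
		while (res_list && i < MAX_RES) {
			struct res *r = res_list;

			res_list = r->next;
			ids[i++] = r->id;
			free(r);
		}
		pthread_spin_unlock(&lock);

		/* Phase 2: outside the lock, do the blocking release. */
		while (j < i)
			slow_free(ids[j++]);
	} while (i);	/* loop again if the ID array filled up */

	free(ids);
}

int main(void)
{
	pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);

	for (int k = 0; k < 5; k++) {
		struct res *r = malloc(sizeof(*r));

		if (!r)
			break;
		r->id = k;
		r->next = res_list;
		res_list = r;
	}

	rem_all_resources();
	pthread_spin_destroy(&lock);
	return 0;
}
```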
@@ -334,9 +334,15 @@ struct mlx5e_tx_skb_cb {
 
 #define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
 
+enum mlx5e_dma_map_type {
+	MLX5E_DMA_MAP_SINGLE,
+	MLX5E_DMA_MAP_PAGE
+};
+
 struct mlx5e_sq_dma {
 	dma_addr_t addr;
 	u32        size;
+	enum mlx5e_dma_map_type type;
 };
 
 enum {
...
@@ -1332,6 +1332,42 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
 	return err;
 }
 
+static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
+						   u32 tirn)
+{
+	void *in;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+
+	err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
+static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
+{
+	int err;
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_TT; i++) {
+		err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
+							     priv->tirn[i]);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1376,6 +1412,13 @@ int mlx5e_open_locked(struct net_device *netdev)
 		goto err_clear_state_opened_flag;
 	}
 
+	err = mlx5e_refresh_tirs_self_loopback_enable(priv);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
+			   __func__, err);
+		goto err_close_channels;
+	}
+
 	mlx5e_update_carrier(priv);
 	mlx5e_redirect_rqts(priv);
 
@@ -1383,6 +1426,8 @@ int mlx5e_open_locked(struct net_device *netdev)
 
 	return 0;
 
+err_close_channels:
+	mlx5e_close_channels(priv);
 err_clear_state_opened_flag:
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
 	return err;
@@ -1856,6 +1901,8 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
+	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+
 	if (new_mtu > max_mtu) {
 		netdev_err(netdev,
 			   "%s: Bad MTU (%d) > (%d) Max\n",
@@ -1909,6 +1956,9 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 			       "Not creating net device, some required device capabilities are missing\n");
 		return -ENOTSUPP;
 	}
+	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
+		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+
 	return 0;
 }
...
@@ -61,39 +61,47 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
 	}
 }
 
-static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
-				       u32 *size)
+static inline void mlx5e_tx_dma_unmap(struct device *pdev,
+				      struct mlx5e_sq_dma *dma)
 {
-	sq->dma_fifo_pc--;
-	*addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
-	*size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
-}
-
-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
-{
-	dma_addr_t addr;
-	u32 size;
-	int i;
-
-	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
-		mlx5e_dma_pop_last_pushed(sq, &addr, &size);
-		dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+	switch (dma->type) {
+	case MLX5E_DMA_MAP_SINGLE:
+		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+		break;
+	case MLX5E_DMA_MAP_PAGE:
+		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+		break;
+	default:
+		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
 	}
 }
 
-static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
-				  u32 size)
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
+				  dma_addr_t addr,
+				  u32 size,
+				  enum mlx5e_dma_map_type map_type)
 {
 	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
 	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
 	sq->dma_fifo_pc++;
 }
 
-static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
-				 u32 *size)
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
 {
-	*addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
-	*size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+	return &sq->dma_fifo[i & sq->dma_fifo_mask];
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+		struct mlx5e_sq_dma *last_pushed_dma =
+			mlx5e_dma_get(sq, --sq->dma_fifo_pc);
+
+		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
+	}
 }
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -118,8 +126,15 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
 	 */
 #define MLX5E_MIN_INLINE ETH_HLEN
 
-	if (bf && (skb_headlen(skb) <= sq->max_inline))
-		return skb_headlen(skb);
+	if (bf) {
+		u16 ihs = skb_headlen(skb);
+
+		if (skb_vlan_tag_present(skb))
+			ihs += VLAN_HLEN;
+
+		if (ihs <= sq->max_inline)
+			return skb_headlen(skb);
+	}
 
 	return MLX5E_MIN_INLINE;
 }
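A side note on the inline-header hunk above: when the driver inserts the 802.1Q tag itself, the headers written inline grow by VLAN_HLEN (4 bytes), so that overhead now counts against sq->max_inline even though the value returned is still skb_headlen(). A small standalone sketch of the arithmetic follows; the helper name and example numbers are made up for illustration, only ETH_HLEN = 14 and VLAN_HLEN = 4 are standard values.

```c
#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN  14	/* Ethernet header */
#define VLAN_HLEN 4	/* 802.1Q tag inserted by the driver */

/* Hypothetical mirror of the check in mlx5e_get_inline_hdr_size():
 * inline the whole linear part only if it still fits in the budget
 * once a driver-inserted VLAN tag is accounted for. */
static unsigned int inline_hdr_size(unsigned int headlen, bool has_vlan,
				    unsigned int max_inline)
{
	unsigned int ihs = headlen + (has_vlan ? VLAN_HLEN : 0);

	return (ihs <= max_inline) ? headlen : ETH_HLEN;
}

int main(void)
{
	/* 128-byte headers fit a 130-byte budget without a VLAN tag... */
	printf("%u\n", inline_hdr_size(128, false, 130));	/* 128 */
	/* ...but not once the 4-byte tag is added, which the old
	 * check ignored. */
	printf("%u\n", inline_hdr_size(128, true, 130));	/* 14  */
	return 0;
}
```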
@@ -218,7 +233,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		dseg->lkey = sq->mkey_be;
 		dseg->byte_count = cpu_to_be32(headlen);
 
-		mlx5e_dma_push(sq, dma_addr, headlen);
+		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
 		MLX5E_TX_SKB_CB(skb)->num_dma++;
 
 		dseg++;
@@ -237,7 +252,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 		dseg->lkey = sq->mkey_be;
 		dseg->byte_count = cpu_to_be32(fsz);
 
-		mlx5e_dma_push(sq, dma_addr, fsz);
+		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
 		MLX5E_TX_SKB_CB(skb)->num_dma++;
 
 		dseg++;
@@ -353,13 +368,10 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 			}
 
 			for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
-				dma_addr_t addr;
-				u32 size;
+				struct mlx5e_sq_dma *dma =
+					mlx5e_dma_get(sq, dma_fifo_cc++);
 
-				mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
-				dma_fifo_cc++;
-				dma_unmap_single(sq->pdev, addr, size,
-						 DMA_TO_DEVICE);
+				mlx5e_tx_dma_unmap(sq->pdev, dma);
 			}
 
 			npkts++;
...
@@ -453,26 +453,28 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
 	u8         lro_cap[0x1];
 	u8         lro_psh_flag[0x1];
 	u8         lro_time_stamp[0x1];
-	u8         reserved_0[0x6];
+	u8         reserved_0[0x3];
+	u8         self_lb_en_modifiable[0x1];
+	u8         reserved_1[0x2];
 	u8         max_lso_cap[0x5];
-	u8         reserved_1[0x4];
+	u8         reserved_2[0x4];
 	u8         rss_ind_tbl_cap[0x4];
-	u8         reserved_2[0x3];
+	u8         reserved_3[0x3];
 	u8         tunnel_lso_const_out_ip_id[0x1];
-	u8         reserved_3[0x2];
+	u8         reserved_4[0x2];
 	u8         tunnel_statless_gre[0x1];
 	u8         tunnel_stateless_vxlan[0x1];
 
-	u8         reserved_4[0x20];
+	u8         reserved_5[0x20];
 
-	u8         reserved_5[0x10];
+	u8         reserved_6[0x10];
 	u8         lro_min_mss_size[0x10];
 
-	u8         reserved_6[0x120];
+	u8         reserved_7[0x120];
 	u8         lro_timer_supported_periods[4][0x20];
 
-	u8         reserved_7[0x600];
+	u8         reserved_8[0x600];
 };
 
 struct mlx5_ifc_roce_cap_bits {
@@ -4051,9 +4053,11 @@ struct mlx5_ifc_modify_tis_in_bits {
 };
 
 struct mlx5_ifc_modify_tir_bitmask_bits {
-	u8         reserved[0x20];
+	u8         reserved_0[0x20];
 
-	u8         reserved1[0x1f];
+	u8         reserved_1[0x1b];
+	u8         self_lb_en[0x1];
+	u8         reserved_2[0x3];
 	u8         lro[0x1];
 };
...
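One consistency check worth noting on the mlx5_ifc.h hunks above: the new self_lb_en_modifiable and self_lb_en fields are carved out of existing reserved ranges, so the total bit widths are unchanged and no later field shifts. A throwaway snippet with that arithmetic, assuming nothing beyond the widths shown in the diff:

```c
/* Standalone check: the new fields replace reserved bits one-for-one,
 * so the offsets of all following fields stay the same. */
int main(void)
{
	/* per_protocol_networking_offload_caps:
	 * old reserved_0[0x6] == reserved_0[0x3] +
	 * self_lb_en_modifiable[0x1] + reserved_1[0x2] */
	_Static_assert(0x3 + 0x1 + 0x2 == 0x6, "caps widths preserved");

	/* modify_tir_bitmask:
	 * old reserved1[0x1f] == reserved_1[0x1b] +
	 * self_lb_en[0x1] + reserved_2[0x3] */
	_Static_assert(0x1b + 0x1 + 0x3 == 0x1f, "bitmask widths preserved");

	return 0;
}
```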