Commit efef7939 authored by David S. Miller

Merge branch 'mlx4-next'

Or Gerlitz says:

====================
mlx4 driver update

This series from Matan, Jenny, Dotan and myself is mostly about adding
support for a new performance-optimized flow steering mode (patches 4-10).

The first two patches are small fixes (one for VXLAN and one for SRIOV),
and the third patch is a fix that avoids a hard-lockup situation when many
(hundreds of) processes holding user-space QPs/CQs get events.

Matan and Or.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 630f4b70 7d077cd3
@@ -233,7 +233,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 	if (err)
 		goto err_dbmap;
 
-	cq->mcq.comp  = mlx4_ib_cq_comp;
+	if (context)
+		cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
+	else
+		cq->mcq.comp  = mlx4_ib_cq_comp;
 	cq->mcq.event = mlx4_ib_cq_event;
 
 	if (context)
@@ -2227,7 +2227,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
 		err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
 					    MLX4_IB_UC_STEER_QPN_ALIGN,
-					    &ibdev->steer_qpn_base);
+					    &ibdev->steer_qpn_base, 0);
 		if (err)
 			goto err_counter;
@@ -802,16 +802,21 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			}
 		}
 	} else {
-		/* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
-		 * BlueFlame setup flow wrongly causes VLAN insertion. */
+		/* Raw packet QPNs may not have bits 6,7 set in their qp_num;
+		 * otherwise, the WQE BlueFlame setup flow wrongly causes
+		 * VLAN insertion. */
 		if (init_attr->qp_type == IB_QPT_RAW_PACKET)
-			err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
+			err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
+						    (init_attr->cap.max_send_wr ?
+						     MLX4_RESERVE_ETH_BF_QP : 0) |
+						    (init_attr->cap.max_recv_wr ?
+						     MLX4_RESERVE_A0_QP : 0));
 		else
 			if (qp->flags & MLX4_IB_QP_NETIF)
 				err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
 			else
 				err = mlx4_qp_reserve_range(dev->dev, 1, 1,
-							    &qpn);
+							    &qpn, 0);
 		if (err)
 			goto err_proxy;
 	}
This diff is collapsed.
@@ -52,6 +52,51 @@
 #define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
 #define MLX4_EQ_STATE_FIRED		(10 <<  8)
 
+#define TASKLET_MAX_TIME 2
+#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
+
+void mlx4_cq_tasklet_cb(unsigned long data)
+{
+	unsigned long flags;
+	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
+	struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
+	struct mlx4_cq *mcq, *temp;
+
+	spin_lock_irqsave(&ctx->lock, flags);
+	list_splice_tail_init(&ctx->list, &ctx->process_list);
+	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
+		list_del_init(&mcq->tasklet_ctx.list);
+		mcq->tasklet_ctx.comp(mcq);
+		if (atomic_dec_and_test(&mcq->refcount))
+			complete(&mcq->free);
+		if (time_after(jiffies, end))
+			break;
+	}
+
+	if (!list_empty(&ctx->process_list))
+		tasklet_schedule(&ctx->task);
+}
+
+static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
+{
+	unsigned long flags;
+	struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+
+	spin_lock_irqsave(&tasklet_ctx->lock, flags);
+	/* When migrating CQs between EQs will be implemented, please note
+	 * that you need to sync this point. It is possible that
+	 * while migrating a CQ, completions on the old EQs could
+	 * still arrive.
+	 */
+	if (list_empty_careful(&cq->tasklet_ctx.list)) {
+		atomic_inc(&cq->refcount);
+		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
+	}
+	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
+}
+
 void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
 {
 	struct mlx4_cq *cq;
@@ -292,6 +337,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	cq->uar = uar;
 	atomic_set(&cq->refcount, 1);
 	init_completion(&cq->free);
+	cq->comp = mlx4_add_cq_to_tasklet;
+	cq->tasklet_ctx.priv =
+		&priv->eq_table.eq[cq->vector].tasklet_ctx;
+	INIT_LIST_HEAD(&cq->tasklet_ctx.list);
+
 
 	cq->irq = priv->eq_table.eq[cq->vector].irq;
 	return 0;
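Taken together with the mlx4_ib_create_cq() hunk above, completion dispatch now works as follows: mlx4_cq_alloc() installs mlx4_add_cq_to_tasklet() as the default cq->comp handler, so a CQ owner selects the deferred (tasklet) path by putting its real handler in tasklet_ctx.comp, or keeps direct IRQ-context dispatch by overwriting cq->comp. A minimal sketch, assuming mlx4_cq_alloc() has already run; install_comp_handler() and my_cq_comp are hypothetical names used only for illustration:

static void install_comp_handler(struct mlx4_cq *mcq, bool deferred,
				 void (*my_cq_comp)(struct mlx4_cq *))
{
	if (deferred)
		/* invoked later from mlx4_cq_tasklet_cb(), outside hard IRQ context */
		mcq->tasklet_ctx.comp = my_cq_comp;
	else
		/* overrides the default and runs straight from the EQ interrupt path */
		mcq->comp = my_cq_comp;
}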
@@ -595,7 +595,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
 		return 0;
 	}
 
-	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+	err = mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_A0_QP);
 	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
 	if (err) {
 		en_err(priv, "Failed to reserve qp for mac registration\n");
@@ -1974,15 +1974,8 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
-	int err;
 	int node;
 
-	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
-	if (err) {
-		en_err(priv, "failed reserving range for TX rings\n");
-		return err;
-	}
-
 	/* Create tx Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		node = cpu_to_node(i % num_online_cpus());
@@ -1991,7 +1984,6 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 			goto err;
 
 		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
-					   priv->base_tx_qpn + i,
 					   prof->tx_ring_size, TXBB_SIZE,
 					   node, i))
 			goto err;
@@ -2602,7 +2594,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
 	if (mdev->dev->caps.steering_mode ==
-	    MLX4_STEERING_MODE_DEVICE_MANAGED)
+	    MLX4_STEERING_MODE_DEVICE_MANAGED &&
+	    mdev->dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
 		dev->hw_features |= NETIF_F_NTUPLE;
 
 	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
@@ -888,7 +888,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 				gro_skb->ip_summed = ip_summed;
 
 				if (l2_tunnel && ip_summed == CHECKSUM_UNNECESSARY)
-					gro_skb->encapsulation = 1;
+					gro_skb->csum_level = 1;
 				if ((cqe->vlan_my_qpn &
 				    cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
 				    (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
@@ -1130,7 +1131,8 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
 	int err;
 	u32 qpn;
 
-	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
+	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
+				    MLX4_RESERVE_A0_QP);
 	if (err) {
 		en_err(priv, "Failed reserving drop qpn\n");
 		return err;
@@ -1173,7 +1175,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	en_dbg(DRV, priv, "Configuring rss steering\n");
 	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
 				    priv->rx_ring_num,
-				    &rss_map->base_qpn);
+				    &rss_map->base_qpn, 0);
 	if (err) {
 		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
 		return err;
@@ -46,7 +46,7 @@
 #include "mlx4_en.h"
 
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
-			   struct mlx4_en_tx_ring **pring, int qpn, u32 size,
+			   struct mlx4_en_tx_ring **pring, u32 size,
 			   u16 stride, int node, int queue_index)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
@@ -112,11 +112,17 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	       ring, ring->buf, ring->size, ring->buf_size,
 	       (unsigned long long) ring->wqres.buf.direct.map);
 
-	ring->qpn = qpn;
+	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn,
+				    MLX4_RESERVE_ETH_BF_QP);
+	if (err) {
+		en_err(priv, "failed reserving qp for TX ring\n");
+		goto err_map;
+	}
+
 	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
 	if (err) {
 		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
-		goto err_map;
+		goto err_reserve;
 	}
 	ring->qp.event = mlx4_en_sqp_event;
@@ -143,6 +149,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	*pring = ring;
 	return 0;
 
+err_reserve:
+	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
 err_map:
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
 err_hwq_res:
@@ -450,7 +450,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_eqe *eqe;
-	int cqn;
+	int cqn = -1;
 	int eqes_found = 0;
 	int set_ci = 0;
 	int port;
@@ -758,6 +758,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 
 	eq_set_ci(eq, 1);
 
+	/* cqn is 24bit wide but is initialized such that its higher bits
+	 * are ones too. Thus, if we got any event, cqn's high bits should be off
+	 * and we need to schedule the tasklet.
+	 */
+	if (!(cqn & ~0xffffff))
+		tasklet_schedule(&eq->tasklet_ctx.task);
+
 	return eqes_found;
 }
 
@@ -971,6 +978,12 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
 
 	eq->cons_index = 0;
 
+	INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+	INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+	spin_lock_init(&eq->tasklet_ctx.lock);
+	tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb,
+		     (unsigned long)&eq->tasklet_ctx);
+
 	return err;
 
 err_out_free_mtt:
@@ -1027,6 +1040,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 		}
 	}
 	synchronize_irq(eq->irq);
+	tasklet_disable(&eq->tasklet_ctx.task);
 
 	mlx4_mtt_cleanup(dev, &eq->mtt);
 	for (i = 0; i < npages; ++i)
This diff is collapsed.
@@ -43,6 +43,26 @@ struct mlx4_mod_stat_cfg {
 	u8 log_pg_sz_m;
 };
 
+struct mlx4_port_cap {
+	u8  supported_port_types;
+	u8  suggested_type;
+	u8  default_sense;
+	u8  log_max_macs;
+	u8  log_max_vlans;
+	int ib_mtu;
+	int max_port_width;
+	int max_vl;
+	int max_gids;
+	int max_pkeys;
+	u64 def_mac;
+	u16 eth_mtu;
+	int trans_type;
+	int vendor_oui;
+	u16 wavelength;
+	u64 trans_code;
+	u8  dmfs_optimized_state;
+};
+
 struct mlx4_dev_cap {
 	int max_srq_sz;
 	int max_qp_sz;
@@ -67,17 +87,6 @@ struct mlx4_dev_cap {
 	int local_ca_ack_delay;
 	int num_ports;
 	u32 max_msg_sz;
-	int ib_mtu[MLX4_MAX_PORTS + 1];
-	int max_port_width[MLX4_MAX_PORTS + 1];
-	int max_vl[MLX4_MAX_PORTS + 1];
-	int max_gids[MLX4_MAX_PORTS + 1];
-	int max_pkeys[MLX4_MAX_PORTS + 1];
-	u64 def_mac[MLX4_MAX_PORTS + 1];
-	u16 eth_mtu[MLX4_MAX_PORTS + 1];
-	int trans_type[MLX4_MAX_PORTS + 1];
-	int vendor_oui[MLX4_MAX_PORTS + 1];
-	u16 wavelength[MLX4_MAX_PORTS + 1];
-	u64 trans_code[MLX4_MAX_PORTS + 1];
 	u16 stat_rate_support;
 	int fs_log_max_ucast_qp_range_size;
 	int fs_max_num_qp_per_entry;
@@ -115,12 +124,10 @@ struct mlx4_dev_cap {
 	u64 max_icm_sz;
 	int max_gso_sz;
 	int max_rss_tbl_sz;
-	u8  supported_port_types[MLX4_MAX_PORTS + 1];
-	u8  suggested_type[MLX4_MAX_PORTS + 1];
-	u8  default_sense[MLX4_MAX_PORTS + 1];
-	u8  log_max_macs[MLX4_MAX_PORTS + 1];
-	u8  log_max_vlans[MLX4_MAX_PORTS + 1];
 	u32 max_counters;
+	u32 dmfs_high_rate_qpn_base;
+	u32 dmfs_high_rate_qpn_range;
+	struct mlx4_port_cap port_cap[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_func_cap {
@@ -144,6 +151,7 @@ struct mlx4_func_cap {
 	u8	port_flags;
 	u8	flags1;
 	u64	phys_port_id;
+	u32	extra_flags;
 };
 
 struct mlx4_func {
@@ -189,6 +197,7 @@ struct mlx4_init_hca_param {
 	u8 mw_enabled;  /* Enable memory windows */
 	u8 uar_page_sz; /* log pg sz in 4k chunks */
 	u8 steering_mode; /* for QUERY_HCA */
+	u8 dmfs_high_steer_mode; /* for QUERY_HCA */
 	u64 dev_cap_enabled;
 	u16 cqe_size; /* For use only when CQE stride feature enabled */
 	u16 eqe_size; /* For use only when EQE stride feature enabled */
@@ -216,6 +225,7 @@ struct mlx4_set_ib_param {
 };
 
 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap);
 int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
 			struct mlx4_func_cap *func_cap);
 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
This diff is collapsed.
@@ -999,12 +999,27 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
 	}
 
 	ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
-	if (ret == -ENOMEM)
+	if (ret == -ENOMEM) {
 		mlx4_err_rule(dev,
 			      "mcg table is full. Fail to register network rule\n",
 			      rule);
-	else if (ret)
-		mlx4_err_rule(dev, "Fail to register network rule\n", rule);
+	} else if (ret) {
+		if (ret == -ENXIO) {
+			if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+				mlx4_err_rule(dev,
+					      "DMFS is not enabled, "
+					      "failed to register network rule.\n",
+					      rule);
+			else
+				mlx4_err_rule(dev,
+					      "Rule exceeds the dmfs_high_rate_mode limitations, "
+					      "failed to register network rule.\n",
+					      rule);
+		} else {
+			mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
+		}
+	}
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
@@ -43,6 +43,8 @@
 #include <linux/timer.h>
 #include <linux/semaphore.h>
 #include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/driver.h>
@@ -243,6 +245,7 @@ struct mlx4_bitmap {
 	u32			reserved_top;
 	u32			mask;
 	u32			avail;
+	u32			effective_len;
 	spinlock_t		lock;
 	unsigned long	       *table;
 };
@@ -373,6 +376,14 @@ struct mlx4_srq_context {
 	__be64			db_rec_addr;
 };
 
+struct mlx4_eq_tasklet {
+	struct list_head list;
+	struct list_head process_list;
+	struct tasklet_struct task;
+	/* lock on completion tasklet list */
+	spinlock_t lock;
+};
+
 struct mlx4_eq {
 	struct mlx4_dev	       *dev;
 	void __iomem	       *doorbell;
@@ -383,6 +394,7 @@ struct mlx4_eq {
 	int			nent;
 	struct mlx4_buf_list   *page_list;
 	struct mlx4_mtt		mtt;
+	struct mlx4_eq_tasklet	tasklet_ctx;
 };
 
 struct mlx4_slave_eqe {
@@ -670,8 +682,17 @@ struct mlx4_srq_table {
 	struct mlx4_icm_table	cmpt_table;
 };
 
+enum mlx4_qp_table_zones {
+	MLX4_QP_TABLE_ZONE_GENERAL,
+	MLX4_QP_TABLE_ZONE_RSS,
+	MLX4_QP_TABLE_ZONE_RAW_ETH,
+	MLX4_QP_TABLE_ZONE_NUM
+};
+
 struct mlx4_qp_table {
-	struct mlx4_bitmap	bitmap;
+	struct mlx4_bitmap	*bitmap_gen;
+	struct mlx4_zone_allocator *zones;
+	u32			zones_uids[MLX4_QP_TABLE_ZONE_NUM];
 	u32			rdmarc_base;
 	int			rdmarc_shift;
 	spinlock_t		lock;
@@ -873,7 +894,8 @@ extern struct workqueue_struct *mlx4_wq;
 
 u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
 void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj, int use_rr);
-u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt,
+			    int align, u32 skip_mask);
 void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt,
			    int use_rr);
 u32 mlx4_bitmap_avail(struct mlx4_bitmap *bitmap);
@@ -959,7 +981,7 @@ int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd);
 int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
-			    int *base);
+			    int *base, u8 flags);
 void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
@@ -1146,6 +1168,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev);
 int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  unsigned long timeout);
 
+void mlx4_cq_tasklet_cb(unsigned long data);
 void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
 void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
 
@@ -1332,4 +1355,72 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
 int mlx4_config_mad_demux(struct mlx4_dev *dev);
 
+enum mlx4_zone_flags {
+	MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO	= 1UL << 0,
+	MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO	= 1UL << 1,
+	MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO	= 1UL << 2,
+	MLX4_ZONE_USE_RR			= 1UL << 3,
+};
+
+enum mlx4_zone_alloc_flags {
+	/* No two objects could overlap between zones. UID
+	 * could be left unused. If this flag is given and
+	 * two overlapped zones are used, an object will be free'd
+	 * from the smallest possible matching zone.
+	 */
+	MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP	= 1UL << 0,
+};
+
+struct mlx4_zone_allocator;
+
+/* Create a new zone allocator */
+struct mlx4_zone_allocator *mlx4_zone_allocator_create(enum mlx4_zone_alloc_flags flags);
+
+/* Attach a mlx4_bitmap <bitmap> of priority <priority> to the zone allocator
+ * <zone_alloc>. Allocating an object from this zone adds an offset <offset>.
+ * Similarly, when searching for an object to free, this offset it taken into
+ * account. The use_rr mlx4_ib parameter for allocating objects from this <bitmap>
+ * is given through the MLX4_ZONE_USE_RR flag in <flags>.
+ * When an allocation fails, <zone_alloc> tries to allocate from other zones
+ * according to the policy set by <flags>. <puid> is the unique identifier
+ * received to this zone.
+ */
+int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc,
+		      struct mlx4_bitmap *bitmap,
+		      u32 flags,
+		      int priority,
+		      int offset,
+		      u32 *puid);
+
+/* Remove bitmap indicated by <uid> from <zone_alloc> */
+int mlx4_zone_remove_one(struct mlx4_zone_allocator *zone_alloc, u32 uid);
+
+/* Delete the zone allocator <zone_alloc. This function doesn't destroy
+ * the attached bitmaps.
+ */
+void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc);
+
+/* Allocate <count> objects with align <align> and skip_mask <skip_mask>
+ * from the mlx4_bitmap whose uid is <uid>. The bitmap which we actually
+ * allocated from is returned in <puid>. If the allocation fails, a negative
+ * number is returned. Otherwise, the offset of the first object is returned.
+ */
+u32 mlx4_zone_alloc_entries(struct mlx4_zone_allocator *zones, u32 uid, int count,
+			    int align, u32 skip_mask, u32 *puid);
+
+/* Free <count> objects, start from <obj> of the uid <uid> from zone_allocator
+ * <zones>.
+ */
+u32 mlx4_zone_free_entries(struct mlx4_zone_allocator *zones,
+			   u32 uid, u32 obj, u32 count);
+
+/* If <zones> was allocated with MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP, instead of
+ * specifying the uid when freeing an object, zone allocator could figure it by
+ * itself. Other parameters are similar to mlx4_zone_free.
+ */
+u32 mlx4_zone_free_entries_unique(struct mlx4_zone_allocator *zones, u32 obj, u32 count);
+
+/* Returns a pointer to mlx4_bitmap that was attached to <zones> with <uid> */
+struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 uid);
+
 #endif /* MLX4_H */
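For orientation, here is a minimal usage sketch of the zone-allocator API declared above. It is not part of the series: zone_alloc_demo() is a hypothetical caller, the attached bitmap is assumed to have been initialized elsewhere by the driver's bitmap setup code, and failure of mlx4_zone_alloc_entries() is assumed to be reported as (u32)-1, matching the "negative number" note in the header comment.

static int zone_alloc_demo(struct mlx4_bitmap *bitmap)
{
	struct mlx4_zone_allocator *zones;
	u32 uid, puid, obj;
	int err;

	zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);
	if (!zones)
		return -ENOMEM;

	/* Attach one bitmap as a zone: may borrow from lower-priority zones,
	 * priority 0, no offset added to allocated objects.
	 */
	err = mlx4_zone_add_one(zones, bitmap, MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO,
				0, 0, &uid);
	if (err)
		goto out;

	/* Reserve 8 objects aligned to 8 with no skip mask; <puid> reports
	 * which zone actually satisfied the request.
	 */
	obj = mlx4_zone_alloc_entries(zones, uid, 8, 8, 0, &puid);
	if (obj != (u32)-1)
		mlx4_zone_free_entries(zones, uid, obj, 8);

out:
	mlx4_zone_allocator_destroy(zones);
	return err;
}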
@@ -778,7 +778,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
 int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_tx_ring **pring,
-			   int qpn, u32 size, u16 stride,
+			   u32 size, u16 stride,
 			   int node, int queue_index);
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_tx_ring **pring);
This diff is collapsed.
@@ -1543,16 +1543,21 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 	int align;
 	int base;
 	int qpn;
+	u8 flags;
 
 	switch (op) {
 	case RES_OP_RESERVE:
 		count = get_param_l(&in_param) & 0xffffff;
+		/* Turn off all unsupported QP allocation flags that the
+		 * slave tries to set.
+		 */
+		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
 		align = get_param_h(&in_param);
 		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
 		if (err)
 			return err;
 
-		err = __mlx4_qp_reserve_range(dev, count, align, &base);
+		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
 		if (err) {
 			mlx4_release_resource(dev, slave, RES_QP, count, 0);
 			return err;
@@ -117,6 +117,14 @@ enum {
 	MLX4_STEERING_MODE_DEVICE_MANAGED
 };
 
+enum {
+	MLX4_STEERING_DMFS_A0_DEFAULT,
+	MLX4_STEERING_DMFS_A0_DYNAMIC,
+	MLX4_STEERING_DMFS_A0_STATIC,
+	MLX4_STEERING_DMFS_A0_DISABLE,
+	MLX4_STEERING_DMFS_A0_NOT_SUPPORTED
+};
+
 static inline const char *mlx4_steering_mode_str(int steering_mode)
 {
 	switch (steering_mode) {
@@ -191,7 +199,26 @@ enum {
 	MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP	= 1LL << 15,
 	MLX4_DEV_CAP_FLAG2_CONFIG_DEV		= 1LL << 16,
 	MLX4_DEV_CAP_FLAG2_SYS_EQS		= 1LL << 17,
-	MLX4_DEV_CAP_FLAG2_80_VFS		= 1LL << 18
+	MLX4_DEV_CAP_FLAG2_80_VFS		= 1LL << 18,
+	MLX4_DEV_CAP_FLAG2_FS_A0		= 1LL << 19
+};
+
+enum {
+	MLX4_QUERY_FUNC_FLAGS_BF_RES_QP		= 1LL << 0,
+	MLX4_QUERY_FUNC_FLAGS_A0_RES_QP		= 1LL << 1
+};
+
+/* bit enums for an 8-bit flags field indicating special use
+ * QPs which require special handling in qp_reserve_range.
+ * Currently, this only includes QPs used by the ETH interface,
+ * where we expect to use blueflame. These QPs must not have
+ * bits 6 and 7 set in their qp number.
+ *
+ * This enum may use only bits 0..7.
+ */
+enum {
+	MLX4_RESERVE_A0_QP	= 1 << 6,
+	MLX4_RESERVE_ETH_BF_QP	= 1 << 7,
 };
 
 enum {
@@ -207,7 +234,8 @@ enum {
 
 enum {
 	MLX4_FUNC_CAP_64B_EQE_CQE	= 1L << 0,
-	MLX4_FUNC_CAP_EQE_CQE_STRIDE	= 1L << 1
+	MLX4_FUNC_CAP_EQE_CQE_STRIDE	= 1L << 1,
+	MLX4_FUNC_CAP_DMFS_A0_STATIC	= 1L << 2
 };
 
@@ -333,6 +361,8 @@ enum {
 
 enum mlx4_qp_region {
 	MLX4_QP_REGION_FW = 0,
+	MLX4_QP_REGION_RSS_RAW_ETH,
+	MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH,
 	MLX4_QP_REGION_ETH_ADDR,
 	MLX4_QP_REGION_FC_ADDR,
 	MLX4_QP_REGION_FC_EXCH,
@@ -462,6 +492,7 @@ struct mlx4_caps {
 	int			reserved_mcgs;
 	int			num_qp_per_mgm;
 	int			steering_mode;
+	int			dmfs_high_steer_mode;
 	int			fs_log_max_ucast_qp_range_size;
 	int			num_pds;
 	int			reserved_pds;
@@ -501,6 +532,9 @@ struct mlx4_caps {
 	u64			phys_port_id[MLX4_MAX_PORTS + 1];
 	int			tunnel_offload_mode;
 	u8			rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+	u8			alloc_res_qp_mask;
+	u32			dmfs_high_rate_qpn_base;
+	u32			dmfs_high_rate_qpn_range;
 };
 
 struct mlx4_buf_list {
@@ -621,6 +655,11 @@ struct mlx4_cq {
 
 	atomic_t		refcount;
 	struct completion	free;
+	struct {
+		struct list_head list;
+		void (*comp)(struct mlx4_cq *);
+		void		*priv;
+	} tasklet_ctx;
 };
 
 struct mlx4_qp {
@@ -869,7 +908,9 @@ static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
 static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
 {
 	return (qpn < dev->phys_caps.base_sqpn + 8 +
-		16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev));
+		16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) &&
+		qpn >= dev->phys_caps.base_sqpn) ||
+		(qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]);
 }
 
 static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
@@ -945,8 +986,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
 		  unsigned vector, int collapsed, int timestamp_en);
 void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
 
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+			  int *base, u8 flags);
 void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
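As a quick illustration of the extended reservation interface above (a sketch only; reserve_tx_bf_qp() is a hypothetical caller, not part of the series): a consumer that wants a BlueFlame-capable Ethernet QP passes MLX4_RESERVE_ETH_BF_QP in the new flags argument, which constrains the returned QP number to have bits 6 and 7 clear, while passing 0 preserves the old behaviour. This mirrors what mlx4_en_create_tx_ring() does in the en_tx.c hunk above.

static int reserve_tx_bf_qp(struct mlx4_dev *dev, int *qpn)
{
	/* one QP, alignment 1, flags select the BlueFlame-friendly range */
	return mlx4_qp_reserve_range(dev, 1, 1, qpn, MLX4_RESERVE_ETH_BF_QP);
}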