Commit a6fc8594 authored by David S. Miller

Merge tag 'mlx5-fixes-2018-07-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2018-07-18

The following series provides fixes to the mlx5 core and net device driver.

Please pull and let me know if there's any problem.

For -stable v4.7
    net/mlx5e: Don't allow aRFS for encapsulated packets
    net/mlx5e: Fix quota counting in aRFS expire flow

For -stable v4.15
    net/mlx5e: Only allow offloading decap egress (egdev) flows
    net/mlx5e: Refine ets validation function
    net/mlx5: Adjust clock overflow work period

For -stable v4.17
    net/mlx5: E-Switch, UBSAN fix undefined behavior in mlx5_eswitch_mode
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f1d66bf9 7e29392e
@@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
 	int i;
 
 	buf->size = size;
-	buf->npages = 1 << get_order(size);
+	buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
 	buf->page_shift = PAGE_SHIFT;
 	buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
 			     GFP_KERNEL);
...
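The practical difference: 1 << get_order(size) rounds the page count up to the next power of two, while DIV_ROUND_UP(size, PAGE_SIZE) counts pages exactly, matching the per-page fragment list that kcalloc() sizes just below. A userspace sketch of the two countings (PAGE_SIZE, get_order() and DIV_ROUND_UP() are re-implemented locally for illustration; they are not the kernel's definitions):

#include <stdio.h>

#define PAGE_SIZE 4096
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Power-of-two page count, as the kernel's get_order() would give. */
static int get_order(unsigned long size)
{
	unsigned long pages = DIV_ROUND_UP(size, PAGE_SIZE);
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 3 * PAGE_SIZE;	/* any non-power-of-two count */

	printf("old: %d pages\n", 1 << get_order(size));		/* 4 */
	printf("new: %lu pages\n", DIV_ROUND_UP(size, PAGE_SIZE));	/* 3 */
	return 0;
}

For sizes that are not a power-of-two number of pages the two formulas disagree, and the fragment offset math in this series relies on the exact count.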
@@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
 	HLIST_HEAD(del_list);
 	spin_lock_bh(&priv->fs.arfs.arfs_lock);
 	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
-		if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
-			break;
 		if (!work_pending(&arfs_rule->arfs_work) &&
 		    rps_may_expire_flow(priv->netdev,
					arfs_rule->rxq, arfs_rule->flow_id,
					arfs_rule->filter_id)) {
 			hlist_del_init(&arfs_rule->hlist);
 			hlist_add_head(&arfs_rule->hlist, &del_list);
+			if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+				break;
 		}
 	}
 	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
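The behavioral change, as a self-contained userspace sketch (the rule population and quota value are invented): the old placement charged every scanned rule against the quota, so a table that begins with enough busy rules could exhaust the quota before reaching a single expirable one; the fix charges only rules actually queued for deletion.

#include <stdbool.h>
#include <stdio.h>

#define EXPIRY_QUOTA	20
#define NUM_RULES	100

/* Pretend only the last 10 rules are idle enough to expire. */
static bool may_expire(int i) { return i >= 90; }

static int sweep(bool charge_scanned)
{
	int quota = 0, removed = 0;

	for (int i = 0; i < NUM_RULES; i++) {
		if (charge_scanned && quota++ > EXPIRY_QUOTA)
			break;				/* old placement */
		if (may_expire(i)) {
			removed++;			/* hlist move in the driver */
			if (!charge_scanned && quota++ > EXPIRY_QUOTA)
				break;			/* fixed placement */
		}
	}
	return removed;
}

int main(void)
{
	printf("old (charge scanned rules): %d expired\n", sweep(true));  /* 0 */
	printf("new (charge removed rules): %d expired\n", sweep(false)); /* 10 */
	return 0;
}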
@@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	    skb->protocol != htons(ETH_P_IPV6))
 		return -EPROTONOSUPPORT;
 
+	if (skb->encapsulation)
+		return -EPROTONOSUPPORT;
+
 	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
 	if (!arfs_t)
 		return -EPROTONOSUPPORT;
...
@@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 }
 
 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
-				    struct ieee_ets *ets)
+				    struct ieee_ets *ets,
+				    bool zero_sum_allowed)
 {
 	bool have_ets_tc = false;
 	int bw_sum = 0;
@@ -300,8 +301,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
 	}
 
 	if (have_ets_tc && bw_sum != 100) {
-		netdev_err(netdev,
-			   "Failed to validate ETS: BW sum is illegal\n");
+		if (bw_sum || (!bw_sum && !zero_sum_allowed))
+			netdev_err(netdev,
+				   "Failed to validate ETS: BW sum is illegal\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
 		return -EOPNOTSUPP;
 
-	err = mlx5e_dbcnl_validate_ets(netdev, ets);
+	err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
 	if (err)
 		return err;
@@ -642,12 +644,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
 				 ets.prio_tc[i]);
 	}
 
-	err = mlx5e_dbcnl_validate_ets(netdev, &ets);
-	if (err) {
-		netdev_err(netdev,
-			   "%s, Failed to validate ETS: %d\n", __func__, err);
+	err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
+	if (err)
 		goto out;
-	}
 
 	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
 	if (err) {
...
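A minimal userspace model of the refined check (only the condition is taken from the diff; the harness around it is not the driver's): an ETS configuration whose bandwidth sum is not 100 is still rejected with -EINVAL either way, but the error print is suppressed for an all-zero sum when the caller passes zero_sum_allowed = true, as the mlx5e_dcbnl_setall() path above now does.

#include <stdbool.h>
#include <stdio.h>

static int validate_ets(int bw_sum, bool have_ets_tc, bool zero_sum_allowed)
{
	if (have_ets_tc && bw_sum != 100) {
		if (bw_sum || (!bw_sum && !zero_sum_allowed))
			fprintf(stderr,
				"Failed to validate ETS: BW sum is illegal\n");
		return -1;	/* stands in for -EINVAL */
	}
	return 0;
}

int main(void)
{
	validate_ets(0,  true, true);	/* rejected, but not logged */
	validate_ets(0,  true, false);	/* rejected and logged */
	validate_ets(90, true, true);	/* rejected and logged */
	return validate_ets(100, true, false) ? 1 : 0;	/* accepted */
}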
@@ -1957,6 +1957,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 	else
 		actions = flow->nic_attr->action;
 
+	if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+	    !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+		return false;
+
 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		return modify_header_match_supported(&parse_attr->spec, exts);
 
...
@@ -2216,6 +2216,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 
 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
 {
-	return esw->mode;
+	return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
 }
 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
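The guard pattern behind the UBSAN fix, as a hedged userspace sketch (ESW_ALLOWED is modeled by a plain flag; the real macro checks device capabilities): when the E-Switch was never brought up, esw->mode holds no meaningful value, so the accessor now reports an explicit SRIOV_NONE instead of whatever happens to be in the field.

#include <stdio.h>

enum { SRIOV_NONE, SRIOV_LEGACY, SRIOV_OFFLOADS };

struct eswitch {
	int allowed;	/* stands in for ESW_ALLOWED(esw) */
	int mode;	/* only meaningful when allowed */
};

static int eswitch_mode(const struct eswitch *esw)
{
	return esw->allowed ? esw->mode : SRIOV_NONE;
}

int main(void)
{
	struct eswitch off = { .allowed = 0 };	/* .mode never set up */

	/* Always a defined enum value, never an uninitialized read. */
	printf("mode = %d\n", eswitch_mode(&off));
	return 0;
}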
@@ -1887,7 +1887,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
 		if (!fwd_next_prio_supported(ft))
 			return ERR_PTR(-EOPNOTSUPP);
-		if (dest)
+		if (dest_num)
 			return ERR_PTR(-EINVAL);
 		mutex_lock(&root->chain_lock);
 		next_ft = find_next_chained_ft(prio);
...
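Why dest_num rather than dest: a FWD_NEXT_PRIO rule must carry no destination entries, but a caller can legitimately hand in a non-NULL destination array with zero filled-in entries, which the pointer test rejected. A sketch of the distinction (types and names invented for illustration):

#include <stdio.h>

struct flow_dest { int type; };

static int add_rule(const struct flow_dest *dest, int dest_num)
{
	/* old: if (dest) return -1;  -- would wrongly fail the call below */
	if (dest_num)		/* fixed: reject only actual entries */
		return -1;	/* stands in for ERR_PTR(-EINVAL) */
	return 0;
}

int main(void)
{
	struct flow_dest scratch[4];	/* non-NULL, but no entries used */

	printf("dest=%p dest_num=0 -> %d\n", (void *)scratch,
	       add_rule(scratch, 0));	/* accepted with the fix */
	return 0;
}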
@@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
 void mlx5_init_clock(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_clock *clock = &mdev->clock;
+	u64 overflow_cycles;
 	u64 ns;
 	u64 frac = 0;
 	u32 dev_freq;
@@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
 
 	/* Calculate period in seconds to call the overflow watchdog - to make
 	 * sure counter is checked at least once every wrap around.
+	 * The period is calculated as the minimum between max HW cycles count
+	 * (The clock source mask) and max amount of cycles that can be
+	 * multiplied by clock multiplier where the result doesn't exceed
+	 * 64bits.
 	 */
-	ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+	overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+	overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+
+	ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
 				 frac, &frac);
-	do_div(ns, NSEC_PER_SEC / 2 / HZ);
+	do_div(ns, NSEC_PER_SEC / HZ);
 	clock->overflow_period = ns;
 
 	mdev->clock_info_page = alloc_page(GFP_KERNEL);
...
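The new period math, with invented but plausible numbers (a 41-bit counter ticking at 1 GHz; mult/shift as a cyclecounter would carry them; frac handling omitted): the watchdog interval is bounded both by half the hardware counter range and by the largest cycle count whose product with mult still fits in 64 bits.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask  = (1ULL << 41) - 1;	/* HW counter width */
	uint32_t mult  = 1U << 31;		/* cyc2ns multiplier */
	uint32_t shift = 31;			/* ns = (cyc * mult) >> shift */

	/* Largest cycle count that cannot overflow a u64 when multiplied... */
	uint64_t overflow_cycles = (~0ULL >> 1) / mult;

	/* ...capped at half the counter range, as in the patch. */
	if (overflow_cycles > mask >> 1)
		overflow_cycles = mask >> 1;

	uint64_t ns = (overflow_cycles * mult) >> shift;

	printf("overflow_cycles = %llu\n", (unsigned long long)overflow_cycles);
	printf("check period ~ %llu s\n", (unsigned long long)(ns / 1000000000ULL));
	return 0;
}

With these numbers the work item would run roughly every 4 seconds, comfortably more than twice per counter wraparound.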
@@ -113,35 +113,45 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 	return err;
 }
 
-static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
-				  struct mlx5_wq_qp *qp)
+static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+				 struct mlx5_wq_qp *qp)
 {
+	struct mlx5_frag_buf_ctrl *sq_fbc;
 	struct mlx5_frag_buf *rqb, *sqb;
 
 	rqb = &qp->rq.fbc.frag_buf;
 	*rqb = *buf;
 	rqb->size   = mlx5_wq_cyc_get_byte_size(&qp->rq);
-	rqb->npages = 1 << get_order(rqb->size);
+	rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
 
-	sqb = &qp->sq.fbc.frag_buf;
-	*sqb = *buf;
-	sqb->size   = mlx5_wq_cyc_get_byte_size(&qp->rq);
-	sqb->npages = 1 << get_order(sqb->size);
+	sq_fbc = &qp->sq.fbc;
+	sqb    = &sq_fbc->frag_buf;
+	*sqb   = *buf;
+	sqb->size   = mlx5_wq_cyc_get_byte_size(&qp->sq);
+	sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
 	sqb->frags += rqb->npages; /* first part is for the rq */
+	if (sq_fbc->strides_offset)
+		sqb->frags--;
 }
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl)
 {
+	u32 sq_strides_offset;
 	int err;
 
 	mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
 		      MLX5_GET(qpc, qpc, log_rq_size),
 		      &wq->rq.fbc);
-	mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
-		      MLX5_GET(qpc, qpc, log_sq_size),
-		      &wq->sq.fbc);
+
+	sq_strides_offset =
+		((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+
+	mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
+			     MLX5_GET(qpc, qpc, log_sq_size),
+			     sq_strides_offset,
+			     &wq->sq.fbc);
 
 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
@@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		goto err_db_free;
 	}
 
-	mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
+	mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
 
 	wq->rq.db  = &wq_ctrl->db.db[MLX5_RCV_DBR];
 	wq->sq.db  = &wq_ctrl->db.db[MLX5_SND_DBR];
...
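An arithmetic sketch of the walk-back (sizes invented; the stand-in offset computation uses the RQ byte remainder rather than the driver's frag_sz_m1 expression): when the RQ does not fill its last page, the SQ begins part-way into that same page, so the SQ's first fragment pointer must be the RQ's last page rather than the one after it.

#include <stdio.h>

#define PAGE_SIZE	4096
#define SEND_WQE_BB	64	/* MLX5_SEND_WQE_BB */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int rq_size = 2048;	/* RQ fills half a page */
	unsigned int rq_npages = DIV_ROUND_UP(rq_size, PAGE_SIZE);	/* 1 */

	/* Stand-in for sq_strides_offset: SQ strides already consumed in
	 * the page the RQ ends in. */
	unsigned int sq_strides_offset = (rq_size % PAGE_SIZE) / SEND_WQE_BB;

	/* sqb->frags += rq_npages, then one step back if the SQ starts
	 * mid-page -- mirroring the if (sq_fbc->strides_offset) branch. */
	unsigned int sq_first_frag = rq_npages - (sq_strides_offset ? 1 : 0);

	printf("SQ starts in fragment %u, at stride %u within it\n",
	       sq_first_frag, sq_strides_offset);	/* fragment 0, stride 32 */
	return 0;
}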
@@ -358,6 +358,7 @@ struct mlx5_frag_buf_ctrl {
 	struct mlx5_frag_buf	frag_buf;
 	u32			sz_m1;
 	u32			frag_sz_m1;
+	u32			strides_offset;
 	u8			log_sz;
 	u8			log_stride;
 	u8			log_frag_strides;
@@ -983,14 +984,22 @@ static inline u32 mlx5_base_mkey(const u32 key)
 	return key & 0xffffff00u;
 }
 
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
-				 struct mlx5_frag_buf_ctrl *fbc)
+static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+					u32 strides_offset,
+					struct mlx5_frag_buf_ctrl *fbc)
 {
 	fbc->log_stride = log_stride;
 	fbc->log_sz     = log_sz;
 	fbc->sz_m1	= (1 << fbc->log_sz) - 1;
 	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
 	fbc->frag_sz_m1	= (1 << fbc->log_frag_strides) - 1;
+	fbc->strides_offset = strides_offset;
+}
+
+static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+				 struct mlx5_frag_buf_ctrl *fbc)
+{
+	mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
 }
 
 static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
@@ -1004,7 +1013,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
 static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
 					  u32 ix)
 {
-	unsigned int frag = (ix >> fbc->log_frag_strides);
+	unsigned int frag;
+
+	ix  += fbc->strides_offset;
+	frag = ix >> fbc->log_frag_strides;
 
 	return fbc->frag_buf.frags[frag].buf +
 	       ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
...
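To see the strides_offset plumbing end to end, a userspace model of mlx5_frag_buf_get_wqe() (structure and field names mirror the header; the fragments are simulated with malloc'ed pages): shifting the index before deriving the fragment and in-fragment offset is what lets a queue's stride 0 live part-way into a shared fragment.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1u << PAGE_SHIFT)

struct frag_buf_ctrl {
	void **frags;			/* one PAGE_SIZE fragment each */
	unsigned int frag_sz_m1;	/* strides per fragment, minus 1 */
	unsigned int strides_offset;
	unsigned char log_stride;
	unsigned char log_frag_strides;
};

static void *frag_buf_get_wqe(const struct frag_buf_ctrl *fbc, unsigned int ix)
{
	unsigned int frag;

	ix  += fbc->strides_offset;	/* the fix: offset applied first */
	frag = ix >> fbc->log_frag_strides;

	return (char *)fbc->frags[frag] +
	       ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}

int main(void)
{
	void *frags[2] = { malloc(PAGE_SIZE), malloc(PAGE_SIZE) };
	struct frag_buf_ctrl fbc = {
		.frags = frags,
		.log_stride = 6,			/* 64 B strides */
		.log_frag_strides = PAGE_SHIFT - 6,	/* 64 per fragment */
		.frag_sz_m1 = (1u << (PAGE_SHIFT - 6)) - 1,
		.strides_offset = 32,	/* queue starts mid-fragment */
	};

	printf("wqe 0  -> frag 0, byte %td\n",
	       (char *)frag_buf_get_wqe(&fbc, 0) - (char *)frags[0]);  /* 2048 */
	printf("wqe 32 -> frag 1, byte %td\n",
	       (char *)frag_buf_get_wqe(&fbc, 32) - (char *)frags[1]); /* 0 */

	free(frags[0]);
	free(frags[1]);
	return 0;
}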