Commit 93698321 authored by David S. Miller

Merge tag 'mlx5e-updates-2018-12-10' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-12-10 (gre)

This patch set adds GRE offloading support to the Mellanox ethernet driver.

Patches 1-5 replace the existing egdev mechanism with the new TC indirect
block binds mechanism that was introduced by Netronome:
7f76fa36 ("net: sched: register callbacks for indirect tc block binds")
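
For context, the consumer side of that mechanism (as used in en_rep.c below)
boils down to registering a callback against a tunnel device the driver does
not own; a rough sketch, with the priv/callback names simplified:

	/* Sketch only: ask TC to report block binds happening on someone
	 * else's netdev (e.g. a gretap device). The last argument is the
	 * cb_ident later passed to __tc_indr_block_cb_unregister().
	 */
	err = __tc_indr_block_cb_register(tunnel_netdev, driver_priv,
					  my_setup_tc_cb, tunnel_netdev);
	if (err)
		return err;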

Patches 6-9 add GRE offloading support along with some required
refactoring work.

Patch 10, Add netif_is_gretap()/netif_is_ip6gretap()
 - Changed the is_gretap_dev and is_ip6gretap_dev logic from structure
   comparison to string comparison of the rtnl_link_ops kind field.
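
   The resulting helpers reduce to a kind-string check; roughly (a sketch
   based on the description above, not a verbatim copy of the patch):

	static inline bool netif_is_gretap(const struct net_device *dev)
	{
		return dev->rtnl_link_ops &&
		       !strcmp(dev->rtnl_link_ops->kind, "gretap");
	}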

Patch 11, add GRE offloading support to mlx5.

Patch 12 removes the egdev mechanism from TC as it is no longer used by
any of the drivers.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d8ed257f 69bd4840
@@ -647,8 +647,13 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
 				flags, local_page_list, NULL, NULL);
 		up_read(&owning_mm->mmap_sem);
-		if (npages < 0)
+		if (npages < 0) {
+			if (npages != -EAGAIN)
+				pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
+			else
+				pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
 			break;
+		}
 		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
 		mutex_lock(&umem_odp->umem_mutex);
@@ -666,8 +671,13 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
 			ret = ib_umem_odp_map_dma_single_page(
 					umem_odp, k, local_page_list[j],
 					access_mask, current_seq);
-			if (ret < 0)
+			if (ret < 0) {
+				if (ret != -EAGAIN)
+					pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
+				else
+					pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
 				break;
+			}
 			p = page_to_phys(local_page_list[j]);
 			k++;
...
 obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
 
-mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o cong.o
+mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq_cmd.o \
+	     srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o \
+	     cong.o
 mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
 mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
 mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
...
@@ -35,6 +35,7 @@
 #include <rdma/ib_user_verbs.h>
 #include <rdma/ib_cache.h>
 #include "mlx5_ib.h"
+#include "srq.h"
 
 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
 {
@@ -81,7 +82,7 @@ static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
 
-	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
+	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
 	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
 		return cqe;
 	} else {
@@ -177,8 +178,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 		struct mlx5_core_srq *msrq = NULL;
 
 		if (qp->ibqp.xrcd) {
-			msrq = mlx5_core_get_srq(dev->mdev,
-						 be32_to_cpu(cqe->srqn));
+			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
 			srq = to_mibsrq(msrq);
 		} else {
 			srq = to_msrq(qp->ibqp.srq);
@@ -197,7 +197,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 	}
 	wc->byte_len = be32_to_cpu(cqe->byte_cnt);
 
-	switch (cqe->op_own >> 4) {
+	switch (get_cqe_opcode(cqe)) {
 	case MLX5_CQE_RESP_WR_IMM:
 		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
 		wc->wc_flags = IB_WC_WITH_IMM;
@@ -537,7 +537,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 	 */
 	rmb();
 
-	opcode = cqe64->op_own >> 4;
+	opcode = get_cqe_opcode(cqe64);
 	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
 		if (likely(cq->resize_buf)) {
 			free_cq_buf(dev, &cq->buf);
@@ -1295,7 +1295,7 @@ static int copy_resize_cqes(struct mlx5_ib_cq *cq)
 		return -EINVAL;
 	}
 
-	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
+	while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
 		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
 					     (i + 1) & cq->resize_buf->nent);
 		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
...
@@ -4,6 +4,7 @@
  */
 
 #include "ib_rep.h"
+#include "srq.h"
 
 static const struct mlx5_ib_profile rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_INIT,
@@ -21,6 +22,9 @@ static const struct mlx5_ib_profile rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
 		     mlx5_ib_stage_rep_roce_init,
 		     mlx5_ib_stage_rep_roce_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
+		     mlx5_init_srq_table,
+		     mlx5_cleanup_srq_table),
 	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
 		     mlx5_ib_stage_dev_res_init,
 		     mlx5_ib_stage_dev_res_cleanup),
...
This diff is collapsed.
@@ -41,7 +41,6 @@
 #include <linux/mlx5/cq.h>
 #include <linux/mlx5/fs.h>
 #include <linux/mlx5/qp.h>
-#include <linux/mlx5/srq.h>
 #include <linux/mlx5/fs.h>
 #include <linux/types.h>
 #include <linux/mlx5/transobj.h>
@@ -50,6 +49,8 @@
 #include <rdma/uverbs_ioctl.h>
 #include <rdma/mlx5_user_ioctl_cmds.h>
 
+#include "srq.h"
+
 #define mlx5_ib_dbg(_dev, format, arg...) \
 	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
 		__LINE__, current->pid, ##arg)
@@ -774,7 +775,9 @@ enum mlx5_ib_stages {
 	MLX5_IB_STAGE_CAPS,
 	MLX5_IB_STAGE_NON_DEFAULT_CB,
 	MLX5_IB_STAGE_ROCE,
+	MLX5_IB_STAGE_SRQ,
 	MLX5_IB_STAGE_DEVICE_RESOURCES,
+	MLX5_IB_STAGE_DEVICE_NOTIFIER,
 	MLX5_IB_STAGE_ODP,
 	MLX5_IB_STAGE_COUNTERS,
 	MLX5_IB_STAGE_CONG_DEBUGFS,
@@ -782,6 +785,7 @@ enum mlx5_ib_stages {
 	MLX5_IB_STAGE_BFREG,
 	MLX5_IB_STAGE_PRE_IB_REG_UMR,
 	MLX5_IB_STAGE_SPECS,
+	MLX5_IB_STAGE_WHITELIST_UID,
 	MLX5_IB_STAGE_IB_REG,
 	MLX5_IB_STAGE_POST_IB_REG_UMR,
 	MLX5_IB_STAGE_DELAY_DROP,
@@ -806,6 +810,7 @@ struct mlx5_ib_multiport_info {
 	struct list_head list;
 	struct mlx5_ib_dev *ibdev;
 	struct mlx5_core_dev *mdev;
+	struct notifier_block mdev_events;
 	struct completion unref_comp;
 	u64 sys_image_guid;
 	u32 mdev_refcnt;
@@ -880,10 +885,20 @@ struct mlx5_ib_lb_state {
 	bool enabled;
 };
 
+struct mlx5_ib_pf_eq {
+	struct mlx5_ib_dev *dev;
+	struct mlx5_eq *core;
+	struct work_struct work;
+	spinlock_t lock; /* Pagefaults spinlock */
+	struct workqueue_struct *wq;
+	mempool_t *pool;
+};
+
 struct mlx5_ib_dev {
 	struct ib_device ib_dev;
 	const struct uverbs_object_tree_def *driver_trees[7];
 	struct mlx5_core_dev *mdev;
+	struct notifier_block mdev_events;
 	struct mlx5_roce roce[MLX5_MAX_PORTS];
 	int num_ports;
 	/* serialize update of capability mask
@@ -902,6 +917,8 @@ struct mlx5_ib_dev {
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 	struct ib_odp_caps odp_caps;
 	u64 odp_max_size;
+	struct mlx5_ib_pf_eq odp_pf_eq;
+
 	/*
 	 * Sleepable RCU that prevents destruction of MRs while they are still
 	 * being used by a page fault handler.
@@ -927,6 +944,7 @@ struct mlx5_ib_dev {
 	u64 sys_image_guid;
 	struct mlx5_memic memic;
 	u16 devx_whitelist_uid;
+	struct mlx5_srq_table srq_table;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1158,9 +1176,8 @@ struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
-void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
-		    struct mlx5_pagefault *pfault);
 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
+void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
 int __init mlx5_ib_odp_init(void);
 void mlx5_ib_odp_cleanup(void);
 void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
@@ -1175,6 +1192,7 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 }
 
 static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
+static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
 static inline int mlx5_ib_odp_init(void) { return 0; }
 static inline void mlx5_ib_odp_cleanup(void) {}
 static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
...
This diff is collapsed.
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials
- *   provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
  */
 
 #include <linux/module.h>
 #include <linux/mlx5/qp.h>
-#include <linux/mlx5/srq.h>
 #include <linux/slab.h>
 #include <rdma/ib_umem.h>
 #include <rdma/ib_user_verbs.h>
 #include "mlx5_ib.h"
+#include "srq.h"
 
-/* not supported currently */
-static int srq_signature;
-
 static void *get_wqe(struct mlx5_ib_srq *srq, int n)
 {
@@ -202,7 +171,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 		err = -ENOMEM;
 		goto err_in;
 	}
-	srq->wq_sig = !!srq_signature;
+	srq->wq_sig = 0;
 
 	in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
 	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
@@ -327,7 +296,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	in.pd = to_mpd(pd)->pdn;
 	in.db_record = srq->db.dma;
-	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
+	err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
 	kvfree(in.pas);
 	if (err) {
 		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
@@ -351,7 +320,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	return &srq->ibsrq;
 
 err_core:
-	mlx5_core_destroy_srq(dev->mdev, &srq->msrq);
+	mlx5_cmd_destroy_srq(dev, &srq->msrq);
 
 err_usr_kern_srq:
 	if (pd->uobject)
@@ -381,7 +350,7 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 			return -EINVAL;
 
 		mutex_lock(&srq->mutex);
-		ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
+		ret = mlx5_cmd_arm_srq(dev, &srq->msrq, attr->srq_limit, 1);
 		mutex_unlock(&srq->mutex);
 
 		if (ret)
@@ -402,7 +371,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
 	if (!out)
 		return -ENOMEM;
 
-	ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
+	ret = mlx5_cmd_query_srq(dev, &srq->msrq, out);
 	if (ret)
 		goto out_box;
@@ -420,7 +389,7 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq)
 	struct mlx5_ib_dev *dev = to_mdev(srq->device);
 	struct mlx5_ib_srq *msrq = to_msrq(srq);
 
-	mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);
+	mlx5_cmd_destroy_srq(dev, &msrq->msrq);
 
 	if (srq->uobject) {
 		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
...
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2013-2018, Mellanox Technologies. All rights reserved.
*/
#ifndef MLX5_IB_SRQ_H
#define MLX5_IB_SRQ_H
enum {
MLX5_SRQ_FLAG_ERR = (1 << 0),
MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
MLX5_SRQ_FLAG_RNDV = (1 << 2),
};
struct mlx5_srq_attr {
u32 type;
u32 flags;
u32 log_size;
u32 wqe_shift;
u32 log_page_size;
u32 wqe_cnt;
u32 srqn;
u32 xrcd;
u32 page_offset;
u32 cqn;
u32 pd;
u32 lwm;
u32 user_index;
u64 db_record;
__be64 *pas;
u32 tm_log_list_size;
u32 tm_next_tag;
u32 tm_hw_phase_cnt;
u32 tm_sw_phase_cnt;
u16 uid;
};
struct mlx5_ib_dev;
struct mlx5_core_srq {
struct mlx5_core_rsc_common common; /* must be first */
u32 srqn;
int max;
size_t max_gs;
size_t max_avail_gather;
int wqe_shift;
void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
atomic_t refcount;
struct completion free;
u16 uid;
};
struct mlx5_srq_table {
struct notifier_block nb;
/* protect radix tree
*/
spinlock_t lock;
struct radix_tree_root tree;
};
int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *in);
int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out);
int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq);
struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn);
int mlx5_init_srq_table(struct mlx5_ib_dev *dev);
void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev);
#endif /* MLX5_IB_SRQ_H */
@@ -12,9 +12,9 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
 # mlx5 core basic
 #
 mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
-		health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
+		health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
 		mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
-		fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \
+		fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
 		diag/fs_tracepoint.o diag/fw_tracer.o
 
 #
@@ -30,7 +30,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
 mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
 mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o
 
 #
 # Core extra
...
@@ -40,9 +40,11 @@
 #include <linux/random.h>
 #include <linux/io-mapping.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/eq.h>
 #include <linux/debugfs.h>
 
 #include "mlx5_core.h"
+#include "lib/eq.h"
 
 enum {
 	CMD_IF_REV = 5,
@@ -313,6 +315,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_FPGA_DESTROY_QP:
 	case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
 	case MLX5_CMD_OP_DEALLOC_MEMIC:
+	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
 		return MLX5_CMD_STAT_OK;
 
 	case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -326,7 +329,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_CREATE_MKEY:
 	case MLX5_CMD_OP_QUERY_MKEY:
 	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
-	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
 	case MLX5_CMD_OP_CREATE_EQ:
 	case MLX5_CMD_OP_QUERY_EQ:
 	case MLX5_CMD_OP_GEN_EQE:
@@ -371,6 +373,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
 	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
 	case MLX5_CMD_OP_QUERY_Q_COUNTER:
+	case MLX5_CMD_OP_SET_MONITOR_COUNTER:
+	case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
 	case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
 	case MLX5_CMD_OP_QUERY_RATE_LIMIT:
 	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
@@ -520,6 +524,8 @@ const char *mlx5_command_str(int command)
 	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
 	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
+	MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
 	MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
 	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
@@ -805,6 +811,8 @@ static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
 	return MLX5_GET(mbox_in, in->first.data, opcode);
 }
 
+static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
+
 static void cb_timeout_handler(struct work_struct *work)
 {
 	struct delayed_work *dwork = container_of(work, struct delayed_work,
@@ -1412,14 +1420,32 @@ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
 	up(&cmd->sem);
 }
 
+static int cmd_comp_notifier(struct notifier_block *nb,
+			     unsigned long type, void *data)
+{
+	struct mlx5_core_dev *dev;
+	struct mlx5_cmd *cmd;
+	struct mlx5_eqe *eqe;
+
+	cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
+	dev = container_of(cmd, struct mlx5_core_dev, cmd);
+	eqe = data;
+
+	mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
+
+	return NOTIFY_OK;
+}
+
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
 {
+	MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
+	mlx5_eq_notifier_register(dev, &dev->cmd.nb);
 	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
 }
 
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
 {
 	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
+	mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
 }
 
 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@@ -1435,7 +1461,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
 	}
 }
 
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
+static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 {
 	struct mlx5_cmd *cmd = &dev->cmd;
 	struct mlx5_cmd_work_ent *ent;
@@ -1533,7 +1559,29 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
 		}
 	}
 }
-EXPORT_SYMBOL(mlx5_cmd_comp_handler);
+
+void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+{
+	unsigned long flags;
+	u64 vector;
+
+	/* wait for pending handlers to complete */
+	mlx5_eq_synchronize_cmd_irq(dev);
+	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+	if (!vector)
+		goto no_trig;
+
+	vector |= MLX5_TRIGGERED_CMD_COMP;
+	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
+	mlx5_cmd_comp_handler(dev, vector, true);
+	return;
+
+no_trig:
+	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+}
 
 static int status_to_err(u8 status)
 {
...
@@ -38,6 +38,7 @@
 #include <rdma/ib_verbs.h>
 #include <linux/mlx5/cq.h>
 #include "mlx5_core.h"
+#include "lib/eq.h"
 
 #define TASKLET_MAX_TIME 2
 #define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
@@ -92,10 +93,10 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
 	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
 	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
-	struct mlx5_eq *eq;
+	struct mlx5_eq_comp *eq;
 	int err;
 
-	eq = mlx5_eqn2eq(dev, eqn);
+	eq = mlx5_eqn2comp_eq(dev, eqn);
 	if (IS_ERR(eq))
 		return PTR_ERR(eq);
@@ -119,12 +120,12 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 	INIT_LIST_HEAD(&cq->tasklet_ctx.list);
 
 	/* Add to comp EQ CQ tree to recv comp events */
-	err = mlx5_eq_add_cq(eq, cq);
+	err = mlx5_eq_add_cq(&eq->core, cq);
 	if (err)
 		goto err_cmd;
 
 	/* Add to async EQ CQ tree to recv async events */
-	err = mlx5_eq_add_cq(&dev->priv.eq_table.async_eq, cq);
+	err = mlx5_eq_add_cq(mlx5_get_async_eq(dev), cq);
 	if (err)
 		goto err_cq_add;
@@ -139,7 +140,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 	return 0;
 
 err_cq_add:
-	mlx5_eq_del_cq(eq, cq);
+	mlx5_eq_del_cq(&eq->core, cq);
 err_cmd:
 	memset(din, 0, sizeof(din));
 	memset(dout, 0, sizeof(dout));
@@ -157,11 +158,11 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
 	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
 	int err;
 
-	err = mlx5_eq_del_cq(&dev->priv.eq_table.async_eq, cq);
+	err = mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
 	if (err)
 		return err;
 
-	err = mlx5_eq_del_cq(cq->eq, cq);
+	err = mlx5_eq_del_cq(&cq->eq->core, cq);
 	if (err)
 		return err;
...
@@ -36,6 +36,7 @@
 #include <linux/mlx5/cq.h>
 #include <linux/mlx5/driver.h>
 #include "mlx5_core.h"
+#include "lib/eq.h"
 
 enum {
 	QP_PID,
@@ -349,6 +350,16 @@ static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
 	return param;
 }
 
+static int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+			      u32 *out, int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
+
+	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
+	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
 static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
 			 int index)
 {
...
@@ -45,75 +45,11 @@ struct mlx5_device_context {
 	unsigned long state;
 };
 
-struct mlx5_delayed_event {
-	struct list_head list;
-	struct mlx5_core_dev *dev;
-	enum mlx5_dev_event event;
-	unsigned long param;
-};
-
 enum {
 	MLX5_INTERFACE_ADDED,
 	MLX5_INTERFACE_ATTACHED,
 };
 
-static void add_delayed_event(struct mlx5_priv *priv,
-			      struct mlx5_core_dev *dev,
-			      enum mlx5_dev_event event,
-			      unsigned long param)
-{
-	struct mlx5_delayed_event *delayed_event;
-
-	delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC);
-	if (!delayed_event) {
-		mlx5_core_err(dev, "event %d is missed\n", event);
-		return;
-	}
-
-	mlx5_core_dbg(dev, "Accumulating event %d\n", event);
-	delayed_event->dev = dev;
-	delayed_event->event = event;
-	delayed_event->param = param;
-	list_add_tail(&delayed_event->list, &priv->waiting_events_list);
-}
-
-static void delayed_event_release(struct mlx5_device_context *dev_ctx,
-				  struct mlx5_priv *priv)
-{
-	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-	struct mlx5_delayed_event *de;
-	struct mlx5_delayed_event *n;
-	struct list_head temp;
-
-	INIT_LIST_HEAD(&temp);
-
-	spin_lock_irq(&priv->ctx_lock);
-
-	priv->is_accum_events = false;
-	list_splice_init(&priv->waiting_events_list, &temp);
-	if (!dev_ctx->context)
-		goto out;
-	list_for_each_entry_safe(de, n, &temp, list)
-		dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
-
-out:
-	spin_unlock_irq(&priv->ctx_lock);
-
-	list_for_each_entry_safe(de, n, &temp, list) {
-		list_del(&de->list);
-		kfree(de);
-	}
-}
-
-/* accumulating events that can come after mlx5_ib calls to
- * ib_register_device, till adding that interface to the events list.
- */
-static void delayed_event_start(struct mlx5_priv *priv)
-{
-	spin_lock_irq(&priv->ctx_lock);
-	priv->is_accum_events = true;
-	spin_unlock_irq(&priv->ctx_lock);
-}
-
 void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 {
@@ -129,8 +65,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 
 	dev_ctx->intf = intf;
 
-	delayed_event_start(priv);
-
 	dev_ctx->context = intf->add(dev);
 	if (dev_ctx->context) {
 		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@@ -139,22 +73,9 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 		spin_lock_irq(&priv->ctx_lock);
 		list_add_tail(&dev_ctx->list, &priv->ctx_list);
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-		if (dev_ctx->intf->pfault) {
-			if (priv->pfault) {
-				mlx5_core_err(dev, "multiple page fault handlers not supported");
-			} else {
-				priv->pfault_ctx = dev_ctx->context;
-				priv->pfault = dev_ctx->intf->pfault;
-			}
-		}
-#endif
 		spin_unlock_irq(&priv->ctx_lock);
 	}
 
-	delayed_event_release(dev_ctx, priv);
-
 	if (!dev_ctx->context)
 		kfree(dev_ctx);
 }
@@ -179,15 +100,6 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 	if (!dev_ctx)
 		return;
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	spin_lock_irq(&priv->ctx_lock);
-	if (priv->pfault == dev_ctx->intf->pfault)
-		priv->pfault = NULL;
-	spin_unlock_irq(&priv->ctx_lock);
-
-	synchronize_srcu(&priv->pfault_srcu);
-#endif
-
 	spin_lock_irq(&priv->ctx_lock);
 	list_del(&dev_ctx->list);
 	spin_unlock_irq(&priv->ctx_lock);
@@ -207,26 +119,20 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
 	if (!dev_ctx)
 		return;
 
-	delayed_event_start(priv);
 	if (intf->attach) {
 		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
-			goto out;
+			return;
 		if (intf->attach(dev, dev_ctx->context))
-			goto out;
+			return;
 		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
 	} else {
 		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
-			goto out;
+			return;
 		dev_ctx->context = intf->add(dev);
 		if (!dev_ctx->context)
-			goto out;
+			return;
 		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
 	}
-
-out:
-	delayed_event_release(dev_ctx, priv);
 }
 
 void mlx5_attach_device(struct mlx5_core_dev *dev)
@@ -350,28 +256,6 @@ void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
 	mutex_unlock(&mlx5_intf_mutex);
 }
 
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
-{
-	struct mlx5_priv *priv = &mdev->priv;
-	struct mlx5_device_context *dev_ctx;
-	unsigned long flags;
-	void *result = NULL;
-
-	spin_lock_irqsave(&priv->ctx_lock, flags);
-
-	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
-		if ((dev_ctx->intf->protocol == protocol) &&
-		    dev_ctx->intf->get_dev) {
-			result = dev_ctx->intf->get_dev(dev_ctx->context);
-			break;
-		}
-
-	spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
-	return result;
-}
-EXPORT_SYMBOL(mlx5_get_protocol_dev);
-
 /* Must be called with intf_mutex held */
 void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
 {
@@ -422,44 +306,6 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 	return res;
 }
 
-void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
-		     unsigned long param)
-{
-	struct mlx5_priv *priv = &dev->priv;
-	struct mlx5_device_context *dev_ctx;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->ctx_lock, flags);
-
-	if (priv->is_accum_events)
-		add_delayed_event(priv, dev, event, param);
-
-	/* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
-	 * still in priv->ctx_list. In this case, only notify the dev_ctx if its
-	 * ADDED or ATTACHED bit are set.
-	 */
-	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
-		if (dev_ctx->intf->event &&
-		    (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
-		     test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
-			dev_ctx->intf->event(dev, dev_ctx->context, event, param);
-
-	spin_unlock_irqrestore(&priv->ctx_lock, flags);
-}
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_core_page_fault(struct mlx5_core_dev *dev,
-			  struct mlx5_pagefault *pfault)
-{
-	struct mlx5_priv *priv = &dev->priv;
-	int srcu_idx;
-
-	srcu_idx = srcu_read_lock(&priv->pfault_srcu);
-	if (priv->pfault)
-		priv->pfault(dev, priv->pfault_ctx, pfault);
-	srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
-}
-#endif
-
 void mlx5_dev_list_lock(void)
 {
...
@@ -161,10 +161,10 @@ static void print_misc_parameters_hdrs(struct trace_seq *p,
 		PRINT_MASKED_VAL(name, p, format); \
 	}
 	DECLARE_MASK_VAL(u64, gre_key) = {
-		.m = MLX5_GET(fte_match_set_misc, mask, gre_key_h) << 8 |
-		     MLX5_GET(fte_match_set_misc, mask, gre_key_l),
-		.v = MLX5_GET(fte_match_set_misc, value, gre_key_h) << 8 |
-		     MLX5_GET(fte_match_set_misc, value, gre_key_l)};
+		.m = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi) << 8 |
+		     MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo),
+		.v = MLX5_GET(fte_match_set_misc, value, gre_key.nvgre.hi) << 8 |
+		     MLX5_GET(fte_match_set_misc, value, gre_key.nvgre.lo)};
 
 	PRINT_MASKED_VAL(gre_key, p, "%llu");
 	PRINT_MASKED_VAL_MISC(u32, source_sqn, source_sqn, p, "%u");
...
@@ -30,6 +30,7 @@
  * SOFTWARE.
  */
 #define CREATE_TRACE_POINTS
+#include "lib/eq.h"
 #include "fw_tracer.h"
 #include "fw_tracer_tracepoint.h"
 
@@ -846,9 +847,9 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
 	return ERR_PTR(err);
 }
 
-/* Create HW resources + start tracer
- * must be called before Async EQ is created
- */
+static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data);
+
+/* Create HW resources + start tracer */
 int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
 {
 	struct mlx5_core_dev *dev;
@@ -874,6 +875,9 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
 		goto err_dealloc_pd;
 	}
 
+	MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER);
+	mlx5_eq_notifier_register(dev, &tracer->nb);
+
 	mlx5_fw_tracer_start(tracer);
 
 	return 0;
@@ -883,9 +887,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
 	return err;
 }
 
-/* Stop tracer + Cleanup HW resources
- * must be called after Async EQ is destroyed
- */
+/* Stop tracer + Cleanup HW resources */
 void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
 {
 	if (IS_ERR_OR_NULL(tracer))
@@ -893,7 +895,7 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
 	mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n",
 		      tracer->owner);
 
+	mlx5_eq_notifier_unregister(tracer->dev, &tracer->nb);
 	cancel_work_sync(&tracer->ownership_change_work);
 	cancel_work_sync(&tracer->handle_traces_work);
 
@@ -922,12 +924,11 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
 	kfree(tracer);
 }
 
-void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data)
 {
-	struct mlx5_fw_tracer *tracer = dev->tracer;
-
-	if (!tracer)
-		return;
+	struct mlx5_fw_tracer *tracer = mlx5_nb_cof(nb, struct mlx5_fw_tracer, nb);
+	struct mlx5_core_dev *dev = tracer->dev;
+	struct mlx5_eqe *eqe = data;
 
 	switch (eqe->sub_type) {
 	case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE:
@@ -942,6 +943,8 @@ void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
 		mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
 			      eqe->sub_type);
 	}
+
+	return NOTIFY_OK;
 }
 
 EXPORT_TRACEPOINT_SYMBOL(mlx5_fw);
...
@@ -55,6 +55,7 @@
 
 struct mlx5_fw_tracer {
 	struct mlx5_core_dev *dev;
+	struct mlx5_nb nb;
 	bool owner;
 	u8 trc_ver;
 	struct workqueue_struct *work_queue;
@@ -170,6 +171,5 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev);
 int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer);
 void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer);
 void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer);
-void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
 
 #endif
...
@@ -176,8 +176,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 {
 	return is_kdump_kernel() ?
 		MLX5E_MIN_NUM_CHANNELS :
-		min_t(int, mdev->priv.eq_table.num_comp_vectors,
-		      MLX5E_MAX_NUM_CHANNELS);
+		min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
 }
 
 /* Use this function to get max num channels after netdev was created */
@@ -629,7 +628,6 @@ struct mlx5e_channel_stats {
 } ____cacheline_aligned_in_smp;
 
 enum {
-	MLX5E_STATE_ASYNC_EVENTS_ENABLED,
 	MLX5E_STATE_OPENED,
 	MLX5E_STATE_DESTROYING,
 };
@@ -696,6 +694,8 @@ struct mlx5e_priv {
 	struct hwtstamp_config tstamp;
 	u16 q_counter;
 	u16 drop_rq_q_counter;
+	struct notifier_block events_nb;
+
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 	struct mlx5e_dcbx dcbx;
 #endif
...
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */
#ifndef __MLX5_EN_TC_TUNNEL_H__
#define __MLX5_EN_TC_TUNNEL_H__
#include <linux/netdevice.h>
#include <linux/mlx5/fs.h>
#include <net/pkt_cls.h>
#include <linux/netlink.h>
#include "en.h"
#include "en_rep.h"
enum {
MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
MLX5E_TC_TUNNEL_TYPE_VXLAN,
MLX5E_TC_TUNNEL_TYPE_GRETAP
};
int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
struct netlink_ext_ack *extack);
int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev);
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev);
int mlx5e_tc_tun_parse(struct net_device *filter_dev,
struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
void *headers_v);
#endif //__MLX5_EN_TC_TUNNEL_H__
@@ -49,6 +49,7 @@
 #include "lib/clock.h"
 #include "en/port.h"
 #include "en/xdp.h"
+#include "lib/eq.h"
 
 struct mlx5e_rq_param {
 	u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -293,33 +294,35 @@ void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
 	queue_work(priv->wq, &priv->update_stats_work);
 }
 
-static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
-			      enum mlx5_dev_event event, unsigned long param)
+static int async_event(struct notifier_block *nb, unsigned long event, void *data)
 {
-	struct mlx5e_priv *priv = vpriv;
+	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
+	struct mlx5_eqe *eqe = data;
 
-	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
-		return;
+	if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
+		return NOTIFY_DONE;
 
-	switch (event) {
-	case MLX5_DEV_EVENT_PORT_UP:
-	case MLX5_DEV_EVENT_PORT_DOWN:
+	switch (eqe->sub_type) {
+	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
 		queue_work(priv->wq, &priv->update_carrier_work);
 		break;
 	default:
-		break;
+		return NOTIFY_DONE;
 	}
+
+	return NOTIFY_OK;
 }
 
 static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
 {
-	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
+	priv->events_nb.notifier_call = async_event;
+	mlx5_notifier_register(priv->mdev, &priv->events_nb);
 }
 
 static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 {
-	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
-	synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
+	mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
 }
 
 static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
@@ -1763,11 +1766,6 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
 	mlx5e_free_cq(cq);
 }
 
-static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
-{
-	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
-}
-
 static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
 			     struct mlx5e_params *params,
 			     struct mlx5e_channel_param *cparam)
@@ -1918,9 +1916,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 			      struct mlx5e_channel_param *cparam,
 			      struct mlx5e_channel **cp)
 {
+	int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
 	struct net_dim_cq_moder icocq_moder = {0, 0};
 	struct net_device *netdev = priv->netdev;
-	int cpu = mlx5e_get_cpu(priv, ix);
 	struct mlx5e_channel *c;
 	unsigned int irq;
 	int err;
@@ -3388,11 +3386,14 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
 {
 	switch (cls_flower->command) {
 	case TC_CLSFLOWER_REPLACE:
-		return mlx5e_configure_flower(priv, cls_flower, flags);
+		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
+					      flags);
 	case TC_CLSFLOWER_DESTROY:
-		return mlx5e_delete_flower(priv, cls_flower, flags);
+		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
+					   flags);
 	case TC_CLSFLOWER_STATS:
-		return mlx5e_stats_flower(priv, cls_flower, flags);
+		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
+					  flags);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -4137,17 +4138,17 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
 static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
 					struct mlx5e_txqsq *sq)
 {
-	struct mlx5_eq *eq = sq->cq.mcq.eq;
+	struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
 	u32 eqe_count;
 
 	netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
-		   eq->eqn, eq->cons_index, eq->irqn);
+		   eq->core.eqn, eq->core.cons_index, eq->core.irqn);
 
 	eqe_count = mlx5_eq_poll_irq_disabled(eq);
 	if (!eqe_count)
 		return false;
 
-	netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
+	netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->core.eqn);
 	sq->channel->stats->eq_rearm++;
 	return true;
 }
@@ -4988,7 +4989,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
 	netif_carrier_off(netdev);
 
 #ifdef CONFIG_MLX5_EN_ARFS
-	netdev->rx_cpu_rmap = mdev->rmap;
+	netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
 #endif
 
 	return 0;
@@ -5200,21 +5201,12 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
 	kfree(ppriv);
 }
 
-static void *mlx5e_get_netdev(void *vpriv)
-{
-	struct mlx5e_priv *priv = vpriv;
-
-	return priv->netdev;
-}
-
 static struct mlx5_interface mlx5e_interface = {
 	.add = mlx5e_add,
 	.remove = mlx5e_remove,
 	.attach = mlx5e_attach,
 	.detach = mlx5e_detach,
-	.event = mlx5e_async_event,
 	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
-	.get_dev = mlx5e_get_netdev,
 };
 
 void mlx5e_init(void)
...
...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
#include "en.h" #include "en.h"
#include "en_rep.h" #include "en_rep.h"
#include "en_tc.h" #include "en_tc.h"
#include "en/tc_tun.h"
#include "fs_core.h" #include "fs_core.h"
#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \ #define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
...@@ -49,6 +50,15 @@ ...@@ -49,6 +50,15 @@
static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
struct mlx5e_rep_priv *rpriv;
struct list_head list;
};
static void mlx5e_rep_indr_unregister_block(struct net_device *netdev);
static void mlx5e_rep_get_drvinfo(struct net_device *dev, static void mlx5e_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo) struct ethtool_drvinfo *drvinfo)
{ {
@@ -518,6 +528,186 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
 	neigh_release(n);
 }

+static struct mlx5e_rep_indr_block_priv *
+mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
+				 struct net_device *netdev)
+{
+	struct mlx5e_rep_indr_block_priv *cb_priv;
+
+	/* All callback list access should be protected by RTNL. */
+	ASSERT_RTNL();
+
+	list_for_each_entry(cb_priv,
+			    &rpriv->uplink_priv.tc_indr_block_priv_list,
+			    list)
+		if (cb_priv->netdev == netdev)
+			return cb_priv;
+
+	return NULL;
+}
+
+static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
+{
+	struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
+	struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
+
+	list_for_each_entry_safe(cb_priv, temp, head, list) {
+		mlx5e_rep_indr_unregister_block(cb_priv->netdev);
+		kfree(cb_priv);
+	}
+}
+
+static int
+mlx5e_rep_indr_offload(struct net_device *netdev,
+		       struct tc_cls_flower_offload *flower,
+		       struct mlx5e_rep_indr_block_priv *indr_priv)
+{
+	int err = 0;
+	struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
+
+	switch (flower->command) {
+	case TC_CLSFLOWER_REPLACE:
+		err = mlx5e_configure_flower(netdev, priv,
+					     flower, MLX5E_TC_EGRESS);
+		break;
+	case TC_CLSFLOWER_DESTROY:
+		err = mlx5e_delete_flower(netdev, priv,
+					  flower, MLX5E_TC_EGRESS);
+		break;
+	case TC_CLSFLOWER_STATS:
+		err = mlx5e_stats_flower(netdev, priv,
+					 flower, MLX5E_TC_EGRESS);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+	}
+
+	return err;
+}
+
+static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
+					 void *type_data, void *indr_priv)
+{
+	struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
+			      struct mlx5e_rep_priv *rpriv,
+			      struct tc_block_offload *f)
+{
+	struct mlx5e_rep_indr_block_priv *indr_priv;
+	int err = 0;
+
+	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	switch (f->command) {
+	case TC_BLOCK_BIND:
+		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+		if (indr_priv)
+			return -EEXIST;
+
+		indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
+		if (!indr_priv)
+			return -ENOMEM;
+
+		indr_priv->netdev = netdev;
+		indr_priv->rpriv = rpriv;
+		list_add(&indr_priv->list,
+			 &rpriv->uplink_priv.tc_indr_block_priv_list);
+
+		err = tcf_block_cb_register(f->block,
+					    mlx5e_rep_indr_setup_block_cb,
+					    netdev, indr_priv, f->extack);
+		if (err) {
+			list_del(&indr_priv->list);
+			kfree(indr_priv);
+		}
+
+		return err;
+	case TC_BLOCK_UNBIND:
+		tcf_block_cb_unregister(f->block,
+					mlx5e_rep_indr_setup_block_cb,
+					netdev);
+		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+		if (indr_priv) {
+			list_del(&indr_priv->list);
+			kfree(indr_priv);
+		}
+
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static
+int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+			       enum tc_setup_type type, void *type_data)
+{
+	switch (type) {
+	case TC_SETUP_BLOCK:
+		return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
+						     type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
+					 struct net_device *netdev)
+{
+	int err;
+
+	err = __tc_indr_block_cb_register(netdev, rpriv,
+					  mlx5e_rep_indr_setup_tc_cb,
+					  netdev);
+	if (err) {
+		struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+
+		mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
+			      netdev_name(netdev), err);
+	}
+	return err;
+}
+
+static void mlx5e_rep_indr_unregister_block(struct net_device *netdev)
+{
+	__tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
+				      netdev);
+}
+
+static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
+					 unsigned long event, void *ptr)
+{
+	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
+						    uplink_priv.netdevice_nb);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+	if (!mlx5e_tc_tun_device_to_offload(priv, netdev))
+		return NOTIFY_OK;
+
+	switch (event) {
+	case NETDEV_REGISTER:
+		mlx5e_rep_indr_register_block(rpriv, netdev);
+		break;
+	case NETDEV_UNREGISTER:
+		mlx5e_rep_indr_unregister_block(netdev);
+		break;
+	}
+	return NOTIFY_OK;
+}
+
 static struct mlx5e_neigh_hash_entry *
 mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
 			     struct mlx5e_neigh *m_neigh);
@@ -838,24 +1028,14 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
 {
 	switch (cls_flower->command) {
 	case TC_CLSFLOWER_REPLACE:
-		return mlx5e_configure_flower(priv, cls_flower, flags);
+		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
+					      flags);
 	case TC_CLSFLOWER_DESTROY:
-		return mlx5e_delete_flower(priv, cls_flower, flags);
+		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
+					   flags);
 	case TC_CLSFLOWER_STATS:
-		return mlx5e_stats_flower(priv, cls_flower, flags);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
-				       void *cb_priv)
-{
-	struct mlx5e_priv *priv = cb_priv;
-
-	switch (type) {
-	case TC_SETUP_CLSFLOWER:
-		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
+		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
+					  flags);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -1244,7 +1424,7 @@ mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
 	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
 	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
 	int err;

 	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
@@ -1258,12 +1438,23 @@ mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 		goto err_remove_sqs;

 	/* init shared tc flow table */
-	err = mlx5e_tc_esw_init(&rpriv->tc_ht);
+	err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
 	if (err)
 		goto err_neigh_cleanup;

+	/* init indirect block notifications */
+	INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
+	uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
+	err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
+	if (err) {
+		mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
+		goto err_indirect_block_cleanup;
+	}
+
 	return 0;

+err_indirect_block_cleanup:
+	mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
+
 err_neigh_cleanup:
 	mlx5e_rep_neigh_cleanup(rpriv);
 err_remove_sqs:
@@ -1280,8 +1471,12 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
 		mlx5e_remove_sqs_fwd_rules(priv);

+	/* clean indirect TC block notifications */
+	unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
+	mlx5e_rep_indr_clean_block_privs(rpriv);
+
 	/* clean uplink offloaded TC rules, delete shared tc flow table */
-	mlx5e_tc_esw_cleanup(&rpriv->tc_ht);
+	mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);

 	mlx5e_rep_neigh_cleanup(rpriv);
 }
@@ -1329,24 +1524,16 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
 	upriv = netdev_priv(uplink_rpriv->netdev);
-	err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev,
-					 upriv);
-	if (err)
-		goto err_neigh_cleanup;
-
 	err = register_netdev(netdev);
 	if (err) {
 		pr_warn("Failed to register representor netdev for vport %d\n",
 			rep->vport);
-		goto err_egdev_cleanup;
+		goto err_neigh_cleanup;
 	}

 	return 0;

-err_egdev_cleanup:
-	tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
-				     upriv);
-
 err_neigh_cleanup:
 	mlx5e_rep_neigh_cleanup(rpriv);
@@ -1373,8 +1560,6 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 	uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
						    REP_ETH);
 	upriv = netdev_priv(uplink_rpriv->netdev);
-	tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
-				     upriv);
 	mlx5e_rep_neigh_cleanup(rpriv);
 	mlx5e_detach_netdev(priv);
 	mlx5e_destroy_netdev(priv);
......
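The en_rep.c changes above replace the egdev hook with TC indirect block callbacks: a netdevice notifier spots tunnel devices, registers an indirect block callback on each one, and keeps one private entry per bound netdev on a list so later block commands can find their state. A userspace-runnable sketch of that per-netdev bookkeeping (the names, list implementation, and error codes here are illustrative, not the kernel APIs):

#include <stdio.h>
#include <stdlib.h>

struct net_device { const char *name; };

/* One entry per tunnel netdev that bound an indirect block. */
struct indr_block_priv {
    struct net_device *netdev;
    struct indr_block_priv *next;
};

/* Mirrors mlx5e_rep_indr_block_priv_lookup(): linear search by pointer. */
static struct indr_block_priv *
indr_block_priv_lookup(struct indr_block_priv *head, struct net_device *dev)
{
    for (; head; head = head->next)
        if (head->netdev == dev)
            return head;
    return NULL;
}

/* Mirrors the TC_BLOCK_BIND branch: refuse double binds, then track. */
static int indr_block_bind(struct indr_block_priv **head, struct net_device *dev)
{
    struct indr_block_priv *p;

    if (indr_block_priv_lookup(*head, dev))
        return -1; /* stands in for -EEXIST */
    p = malloc(sizeof(*p));
    if (!p)
        return -2; /* stands in for -ENOMEM */
    p->netdev = dev;
    p->next = *head;
    *head = p;
    return 0;
}

int main(void)
{
    struct indr_block_priv *list = NULL;
    struct net_device gre = { "gretap0" };

    printf("first bind: %d\n", indr_block_bind(&list, &gre));  /* 0 */
    printf("second bind: %d\n", indr_block_bind(&list, &gre)); /* -1 */
    return 0;
}

A plain linked list suffices in the driver because bind/unbind are rare, RTNL-protected control-path events.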
@@ -53,13 +53,33 @@ struct mlx5e_neigh_update_table {
 	unsigned long min_interval; /* jiffies */
 };

+struct mlx5_rep_uplink_priv {
+	/* Filters DB - instantiated by the uplink representor and shared by
+	 * the uplink's VFs
+	 */
+	struct rhashtable tc_ht;
+
+	/* indirect block callbacks are invoked on bind/unbind events
+	 * on registered higher level devices (e.g. tunnel devices)
+	 *
+	 * tc_indr_block_priv_list is used to look up the indirect callback
+	 * private data
+	 *
+	 * netdevice_nb is the netdev events notifier - used to register
+	 * tunnel devices for block events
+	 */
+	struct list_head tc_indr_block_priv_list;
+	struct notifier_block netdevice_nb;
+};
+
 struct mlx5e_rep_priv {
 	struct mlx5_eswitch_rep *rep;
 	struct mlx5e_neigh_update_table neigh_update;
 	struct net_device *netdev;
 	struct mlx5_flow_handle *vport_rx_rule;
 	struct list_head vport_sqs_list;
-	struct rhashtable tc_ht; /* valid for uplink rep */
+	struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
 };

 static inline
@@ -129,6 +149,8 @@ struct mlx5e_encap_entry {
 	struct net_device *out_dev;
 	int tunnel_type;
+	int tunnel_hlen;
+	int reformat_type;
 	u8 flags;
 	char *encap_header;
 	int encap_size;
......
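The header change groups everything only the uplink representor uses into struct mlx5_rep_uplink_priv, so per-vport representors stop touching uplink-only state. A minimal illustration of the resulting access-path change (struct shapes are stand-ins, not the real definitions):

#include <stdio.h>

/* Illustrative shapes only; the real structs carry many more fields. */
struct uplink_priv {
    int tc_ht;        /* stands in for struct rhashtable */
    int netdevice_nb; /* stands in for struct notifier_block */
};

struct rep_priv {
    const char *netdev;
    struct uplink_priv uplink_priv; /* only meaningful for the uplink rep */
};

int main(void)
{
    struct rep_priv rpriv = { .netdev = "uplink0" };

    /* Access path changes from rpriv.tc_ht to rpriv.uplink_priv.tc_ht. */
    rpriv.uplink_priv.tc_ht = 1;
    printf("%s tc_ht=%d\n", rpriv.netdev, rpriv.uplink_priv.tc_ht);
    return 0;
}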
@@ -554,9 +554,9 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
 	mlx5_cqwq_pop(&cq->wq);

-	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
 		netdev_WARN_ONCE(cq->channel->netdev,
-				 "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
+				 "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
 		return;
 	}
@@ -898,7 +898,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	prefetchw(va); /* xdp_frame data area */
 	prefetch(data);

-	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
 		rq->stats->wqe_err++;
 		return NULL;
 	}
@@ -930,7 +930,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 	u16 byte_cnt = cqe_bcnt - headlen;
 	struct sk_buff *skb;

-	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
 		rq->stats->wqe_err++;
 		return NULL;
 	}
@@ -1154,7 +1154,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	wi->consumed_strides += cstrides;

-	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
 		rq->stats->wqe_err++;
 		goto mpwrq_cqe_out;
 	}
......
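Every rx/tx hunk above makes the same substitution: the open-coded `cqe->op_own >> 4` becomes get_cqe_opcode(). The opcode lives in the high nibble of op_own and the ownership bit in the low bit, so a named helper removes the repeated magic shift. A standalone sketch of the helper as the diff implies it (struct and values simplified for illustration):

#include <stdint.h>
#include <stdio.h>

struct cqe { uint8_t op_own; };

/* The opcode occupies the upper four bits of op_own; the helper names
 * the shift once instead of repeating ">> 4" at every call site. */
static inline uint8_t get_cqe_opcode(const struct cqe *cqe)
{
    return cqe->op_own >> 4;
}

int main(void)
{
    struct cqe cqe = { .op_own = 0xd1 }; /* opcode 0xd, owner bit set */

    printf("opcode=0x%x owner=%u\n", get_cqe_opcode(&cqe), cqe.op_own & 1);
    return 0;
}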
@@ -30,6 +30,7 @@
  * SOFTWARE.
  */

+#include "lib/mlx5.h"
 #include "en.h"
 #include "en_accel/ipsec.h"
 #include "en_accel/tls.h"
@@ -1088,13 +1089,13 @@ static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
 }

 static const struct counter_desc mlx5e_pme_status_desc[] = {
-	{ "module_unplug", 8 },
+	{ "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
 };

 static const struct counter_desc mlx5e_pme_error_desc[] = {
-	{ "module_bus_stuck", 16 },   /* bus stuck (I2C or data shorted) */
-	{ "module_high_temp", 48 },   /* high temperature */
-	{ "module_bad_shorted", 56 }, /* bad or shorted cable/module */
+	{ "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
+	{ "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
+	{ "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
 };

 #define NUM_PME_STATUS_STATS	ARRAY_SIZE(mlx5e_pme_status_desc)
@@ -1122,15 +1123,17 @@ static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
 static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
 				    int idx)
 {
-	struct mlx5_priv *mlx5_priv = &priv->mdev->priv;
+	struct mlx5_pme_stats pme_stats;
 	int i;

+	mlx5_get_pme_stats(priv->mdev, &pme_stats);
+
 	for (i = 0; i < NUM_PME_STATUS_STATS; i++)
-		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
+		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
 						   mlx5e_pme_status_desc, i);

 	for (i = 0; i < NUM_PME_ERR_STATS; i++)
-		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
+		data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
 						   mlx5e_pme_error_desc, i);

 	return idx;
......
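The stats hunk swaps raw byte offsets (8, 16, 48, 56) for sizeof(u64) multiplied by the event enum, and reads from a local snapshot filled by mlx5_get_pme_stats() rather than reaching into mdev->priv directly. A runnable sketch of the descriptor-plus-offset pattern (enum values and names are invented for the example, not the kernel's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* A stat name plus a byte offset into an array of u64 counters. Writing
 * the offset as sizeof(uint64_t) * <enum index> ties it to the event it
 * counts instead of leaving a magic number. */
enum module_event { MODULE_OK, MODULE_UNPLUGGED, MODULE_BUS_STUCK };

struct counter_desc {
    const char *name;
    size_t offset;
};

static const struct counter_desc pme_desc[] = {
    { "module_unplug", sizeof(uint64_t) * MODULE_UNPLUGGED },
    { "module_bus_stuck", sizeof(uint64_t) * MODULE_BUS_STUCK },
};

static uint64_t read_ctr64(const void *base, const struct counter_desc *d)
{
    return *(const uint64_t *)((const char *)base + d->offset);
}

int main(void)
{
    /* Local snapshot, as mlx5_get_pme_stats() would fill in the driver. */
    uint64_t counters[3] = { 10, 2, 5 };
    size_t i;

    for (i = 0; i < sizeof(pme_desc) / sizeof(pme_desc[0]); i++)
        printf("%s = %llu\n", pme_desc[i].name,
               (unsigned long long)read_ctr64(counters, &pme_desc[i]));
    return 0;
}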
@@ -51,12 +51,12 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
 int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
 void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);

-int mlx5e_configure_flower(struct mlx5e_priv *priv,
+int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
 			   struct tc_cls_flower_offload *f, int flags);
-int mlx5e_delete_flower(struct mlx5e_priv *priv,
+int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
 			struct tc_cls_flower_offload *f, int flags);
-int mlx5e_stats_flower(struct mlx5e_priv *priv,
+int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 		       struct tc_cls_flower_offload *f, int flags);

 struct mlx5e_encap_entry;
@@ -70,6 +70,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
 int mlx5e_tc_num_filters(struct mlx5e_priv *priv);

 #else /* CONFIG_MLX5_ESWITCH */
 static inline int  mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
 static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
......
@@ -507,7 +507,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 		wqe_counter = be16_to_cpu(cqe->wqe_counter);

-		if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) {
+		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
 			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
 					      &sq->state)) {
 				mlx5e_dump_error_cqe(sq,
......
@@ -36,8 +36,10 @@
 #include <linux/mlx5/vport.h>
 #include <linux/mlx5/fs.h>
 #include "mlx5_core.h"
+#include "lib/eq.h"
 #include "eswitch.h"
 #include "fs_core.h"
+#include "lib/eq.h"

 #define UPLINK_VPORT 0xFFFF
@@ -1567,7 +1569,6 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 	/* Mark this vport as disabled to discard new events */
 	vport->enabled = false;

-	synchronize_irq(pci_irq_vector(esw->dev->pdev, MLX5_EQ_VEC_ASYNC));
 	/* Wait for current already scheduled events to complete */
 	flush_workqueue(esw->work_queue);
 	/* Disable events from this vport */
@@ -1593,10 +1594,25 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 	mutex_unlock(&esw->state_lock);
 }

+static int eswitch_vport_event(struct notifier_block *nb,
+			       unsigned long type, void *data)
+{
+	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
+	struct mlx5_eqe *eqe = data;
+	struct mlx5_vport *vport;
+	u16 vport_num;
+
+	vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
+	vport = &esw->vports[vport_num];
+
+	if (vport->enabled)
+		queue_work(esw->work_queue, &vport->vport_change_handler);
+
+	return NOTIFY_OK;
+}
+
 /* Public E-Switch API */
 #define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))

 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
 	int err;
@@ -1640,6 +1656,11 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	for (i = 0; i <= nvfs; i++)
 		esw_enable_vport(esw, i, enabled_events);

+	if (mode == SRIOV_LEGACY) {
+		MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
+		mlx5_eq_notifier_register(esw->dev, &esw->nb);
+	}
+
 	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
 		 esw->enabled_vports);
 	return 0;
@@ -1669,6 +1690,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 	mc_promisc = &esw->mc_promisc;
 	nvports = esw->enabled_vports;

+	if (esw->mode == SRIOV_LEGACY)
+		mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+
 	for (i = 0; i < esw->total_vports; i++)
 		esw_disable_vport(esw, i);
@@ -1777,23 +1801,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 	kfree(esw);
 }

-void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
-{
-	struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
-	u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
-	struct mlx5_vport *vport;
-
-	if (!esw) {
-		pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
-			vport_num);
-		return;
-	}
-
-	vport = &esw->vports[vport_num];
-	if (vport->enabled)
-		queue_work(esw->work_queue, &vport->vport_change_handler);
-}
-
 /* Vport Administration */
 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
......
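The eswitch now receives NIC_VPORT_CHANGE through an embedded mlx5_nb notifier instead of being called directly from EQ dispatch; mlx5_nb_cof() is a container_of wrapper that recovers the eswitch from the notifier_block pointer. A self-contained sketch of that idiom (struct layout and event value are illustrative):

#include <stddef.h>
#include <stdio.h>

/* The container_of idiom: the notifier block is embedded in a larger
 * struct, and the callback recovers the container by subtracting the
 * member's offset from the member's address. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block {
    int (*notifier_call)(struct notifier_block *, unsigned long, void *);
};

struct eswitch {
    int id;
    struct notifier_block nb; /* embedded, like esw->nb in the diff */
};

static int vport_event(struct notifier_block *nb, unsigned long type, void *data)
{
    struct eswitch *esw = container_of(nb, struct eswitch, nb);

    printf("event %lu on eswitch %d\n", type, esw->id);
    return 0;
}

int main(void)
{
    struct eswitch esw = { .id = 1, .nb = { .notifier_call = vport_event } };

    /* A real notifier chain would make this call on event delivery. */
    esw.nb.notifier_call(&esw.nb, 7, NULL);
    return 0;
}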
@@ -181,6 +181,7 @@ struct esw_mc_addr { /* SRIOV only */
 struct mlx5_eswitch {
 	struct mlx5_core_dev *dev;
+	struct mlx5_nb nb;
 	struct mlx5_eswitch_fdb fdb_table;
 	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
 	struct workqueue_struct *work_queue;
@@ -211,7 +212,6 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
 void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
-void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
@@ -352,7 +352,6 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev
 /* eswitch API stubs */
 static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
-static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {}
 static inline int  mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
 static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
......
@@ -125,8 +125,9 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 			dest[i].vport.num = attr->out_rep[j]->vport;
 			dest[i].vport.vhca_id =
 				MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
-			dest[i].vport.vhca_id_valid =
-				!!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+			if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+				dest[i].vport.flags |=
+					MLX5_FLOW_DEST_VPORT_VHCA_ID;
 			i++;
 		}
 	}
@@ -220,7 +221,8 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 		dest[i].vport.num = attr->out_rep[i]->vport;
 		dest[i].vport.vhca_id =
 			MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
-		dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
 	}
 	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	dest[i].ft = fwd_fdb,
......
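These two hunks fold the vhca_id_valid bool into a per-destination flags word, which is what later lets the same struct carry MLX5_FLOW_DEST_VPORT_REFORMAT_ID without growing it by another bool. A small sketch of the flag-based layout (flag values and struct shape are illustrative):

#include <stdint.h>
#include <stdio.h>

/* One bit per optional attribute of a vport destination. */
#define DEST_VPORT_VHCA_ID     (1u << 0)
#define DEST_VPORT_REFORMAT_ID (1u << 1)

struct vport_dest {
    uint16_t num;
    uint16_t vhca_id;
    uint32_t flags;
};

int main(void)
{
    struct vport_dest d = { .num = 3, .vhca_id = 9 };
    int merged_eswitch = 1; /* pretend the capability bit is set */

    /* Mirrors the diff: only flag vhca_id as valid on merged eswitches. */
    if (merged_eswitch)
        d.flags |= DEST_VPORT_VHCA_ID;

    printf("vhca_id valid: %s\n",
           d.flags & DEST_VPORT_VHCA_ID ? "yes" : "no");
    return 0;
}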
@@ -334,7 +334,7 @@ static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
 {
 	u8 opcode, status = 0;

-	opcode = cqe->op_own >> 4;
+	opcode = get_cqe_opcode(cqe);

 	switch (opcode) {
 	case MLX5_CQE_REQ_ERR:
......
@@ -36,6 +36,7 @@
 #include "mlx5_core.h"
 #include "lib/mlx5.h"
+#include "lib/eq.h"
 #include "fpga/core.h"
 #include "fpga/conn.h"
@@ -145,6 +146,22 @@ static int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev)
 	return 0;
 }

+static int mlx5_fpga_event(struct mlx5_fpga_device *, unsigned long, void *);
+
+static int fpga_err_event(struct notifier_block *nb,
+			  unsigned long event, void *eqe)
+{
+	struct mlx5_fpga_device *fdev =
+		mlx5_nb_cof(nb, struct mlx5_fpga_device, fpga_err_nb);
+
+	return mlx5_fpga_event(fdev, event, eqe);
+}
+
+static int fpga_qp_err_event(struct notifier_block *nb,
+			     unsigned long event, void *eqe)
+{
+	struct mlx5_fpga_device *fdev =
+		mlx5_nb_cof(nb, struct mlx5_fpga_device, fpga_qp_err_nb);
+
+	return mlx5_fpga_event(fdev, event, eqe);
+}
+
 int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_fpga_device *fdev = mdev->fpga;
@@ -185,6 +202,11 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
 	if (err)
 		goto out;

+	MLX5_NB_INIT(&fdev->fpga_err_nb, fpga_err_event, FPGA_ERROR);
+	MLX5_NB_INIT(&fdev->fpga_qp_err_nb, fpga_qp_err_event, FPGA_QP_ERROR);
+	mlx5_eq_notifier_register(fdev->mdev, &fdev->fpga_err_nb);
+	mlx5_eq_notifier_register(fdev->mdev, &fdev->fpga_qp_err_nb);
+
 	err = mlx5_fpga_conn_device_init(fdev);
 	if (err)
 		goto err_rsvd_gid;
@@ -201,6 +223,8 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
 	mlx5_fpga_conn_device_cleanup(fdev);
 err_rsvd_gid:
+	mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_err_nb);
+	mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_qp_err_nb);
 	mlx5_core_unreserve_gids(mdev, max_num_qps);
 out:
 	spin_lock_irqsave(&fdev->state_lock, flags);
@@ -256,6 +280,9 @@ void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
 	}

 	mlx5_fpga_conn_device_cleanup(fdev);
+	mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_err_nb);
+	mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_qp_err_nb);
+
 	max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
 	mlx5_core_unreserve_gids(mdev, max_num_qps);
 }
@@ -283,9 +310,10 @@ static const char *mlx5_fpga_qp_syndrome_to_string(u8 syndrome)
 	return "Unknown";
 }

-void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
+static int mlx5_fpga_event(struct mlx5_fpga_device *fdev,
+			   unsigned long event, void *eqe)
 {
-	struct mlx5_fpga_device *fdev = mdev->fpga;
+	void *data = ((struct mlx5_eqe *)eqe)->data.raw;
 	const char *event_name;
 	bool teardown = false;
 	unsigned long flags;
@@ -303,9 +331,7 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
 		fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn);
 		break;
 	default:
-		mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n",
-					   event);
-		return;
+		return NOTIFY_DONE;
 	}

 	spin_lock_irqsave(&fdev->state_lock, flags);
@@ -326,4 +352,6 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
 	 */
 	if (teardown)
 		mlx5_trigger_health_work(fdev->mdev);
+
+	return NOTIFY_OK;
 }
@@ -35,11 +35,16 @@

 #ifdef CONFIG_MLX5_FPGA

+#include <linux/mlx5/eq.h>
+
+#include "lib/eq.h"
 #include "fpga/cmd.h"

 /* Represents an Innova device */
 struct mlx5_fpga_device {
 	struct mlx5_core_dev *mdev;
+	struct mlx5_nb fpga_err_nb;
+	struct mlx5_nb fpga_qp_err_nb;
 	spinlock_t state_lock; /* Protects state transitions */
 	enum mlx5_fpga_status state;
 	enum mlx5_fpga_image last_admin_image;
@@ -82,7 +87,6 @@ int mlx5_fpga_init(struct mlx5_core_dev *mdev);
 void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
 int mlx5_fpga_device_start(struct mlx5_core_dev *mdev);
 void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev);
-void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data);

 #else

@@ -104,11 +108,6 @@ static inline void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
 {
 }

-static inline void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event,
-				   void *data)
-{
-}
-
 #endif

 #endif /* __MLX5_FPGA_CORE_H__ */
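The header keeps the usual mlx5 pattern: with CONFIG_MLX5_FPGA disabled, the same entry points compile to empty static inline stubs, so callers never need their own #ifdefs; dropping mlx5_fpga_event from both branches removes it from that contract entirely. A compile-and-run sketch of the stub pattern (WITH_FEATURE stands in for the Kconfig symbol; names are invented):

#include <stdio.h>

/* Toggle WITH_FEATURE to 0 and the call site below still compiles,
 * now against a do-nothing stub. */
#define WITH_FEATURE 1

#if WITH_FEATURE
static int feature_start(void)
{
    printf("feature started\n");
    return 0;
}
#else
static inline int feature_start(void) { return 0; }
#endif

int main(void)
{
    return feature_start();
}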
@@ -308,22 +308,68 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
 	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 }

+static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
+				  struct fs_fte *fte, bool *extended_dest)
+{
+	int fw_log_max_fdb_encap_uplink =
+		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
+	int num_fwd_destinations = 0;
+	struct mlx5_flow_rule *dst;
+	int num_encap = 0;
+
+	*extended_dest = false;
+	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+		return 0;
+
+	list_for_each_entry(dst, &fte->node.children, node.list) {
+		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+			continue;
+		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+			num_encap++;
+		num_fwd_destinations++;
+	}
+	if (num_fwd_destinations > 1 && num_encap > 0)
+		*extended_dest = true;
+
+	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
+		mlx5_core_warn(dev, "FW does not support extended destination");
+		return -EOPNOTSUPP;
+	}
+	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
+		mlx5_core_warn(dev, "FW does not support more than %d encaps",
+			       1 << fw_log_max_fdb_encap_uplink);
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
 static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 			    int opmod, int modify_mask,
 			    struct mlx5_flow_table *ft,
 			    unsigned group_id,
 			    struct fs_fte *fte)
 {
-	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
-		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
 	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
+	bool extended_dest = false;
 	struct mlx5_flow_rule *dst;
 	void *in_flow_context, *vlan;
 	void *in_match_value;
+	unsigned int inlen;
+	int dst_cnt_size;
 	void *in_dests;
 	u32 *in;
 	int err;

+	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
+		return -EOPNOTSUPP;
+
+	if (!extended_dest)
+		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
+	else
+		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
+
+	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
+
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -343,9 +389,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

 	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
-	MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
-	MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
-		 fte->action.reformat_id);
+	MLX5_SET(flow_context, in_flow_context, extended_destination,
+		 extended_dest);
+	if (extended_dest) {
+		u32 action;
+
+		action = fte->action.action &
+			~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+		MLX5_SET(flow_context, in_flow_context, action, action);
+	} else {
+		MLX5_SET(flow_context, in_flow_context, action,
+			 fte->action.action);
+		MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+			 fte->action.reformat_id);
+	}
 	MLX5_SET(flow_context, in_flow_context, modify_header_id,
 		 fte->action.modify_id);
@@ -387,10 +444,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 			id = dst->dest_attr.vport.num;
 			MLX5_SET(dest_format_struct, in_dests,
 				 destination_eswitch_owner_vhca_id_valid,
-				 dst->dest_attr.vport.vhca_id_valid);
+				 !!(dst->dest_attr.vport.flags &
+				    MLX5_FLOW_DEST_VPORT_VHCA_ID));
 			MLX5_SET(dest_format_struct, in_dests,
 				 destination_eswitch_owner_vhca_id,
 				 dst->dest_attr.vport.vhca_id);
+			if (extended_dest) {
+				MLX5_SET(dest_format_struct, in_dests,
+					 packet_reformat,
+					 !!(dst->dest_attr.vport.flags &
+					    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
+				MLX5_SET(extended_dest_format, in_dests,
+					 packet_reformat_id,
+					 dst->dest_attr.vport.reformat_id);
+			}
 			break;
 		default:
 			id = dst->dest_attr.tir_num;
@@ -399,7 +466,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 		MLX5_SET(dest_format_struct, in_dests, destination_type,
 			 type);
 		MLX5_SET(dest_format_struct, in_dests, destination_id, id);
-		in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+		in_dests += dst_cnt_size;
 		list_size++;
 	}
@@ -420,7 +487,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 		MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
 			 dst->dest_attr.counter_id);
-		in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+		in_dests += dst_cnt_size;
 		list_size++;
 	}

 	if (list_size > max_list_size) {
......
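mlx5_cmd_set_fte() now sizes the command buffer per destination entry: the extended format (a larger entry that carries a packet_reformat_id) is selected only when a rule forwards to more than one destination and at least one of them needs packet reformat, and both paths are gated on firmware capability bits. A runnable sketch of the sizing rule (all byte counts are placeholders, not the firmware layout):

#include <stdio.h>

#define DEST_FORMAT_BYTES     16  /* placeholder for dest_format_struct */
#define EXT_DEST_FORMAT_BYTES 32  /* placeholder for extended_dest_format */
#define SET_FTE_IN_BYTES     256  /* placeholder for the fixed header */

/* The per-entry size is chosen once up front and reused both for the
 * allocation and for advancing the write cursor. */
static unsigned int set_fte_inlen(unsigned int dests, int extended)
{
    unsigned int per = extended ? EXT_DEST_FORMAT_BYTES : DEST_FORMAT_BYTES;

    return SET_FTE_IN_BYTES + dests * per;
}

int main(void)
{
    printf("plain: %u bytes\n", set_fte_inlen(4, 0));
    printf("extended: %u bytes\n", set_fte_inlen(4, 1));
    return 0;
}

Because the same dst_cnt_size also advances the write cursor in the loop, plain and extended entries can never be mixed within one command.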
@@ -145,29 +145,6 @@ struct mlx5_flow_table {
 	struct rhltable fgs_hash;
 };

-struct mlx5_fc_cache {
-	u64 packets;
-	u64 bytes;
-	u64 lastuse;
-};
-
-struct mlx5_fc {
-	struct list_head list;
-	struct llist_node addlist;
-	struct llist_node dellist;
-
-	/* last{packets,bytes} members are used when calculating the delta since
-	 * last reading
-	 */
-	u64 lastpackets;
-	u64 lastbytes;
-
-	u32 id;
-	bool aging;
-
-	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
-};
-
 struct mlx5_ft_underlay_qp {
 	struct list_head list;
 	u32 qpn;
......
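Deleting mlx5_fc and mlx5_fc_cache from this shared header suggests their layout became private to the flow-counter code, leaving other files with at most a forward declaration. A minimal sketch of that opaque-type pattern in C (a single file standing in for a hypothetical header/implementation pair; names are invented):

#include <stdio.h>
#include <stdlib.h>

/* "Header" view: the struct is only forward-declared, so other
 * translation units cannot touch its members directly. */
struct fc;
struct fc *fc_create(void);
unsigned long fc_id(const struct fc *fc);

/* "Implementation" view: the layout lives here and can change freely
 * without recompiling every user of the header. */
struct fc {
    unsigned long id;
    unsigned long packets;
};

struct fc *fc_create(void)
{
    struct fc *fc = calloc(1, sizeof(*fc));

    if (fc)
        fc->id = 42;
    return fc;
}

unsigned long fc_id(const struct fc *fc)
{
    return fc->id;
}

int main(void)
{
    struct fc *fc = fc_create();

    if (fc)
        printf("counter id=%lu\n", fc_id(fc));
    free(fc);
    return 0;
}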