Commit 0651ec93 authored by Doug Ledford

Merge branches 'cxgb4-2', 'i40iw-2', 'ipoib', 'misc-4.7' and 'mlx5-fcs' into k.o/for-4.7

......@@ -4295,7 +4295,8 @@ static int __init cma_init(void)
if (ret)
goto err;
if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
if (ibnl_add_client(RDMA_NL_RDMA_CM, ARRAY_SIZE(cma_cb_table),
cma_cb_table))
pr_warn("RDMA CMA: failed to add netlink callback\n");
cma_configfs_init();
......
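The cma, iw_cm, and ib_sa hunks in this merge all replace hard-coded *_NUM_OPS constants with ARRAY_SIZE(table), so the registered op count can never drift out of sync with the callback table. A minimal userspace sketch of the same idiom (the struct and table names below are illustrative, not from the patch):

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct cb_entry {
	const char *name;
	int (*handler)(void);
};

static int op_a(void) { return 0; }
static int op_b(void) { return 1; }

/* Adding an entry automatically grows the registered op count. */
static const struct cb_entry cb_table[] = {
	{ "op_a", op_a },
	{ "op_b", op_b },
};

int main(void)
{
	size_t i;

	/* ARRAY_SIZE(cb_table) tracks the table automatically; a separate
	 * FOO_NUM_OPS macro can silently go stale when entries are added. */
	for (i = 0; i < ARRAY_SIZE(cb_table); i++)
		printf("op %zu: %s -> %d\n", i, cb_table[i].name, cb_table[i].handler());
	return 0;
}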
......@@ -459,7 +459,7 @@ static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
if (pm_addr->ss_family == AF_INET) {
struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;
if (pm4_addr->sin_addr.s_addr == INADDR_ANY) {
if (pm4_addr->sin_addr.s_addr == htonl(INADDR_ANY)) {
struct sockaddr_in *cm4_addr =
(struct sockaddr_in *)cm_addr;
struct sockaddr_in *cm4_outaddr =
......@@ -1175,7 +1175,7 @@ static int __init iw_cm_init(void)
if (ret)
pr_err("iw_cm: couldn't init iwpm\n");
ret = ibnl_add_client(RDMA_NL_IWCM, RDMA_NL_IWPM_NUM_OPS,
ret = ibnl_add_client(RDMA_NL_IWCM, ARRAY_SIZE(iwcm_nl_cb_table),
iwcm_nl_cb_table);
if (ret)
pr_err("iw_cm: couldn't register netlink callbacks\n");
......
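The iw_cm_check_wildcard fix compares the network-byte-order sin_addr.s_addr against htonl(INADDR_ANY) instead of the raw host-order constant. INADDR_ANY happens to be 0, so both forms are numerically equal, but the htonl() form keeps the byte-order convention explicit; for non-symmetric constants such as INADDR_LOOPBACK the conversion is mandatory. A small userspace illustration of the distinction (values chosen for demonstration, not from the patch):

#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int main(void)
{
	struct sockaddr_in sa = { .sin_family = AF_INET };

	sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);	/* stored in network order */

	/* Wrong on little-endian hosts: compares network-order data to a host-order constant. */
	printf("raw compare:   %d\n", sa.sin_addr.s_addr == INADDR_LOOPBACK);

	/* Correct: convert the constant to network order first. */
	printf("htonl compare: %d\n", sa.sin_addr.s_addr == htonl(INADDR_LOOPBACK));
	return 0;
}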
......@@ -634,6 +634,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
pr_warn("%s Unable to put NLMSG_DONE\n", __func__);
dev_kfree_skb(skb);
return -ENOMEM;
}
nlh->nlmsg_type = NLMSG_DONE;
......
......@@ -151,12 +151,11 @@ static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
struct ibnl_client *client;
int type = nlh->nlmsg_type;
int index = RDMA_NL_GET_CLIENT(type);
int op = RDMA_NL_GET_OP(type);
unsigned int op = RDMA_NL_GET_OP(type);
list_for_each_entry(client, &client_list, list) {
if (client->index == index) {
if (op < 0 || op >= client->nops ||
!client->cb_table[op].dump)
if (op >= client->nops || !client->cb_table[op].dump)
return -EINVAL;
/*
......
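Changing op to unsigned int makes the "op < 0" half of the range check dead code, so the hunk drops it and keeps only the upper-bound test against client->nops. The same idiom in plain C (names are illustrative):

#include <stdio.h>

#define NOPS 4

static int dispatch(unsigned int op)
{
	/* op is unsigned, so a single upper-bound check covers the whole range;
	 * an "op < 0" test would always be false and some compilers warn about it. */
	if (op >= NOPS)
		return -1;
	return (int)op;
}

int main(void)
{
	printf("%d %d\n", dispatch(2), dispatch(7));
	return 0;
}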
......@@ -536,7 +536,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
if (!data) {
kfree_skb(skb);
nlmsg_free(skb);
return -EMSGSIZE;
}
......@@ -1820,7 +1820,7 @@ static int __init ib_sa_init(void)
goto err3;
}
if (ibnl_add_client(RDMA_NL_LS, RDMA_NL_LS_NUM_OPS,
if (ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ib_sa_cb_table),
ib_sa_cb_table)) {
pr_err("Failed to add netlink callback\n");
ret = -EINVAL;
......
......@@ -1833,7 +1833,8 @@ static int create_qp(struct ib_uverbs_file *file,
if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
IB_QP_CREATE_CROSS_CHANNEL |
IB_QP_CREATE_MANAGED_SEND |
IB_QP_CREATE_MANAGED_RECV)) {
IB_QP_CREATE_MANAGED_RECV |
IB_QP_CREATE_SCATTER_FCS)) {
ret = -EINVAL;
goto err_put;
}
......@@ -3655,6 +3656,11 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
resp.hca_core_clock = attr.hca_core_clock;
resp.response_length += sizeof(resp.hca_core_clock);
if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
goto end;
resp.device_cap_flags_ex = attr.device_cap_flags;
resp.response_length += sizeof(resp.device_cap_flags_ex);
end:
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
return err;
......
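ib_uverbs_ex_query_device appends each optional field only when the caller's output buffer is large enough, bumping resp.response_length as it goes and finally copying exactly response_length bytes back; that is how device_cap_flags_ex is added here without breaking older userspace. A userspace sketch of the pattern (struct layout and names are made up for illustration):

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>

struct query_resp {
	uint32_t response_length;	/* how many bytes of this struct are valid */
	uint32_t base_field;
	uint64_t cap_flags_ex;		/* newer, optional field */
};

/* Fill 'resp' but only advertise fields that fit in the caller's buffer. */
static size_t build_response(struct query_resp *resp, size_t user_outlen)
{
	memset(resp, 0, sizeof(*resp));
	resp->base_field = 42;
	resp->response_length = offsetof(struct query_resp, cap_flags_ex);

	if (user_outlen >= resp->response_length + sizeof(resp->cap_flags_ex)) {
		resp->cap_flags_ex = (uint64_t)1 << 34;
		resp->response_length += sizeof(resp->cap_flags_ex);
	}
	return resp->response_length;	/* copy only this many bytes to the user */
}

int main(void)
{
	struct query_resp r;

	printf("old user buffer: %zu bytes copied\n", build_response(&r, 8));
	printf("new user buffer: %zu bytes copied\n", build_response(&r, sizeof(r)));
	return 0;
}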
......@@ -755,6 +755,7 @@ enum c4iw_ep_flags {
CLOSE_SENT = 3,
TIMEOUT = 4,
QP_REFERENCED = 5,
STOP_MPA_TIMER = 7,
};
enum c4iw_ep_history {
......@@ -779,7 +780,13 @@ enum c4iw_ep_history {
EP_DISC_ABORT = 18,
CONN_RPL_UPCALL = 19,
ACT_RETRY_NOMEM = 20,
ACT_RETRY_INUSE = 21
ACT_RETRY_INUSE = 21,
CLOSE_CON_RPL = 22,
EP_DISC_FAIL = 24,
QP_REFED = 25,
QP_DEREFED = 26,
CM_ID_REFED = 27,
CM_ID_DEREFED = 28,
};
struct c4iw_ep_common {
......
......@@ -86,8 +86,9 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
(wait ? FW_WR_COMPL_F : 0));
req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
T5_ULP_MEMIO_ORDER_V(1) |
T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0]));
req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
......
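The _c4iw_write_mem_dma_aligned change builds the whole command word in one cpu_to_be32() call, OR-ing the ULPTX opcode, the T5 ordering bit, and the new FID field together before the byte swap, rather than OR-ing a second already-swapped word into it. A userspace sketch of the same shift-macro packing (the field positions below are illustrative, not the real T5 layout):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Illustrative layout: opcode in bits 24..31, order flag in bit 23, fid in bits 4..14. */
#define CMD_OPCODE_S	24
#define CMD_OPCODE_V(x)	((uint32_t)(x) << CMD_OPCODE_S)
#define CMD_ORDER_S	23
#define CMD_ORDER_V(x)	((uint32_t)(x) << CMD_ORDER_S)
#define CMD_FID_S	4
#define CMD_FID_V(x)	((uint32_t)(x) << CMD_FID_S)

int main(void)
{
	uint32_t cmd_be;

	/* Compose all fields in host order, then convert to big-endian once. */
	cmd_be = htonl(CMD_OPCODE_V(0x5) | CMD_ORDER_V(1) | CMD_FID_V(0x123));
	printf("command word: 0x%08x\n", (unsigned)cmd_be);
	return 0;
}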
......@@ -1601,7 +1601,7 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
else if (ret == -ENXIO)
pr_err("Device managed flow steering is disabled. Fail to register network rule.\n");
else if (ret)
pr_err("Invalid argumant. Fail to register network rule.\n");
pr_err("Invalid argument. Fail to register network rule.\n");
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return ret;
......
......@@ -747,14 +747,11 @@ static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx
__be64 tid,
union ib_gid *new_mgid)
{
struct mcast_group *group = NULL, *cur_group;
struct mcast_group *group = NULL, *cur_group, *n;
struct mcast_req *req;
struct list_head *pos;
struct list_head *n;
mutex_lock(&ctx->mcg_table_lock);
list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
group = list_entry(pos, struct mcast_group, mgid0_list);
list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) {
mutex_lock(&group->lock);
if (group->last_req_tid == tid) {
if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
......
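The mcg.c hunk folds the open-coded list_for_each_safe() + list_entry() pair into list_for_each_entry_safe(), which caches the next element so the current one may be unlinked inside the loop body. A plain-C sketch of the same "remember next before you delete" idea, without the kernel list macros (types are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *cur, *next;
	int i;

	for (i = 0; i < 5; i++) {		/* build 4,3,2,1,0 */
		cur = malloc(sizeof(*cur));
		cur->val = i;
		cur->next = head;
		head = cur;
	}

	/* Safe traversal: grab 'next' before freeing the current node,
	 * which is exactly what list_for_each_entry_safe() does for kernel lists. */
	for (cur = head; cur; cur = next) {
		next = cur->next;
		printf("freeing %d\n", cur->val);
		free(cur);
	}
	return 0;
}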
......@@ -38,6 +38,9 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
......@@ -517,6 +520,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_TSO;
}
if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
MLX5_CAP_ETH(dev->mdev, scatter_fcs))
props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
......@@ -1068,38 +1075,89 @@ static int get_index(unsigned long offset)
return get_arg(offset);
}
static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
{
switch (cmd) {
case MLX5_IB_MMAP_WC_PAGE:
return "WC";
case MLX5_IB_MMAP_REGULAR_PAGE:
return "best effort WC";
case MLX5_IB_MMAP_NC_PAGE:
return "NC";
default:
return NULL;
}
}
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
struct vm_area_struct *vma, struct mlx5_uuar_info *uuari)
{
int err;
unsigned long idx;
phys_addr_t pfn, pa;
pgprot_t prot;
switch (cmd) {
case MLX5_IB_MMAP_WC_PAGE:
/* Some architectures don't support WC memory */
#if defined(CONFIG_X86)
if (!pat_enabled())
return -EPERM;
#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
return -EPERM;
#endif
/* fall through */
case MLX5_IB_MMAP_REGULAR_PAGE:
/* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
prot = pgprot_writecombine(vma->vm_page_prot);
break;
case MLX5_IB_MMAP_NC_PAGE:
prot = pgprot_noncached(vma->vm_page_prot);
break;
default:
return -EINVAL;
}
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
idx = get_index(vma->vm_pgoff);
if (idx >= uuari->num_uars)
return -EINVAL;
pfn = uar_index2pfn(dev, uuari->uars[idx].index);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
vma->vm_page_prot = prot;
err = io_remap_pfn_range(vma, vma->vm_start, pfn,
PAGE_SIZE, vma->vm_page_prot);
if (err) {
mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
return -EAGAIN;
}
pa = pfn << PAGE_SHIFT;
mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
vma->vm_start, &pa);
return 0;
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
struct mlx5_uuar_info *uuari = &context->uuari;
unsigned long command;
unsigned long idx;
phys_addr_t pfn;
command = get_command(vma->vm_pgoff);
switch (command) {
case MLX5_IB_MMAP_WC_PAGE:
case MLX5_IB_MMAP_NC_PAGE:
case MLX5_IB_MMAP_REGULAR_PAGE:
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
idx = get_index(vma->vm_pgoff);
if (idx >= uuari->num_uars)
return -EINVAL;
pfn = uar_index2pfn(dev, uuari->uars[idx].index);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
(unsigned long long)pfn);
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
if (io_remap_pfn_range(vma, vma->vm_start, pfn,
PAGE_SIZE, vma->vm_page_prot))
return -EAGAIN;
mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
vma->vm_start,
(unsigned long long)pfn << PAGE_SHIFT);
break;
return uar_mmap(dev, command, vma, uuari);
case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
return -ENOSYS;
......@@ -1108,7 +1166,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
if (vma->vm_flags & (VM_WRITE | VM_EXEC))
if (vma->vm_flags & VM_WRITE)
return -EPERM;
/* Don't expose to user-space information it shouldn't have */
......
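In mlx5_ib_mmap() the command (WC page, NC page, regular page, core clock, ...) and the UAR index are both carried in vma->vm_pgoff and decoded by get_command()/get_index() before uar_mmap() picks the page protection. A userspace sketch of that offset-encoding convention (the 8-bit shift below is an assumption for illustration, not taken from the driver headers):

#include <stdio.h>

/* Assumed layout: low bits carry the index, bits above CMD_SHIFT carry the command. */
#define CMD_SHIFT 8
#define IDX_MASK  ((1UL << CMD_SHIFT) - 1)

static unsigned long encode_pgoff(unsigned long cmd, unsigned long idx)
{
	return (cmd << CMD_SHIFT) | (idx & IDX_MASK);
}

int main(void)
{
	unsigned long pgoff = encode_pgoff(2 /* e.g. a WC-page command */, 5);

	printf("command=%lu index=%lu\n", pgoff >> CMD_SHIFT, pgoff & IDX_MASK);
	return 0;
}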
......@@ -70,6 +70,8 @@ enum {
enum mlx5_ib_mmap_cmd {
MLX5_IB_MMAP_REGULAR_PAGE = 0,
MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
MLX5_IB_MMAP_WC_PAGE = 2,
MLX5_IB_MMAP_NC_PAGE = 3,
/* 5 is chosen in order to be compatible with old versions of libmlx5 */
MLX5_IB_MMAP_CORE_CLOCK = 5,
};
......@@ -356,6 +358,7 @@ enum mlx5_ib_qp_flags {
MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
/* QP uses 1 as its source QP number */
MLX5_IB_QP_SQPN_QP1 = 1 << 6,
MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
};
struct mlx5_umr_wr {
......
......@@ -1028,6 +1028,7 @@ static int get_rq_pas_size(void *qpc)
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, void *qpin)
{
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
__be64 *pas;
__be64 *qp_pas;
void *in;
......@@ -1051,6 +1052,9 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
MLX5_SET(rqc, rqc, scatter_fcs, 1);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, end_padding_mode,
......@@ -1136,11 +1140,12 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
if (qp->rq.wqe_cnt) {
rq->base.container_mibqp = qp;
err = create_raw_packet_qp_rq(dev, rq, in);
if (err)
goto err_destroy_sq;
rq->base.container_mibqp = qp;
err = create_raw_packet_qp_tir(dev, rq, tdn);
if (err)
......@@ -1252,6 +1257,19 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return -EOPNOTSUPP;
}
if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs");
return -EOPNOTSUPP;
}
if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
!MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
return -EOPNOTSUPP;
}
qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
}
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
......
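Two layers of validation appear for the new scatter-FCS flag: create_qp() in uverbs adds IB_QP_CREATE_SCATTER_FCS to the mask of create_flags it will accept at all, and create_qp_common() in mlx5 then checks the QP type and the eth_net_offloads/scatter_fcs capabilities before setting MLX5_IB_QP_CAP_SCATTER_FCS. A compact sketch of that two-step flag gating (flag values and the helper are illustrative):

#include <stdio.h>

#define QPF_BLOCK_MCAST		(1u << 1)
#define QPF_SCATTER_FCS		(1u << 8)
#define QPF_SUPPORTED		(QPF_BLOCK_MCAST | QPF_SCATTER_FCS)

/* Reject any flag bit the code does not know about, then gate known
 * bits on device capability, mirroring the create_qp()/create_qp_common() checks. */
static int check_create_flags(unsigned int flags, int hw_has_scatter_fcs)
{
	if (flags & ~QPF_SUPPORTED)
		return -1;			/* unknown flag: -EINVAL in the kernel */
	if ((flags & QPF_SCATTER_FCS) && !hw_has_scatter_fcs)
		return -2;			/* known but unsupported: -EOPNOTSUPP */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       check_create_flags(QPF_SCATTER_FCS, 1),
	       check_create_flags(QPF_SCATTER_FCS, 0),
	       check_create_flags(1u << 30, 1));
	return 0;
}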
......@@ -44,6 +44,7 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/irq.h>
......@@ -903,70 +904,15 @@ void nes_clc(unsigned long parm)
*/
void nes_dump_mem(unsigned int dump_debug_level, void *addr, int length)
{
char xlate[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'a', 'b', 'c', 'd', 'e', 'f'};
char *ptr;
char hex_buf[80];
char ascii_buf[20];
int num_char;
int num_ascii;
int num_hex;
if (!(nes_debug_level & dump_debug_level)) {
return;
}
ptr = addr;
if (length > 0x100) {
nes_debug(dump_debug_level, "Length truncated from %x to %x\n", length, 0x100);
length = 0x100;
}
nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", ptr, length, length);
memset(ascii_buf, 0, 20);
memset(hex_buf, 0, 80);
num_ascii = 0;
num_hex = 0;
for (num_char = 0; num_char < length; num_char++) {
if (num_ascii == 8) {
ascii_buf[num_ascii++] = ' ';
hex_buf[num_hex++] = '-';
hex_buf[num_hex++] = ' ';
}
if (*ptr < 0x20 || *ptr > 0x7e)
ascii_buf[num_ascii++] = '.';
else
ascii_buf[num_ascii++] = *ptr;
hex_buf[num_hex++] = xlate[((*ptr & 0xf0) >> 4)];
hex_buf[num_hex++] = xlate[*ptr & 0x0f];
hex_buf[num_hex++] = ' ';
ptr++;
if (num_ascii >= 17) {
/* output line and reset */
nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
memset(ascii_buf, 0, 20);
memset(hex_buf, 0, 80);
num_ascii = 0;
num_hex = 0;
}
}
nes_debug(dump_debug_level, "Address=0x%p, length=0x%x (%d)\n", addr, length, length);
/* output the rest */
if (num_ascii) {
while (num_ascii < 17) {
if (num_ascii == 8) {
hex_buf[num_hex++] = ' ';
hex_buf[num_hex++] = ' ';
}
hex_buf[num_hex++] = ' ';
hex_buf[num_hex++] = ' ';
hex_buf[num_hex++] = ' ';
num_ascii++;
}
nes_debug(dump_debug_level, " %s | %s\n", hex_buf, ascii_buf);
}
print_hex_dump(KERN_ERR, PFX, DUMP_PREFIX_NONE, 16, 1, addr, length, true);
}
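nes_dump_mem() drops roughly sixty lines of hand-rolled hex/ASCII formatting in favour of the kernel's print_hex_dump() helper. A small userspace hexdump in the same 16-bytes-per-line style (a sketch, not the kernel implementation):

#include <stdio.h>
#include <ctype.h>
#include <string.h>

static void hex_dump(const void *addr, size_t len)
{
	const unsigned char *p = addr;
	size_t i, j;

	for (i = 0; i < len; i += 16) {
		printf("%04zx: ", i);
		for (j = 0; j < 16; j++) {
			if (i + j < len)
				printf("%02x ", p[i + j]);
			else
				printf("   ");
		}
		printf("| ");
		for (j = 0; j < 16 && i + j < len; j++)
			putchar(isprint(p[i + j]) ? p[i + j] : '.');
		putchar('\n');
	}
}

int main(void)
{
	const char buf[] = "print_hex_dump replaces open-coded dump loops";

	hex_dump(buf, sizeof(buf));
	return 0;
}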
......@@ -980,7 +980,7 @@ static int nes_setup_mmap_qp(struct nes_qp *nesqp, struct nes_vnic *nesvnic,
/**
* nes_free_qp_mem() is to free up the qp's pci_alloc_consistent() memory.
*/
static inline void nes_free_qp_mem(struct nes_device *nesdev,
static void nes_free_qp_mem(struct nes_device *nesdev,
struct nes_qp *nesqp, int virt_wqs)
{
unsigned long flags;
......@@ -1314,6 +1314,8 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
nes_debug(NES_DBG_QP, "Invalid QP type: %d\n", init_attr->qp_type);
return ERR_PTR(-EINVAL);
}
init_completion(&nesqp->sq_drained);
init_completion(&nesqp->rq_drained);
nesqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
init_timer(&nesqp->terminate_timer);
......@@ -3451,6 +3453,29 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
return err;
}
/**
* nes_drain_sq - drain sq
* @ibqp: pointer to ibqp
*/
static void nes_drain_sq(struct ib_qp *ibqp)
{
struct nes_qp *nesqp = to_nesqp(ibqp);
if (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)
wait_for_completion(&nesqp->sq_drained);
}
/**
* nes_drain_rq - drain rq
* @ibqp: pointer to ibqp
*/
static void nes_drain_rq(struct ib_qp *ibqp)
{
struct nes_qp *nesqp = to_nesqp(ibqp);
if (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)
wait_for_completion(&nesqp->rq_drained);
}
/**
* nes_poll_cq
......@@ -3581,6 +3606,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
}
}
if (nesqp->iwarp_state > NES_CQP_QP_IWARP_STATE_RTS) {
if (nesqp->hwqp.sq_tail == nesqp->hwqp.sq_head)
complete(&nesqp->sq_drained);
if (nesqp->hwqp.rq_tail == nesqp->hwqp.rq_head)
complete(&nesqp->rq_drained);
}
entry->wr_id = wrid;
entry++;
cqe_count++;
......@@ -3753,6 +3785,8 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.req_notify_cq = nes_req_notify_cq;
nesibdev->ibdev.post_send = nes_post_send;
nesibdev->ibdev.post_recv = nes_post_recv;
nesibdev->ibdev.drain_sq = nes_drain_sq;
nesibdev->ibdev.drain_rq = nes_drain_rq;
nesibdev->ibdev.iwcm = kzalloc(sizeof(*nesibdev->ibdev.iwcm), GFP_KERNEL);
if (nesibdev->ibdev.iwcm == NULL) {
......
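The new nes_drain_sq()/nes_drain_rq() hooks block on a completion only while work is still outstanding, and nes_poll_cq() signals that completion once the tail catches up with the head on a QP that has left RTS. A userspace pthread analogue of that completion handshake (a sketch under the assumption that a condition variable stands in for a kernel struct completion; locking around the head/tail check is omitted, as in the driver):

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

/* Minimal userspace stand-in for a kernel struct completion. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = true;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

struct fake_qp {
	unsigned int sq_head, sq_tail;
	struct completion sq_drained;
};

/* Only block if work is still outstanding, like nes_drain_sq(). */
static void drain_sq(struct fake_qp *qp)
{
	if (qp->sq_tail != qp->sq_head)
		wait_for_completion(&qp->sq_drained);
}

static void *poller(void *arg)
{
	struct fake_qp *qp = arg;

	qp->sq_tail = qp->sq_head;	/* pretend all WRs completed */
	complete(&qp->sq_drained);	/* what nes_poll_cq() does at tail == head */
	return NULL;
}

int main(void)
{
	struct fake_qp qp = {
		.sq_head = 4, .sq_tail = 0,
		.sq_drained = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false },
	};
	pthread_t t;

	pthread_create(&t, NULL, poller, &qp);
	drain_sq(&qp);			/* returns once the poller signals the completion */
	pthread_join(t, NULL);
	printf("sq drained\n");
	return 0;
}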
......@@ -189,6 +189,8 @@ struct nes_qp {
u8 pau_pending;
u8 pau_state;
__u64 nesuqp_addr;
struct completion sq_drained;
struct completion rq_drained;
};
struct ib_mr *nes_reg_phys_mr(struct ib_pd *ib_pd,
......
......@@ -36,6 +36,27 @@
#include "ipoib.h"
struct ipoib_stats {
char stat_string[ETH_GSTRING_LEN];
int stat_offset;
};
#define IPOIB_NETDEV_STAT(m) { \
.stat_string = #m, \
.stat_offset = offsetof(struct rtnl_link_stats64, m) }
static const struct ipoib_stats ipoib_gstrings_stats[] = {
IPOIB_NETDEV_STAT(rx_packets),
IPOIB_NETDEV_STAT(tx_packets),
IPOIB_NETDEV_STAT(rx_bytes),
IPOIB_NETDEV_STAT(tx_bytes),
IPOIB_NETDEV_STAT(tx_errors),
IPOIB_NETDEV_STAT(rx_dropped),
IPOIB_NETDEV_STAT(tx_dropped)
};
#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
static void ipoib_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
......@@ -92,11 +113,57 @@ static int ipoib_set_coalesce(struct net_device *dev,
return 0;
}
static void ipoib_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats __always_unused *stats,
u64 *data)
{
int i;
struct net_device_stats *net_stats = &dev->stats;
u8 *p = (u8 *)net_stats;
for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++)
data[i] = *(u64 *)(p + ipoib_gstrings_stats[i].stat_offset);
}
static void ipoib_get_strings(struct net_device __always_unused *dev,
u32 stringset, u8 *data)
{
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++) {
memcpy(p, ipoib_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
break;
case ETH_SS_TEST:
default:
break;
}
}
static int ipoib_get_sset_count(struct net_device __always_unused *dev,
int sset)
{
switch (sset) {
case ETH_SS_STATS:
return IPOIB_GLOBAL_STATS_LEN;
case ETH_SS_TEST:
default:
break;
}
return -EOPNOTSUPP;
}
static const struct ethtool_ops ipoib_ethtool_ops = {
.get_drvinfo = ipoib_get_drvinfo,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
.get_strings = ipoib_get_strings,
.get_ethtool_stats = ipoib_get_ethtool_stats,
.get_sset_count = ipoib_get_sset_count,
};
void ipoib_set_ethtool_ops(struct net_device *dev)
......
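The new ipoib ethtool hooks expose the netdev counters through a table of {name, offsetof()} pairs, so ipoib_get_strings() and ipoib_get_ethtool_stats() walk the same array and can never report mismatched names and values. A userspace sketch of the idiom (struct and field names are illustrative):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct dev_stats {
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint64_t rx_bytes;
};

struct stat_desc {
	const char *name;
	size_t offset;
};

#define STAT(m) { #m, offsetof(struct dev_stats, m) }

static const struct stat_desc stats_table[] = {
	STAT(rx_packets),
	STAT(tx_packets),
	STAT(rx_bytes),
};

int main(void)
{
	struct dev_stats s = { .rx_packets = 10, .tx_packets = 7, .rx_bytes = 1500 };
	const uint8_t *base = (const uint8_t *)&s;
	size_t i;

	/* Same walk as ipoib_get_ethtool_stats(): name and value come from one table. */
	for (i = 0; i < sizeof(stats_table) / sizeof(stats_table[0]); i++)
		printf("%-12s %llu\n", stats_table[i].name,
		       (unsigned long long)*(const uint64_t *)(base + stats_table[i].offset));
	return 0;
}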
......@@ -51,8 +51,6 @@ MODULE_PARM_DESC(data_debug_level,
"Enable data path debug tracing if > 0");
#endif
static DEFINE_MUTEX(pkey_mutex);
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct ib_ah_attr *attr)
{
......
......@@ -1528,7 +1528,7 @@ static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
if (dev->use_fast_reg) {
state.sg = idb_sg;
sg_set_buf(idb_sg, req->indirect_desc, idb_len);
sg_init_one(idb_sg, req->indirect_desc, idb_len);
idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
idb_sg->dma_length = idb_sg->length; /* hack^2 */
......
......@@ -1392,6 +1392,10 @@ struct ulp_mem_io {
#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S)
#define T5_ULP_MEMIO_ORDER_F T5_ULP_MEMIO_ORDER_V(1U)
#define T5_ULP_MEMIO_FID_S 4
#define T5_ULP_MEMIO_FID_M 0x7ff
#define T5_ULP_MEMIO_FID_V(x) ((x) << T5_ULP_MEMIO_FID_S)
/* ulp_mem_io.lock_addr fields */
#define ULP_MEMIO_ADDR_S 0
#define ULP_MEMIO_ADDR_V(x) ((x) << ULP_MEMIO_ADDR_S)
......
......@@ -220,6 +220,7 @@ enum ib_device_cap_flags {
IB_DEVICE_ON_DEMAND_PAGING = (1 << 31),
IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33),
IB_DEVICE_RAW_SCATTER_FCS = ((u64)1 << 34),
};
enum ib_signature_prot_cap {
......@@ -988,6 +989,7 @@ enum ib_qp_create_flags {
IB_QP_CREATE_NETIF_QP = 1 << 5,
IB_QP_CREATE_SIGNATURE_EN = 1 << 6,
IB_QP_CREATE_USE_GFP_NOIO = 1 << 7,
IB_QP_CREATE_SCATTER_FCS = 1 << 8,
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26,
IB_QP_CREATE_RESERVED_END = 1 << 31,
......
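IB_DEVICE_RAW_SCATTER_FCS is defined as ((u64)1 << 34): once the cap-flag enum passed bit 31, a plain "1 << n" would shift a 32-bit int past its width, so the 64-bit cast (or a 1ULL literal) is required, matching the existing SG_GAPS_REG and VIRTUAL_FUNCTION entries. A two-line demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* A plain "1 << 34" would shift a 32-bit int past its width (undefined
	 * behaviour), so the header writes ((u64)1 << 34); a ULL suffix works too. */
	uint64_t raw_scatter_fcs = (uint64_t)1 << 34;
	uint64_t same_thing      = 1ULL << 34;

	printf("bit 34: 0x%llx 0x%llx\n",
	       (unsigned long long)raw_scatter_fcs, (unsigned long long)same_thing);
	return 0;
}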
......@@ -226,6 +226,7 @@ struct ib_uverbs_ex_query_device_resp {
struct ib_uverbs_odp_caps odp_caps;
__u64 timestamp_mask;
__u64 hca_core_clock; /* in KHZ */
__u64 device_cap_flags_ex;
};
struct ib_uverbs_query_port {
......