Commit 7f1d25b4 authored by Doug Ledford

Merge branches 'misc' and 'rxe' into k.o/for-4.8-1

@@ -7444,6 +7444,15 @@ W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
F: drivers/net/ethernet/mellanox/mlxsw/
SOFT-ROCE DRIVER (rxe)
M: Moni Shoua <monis@mellanox.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
Q: http://patchwork.kernel.org/project/linux-rdma/list/
F: drivers/infiniband/hw/rxe/
F: include/uapi/rdma/rdma_user_rxe.h
MEMBARRIER SUPPORT
M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
...
@@ -84,6 +84,7 @@ source "drivers/infiniband/ulp/iser/Kconfig"
source "drivers/infiniband/ulp/isert/Kconfig"
source "drivers/infiniband/sw/rdmavt/Kconfig"
source "drivers/infiniband/sw/rxe/Kconfig"
source "drivers/infiniband/hw/hfi1/Kconfig" source "drivers/infiniband/hw/hfi1/Kconfig"
......
@@ -68,6 +68,7 @@ MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");
#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18
@@ -162,6 +163,14 @@ struct rdma_bind_list {
unsigned short port;
};
struct class_port_info_context {
struct ib_class_port_info *class_port_info;
struct ib_device *device;
struct completion done;
struct ib_sa_query *sa_query;
u8 port_num;
};
static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
struct rdma_bind_list *bind_list, int snum)
{
@@ -306,6 +315,7 @@ struct cma_multicast {
struct sockaddr_storage addr;
struct kref mcref;
bool igmp_joined;
u8 join_state;
};
struct cma_work {
@@ -3754,10 +3764,63 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
}
}
static void cma_query_sa_classport_info_cb(int status,
struct ib_class_port_info *rec,
void *context)
{
struct class_port_info_context *cb_ctx = context;
WARN_ON(!context);
if (status || !rec) {
pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n",
cb_ctx->device->name, cb_ctx->port_num, status);
goto out;
}
memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info));
out:
complete(&cb_ctx->done);
}
static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num,
struct ib_class_port_info *class_port_info)
{
struct class_port_info_context *cb_ctx;
int ret;
cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL);
if (!cb_ctx)
return -ENOMEM;
cb_ctx->device = device;
cb_ctx->class_port_info = class_port_info;
cb_ctx->port_num = port_num;
init_completion(&cb_ctx->done);
ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num,
CMA_QUERY_CLASSPORT_INFO_TIMEOUT,
GFP_KERNEL, cma_query_sa_classport_info_cb,
cb_ctx, &cb_ctx->sa_query);
if (ret < 0) {
pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n",
device->name, port_num, ret);
goto out;
}
wait_for_completion(&cb_ctx->done);
out:
kfree(cb_ctx);
return ret;
}
static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
struct cma_multicast *mc)
{
struct ib_sa_mcmember_rec rec;
struct ib_class_port_info class_port_info;
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
ib_sa_comp_mask comp_mask;
int ret;
@@ -3776,7 +3839,24 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
rec.qkey = cpu_to_be32(id_priv->qkey);
rdma_addr_get_sgid(dev_addr, &rec.port_gid);
rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
-rec.join_state = 1;
+rec.join_state = mc->join_state;
if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
ret = cma_query_sa_classport_info(id_priv->id.device,
id_priv->id.port_num,
&class_port_info);
if (ret)
return ret;
if (!(ib_get_cpi_capmask2(&class_port_info) &
IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
"RDMA CM: SM doesn't support Send Only Full Member option\n",
id_priv->id.device->name, id_priv->id.port_num);
return -EOPNOTSUPP;
}
}
comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
@@ -3845,6 +3925,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
struct sockaddr *addr = (struct sockaddr *)&mc->addr;
struct net_device *ndev = NULL;
enum ib_gid_type gid_type;
bool send_only;
send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
if (cma_zero_addr((struct sockaddr *)&mc->addr))
return -EINVAL;
@@ -3878,12 +3961,14 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
rdma_start_port(id_priv->cma_dev->device)];
if (addr->sa_family == AF_INET) {
-if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
-	err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, true);
-if (!err) {
-	mc->igmp_joined = true;
-	mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
-}
+if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+	mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+	if (!send_only) {
+		err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, true);
+		if (!err)
+			mc->igmp_joined = true;
+	}
+}
} else {
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
@@ -3913,7 +3998,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
}
int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
-	void *context)
+	u8 join_state, void *context)
{
struct rdma_id_private *id_priv;
struct cma_multicast *mc;
@@ -3932,6 +4017,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
mc->context = context;
mc->id_priv = id_priv;
mc->igmp_joined = false;
mc->join_state = join_state;
spin_lock(&id_priv->lock);
list_add(&mc->list, &id_priv->mc_list);
spin_unlock(&id_priv->lock);
...
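The cma_query_sa_classport_info() helper added above turns an asynchronous SA query into a blocking call by pairing the callback with a struct completion. A minimal sketch of that pattern, with hypothetical names (my_ctx, my_cb, start_async_query) rather than the actual cma symbols:

#include <linux/completion.h>
#include <linux/slab.h>

struct my_ctx {
	struct completion done;
	int result;
};

/* hypothetical async kick-off; stands in for ib_sa_classport_info_rec_query() */
extern int start_async_query(void (*cb)(int status, void *context), void *ctx);

/* runs in the querying client's context when the answer (or an error) arrives */
static void my_cb(int status, void *context)
{
	struct my_ctx *ctx = context;

	ctx->result = status;   /* stash the outcome for the waiter */
	complete(&ctx->done);   /* wake the sleeping caller */
}

static int my_blocking_query(void)
{
	struct my_ctx *ctx;
	int ret;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	init_completion(&ctx->done);
	ret = start_async_query(my_cb, ctx);
	if (ret >= 0) {
		wait_for_completion(&ctx->done);   /* sleep until my_cb() fires */
		ret = ctx->result;
	}
	kfree(ctx);
	return ret;
}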
@@ -37,6 +37,7 @@
#define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1)
#define IWPM_REMINFO_HASH_SIZE 64
#define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1)
#define IWPM_MSG_SIZE 512
static LIST_HEAD(iwpm_nlmsg_req_list);
static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
@@ -452,7 +453,7 @@ struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,
{
struct sk_buff *skb = NULL;
-skb = dev_alloc_skb(NLMSG_GOODSIZE);
+skb = dev_alloc_skb(IWPM_MSG_SIZE);
if (!skb) {
pr_err("%s Unable to allocate skb\n", __func__);
goto create_nlmsg_exit;
...
@@ -93,18 +93,6 @@ enum {
struct mcast_member;
/*
* There are 4 types of join states:
* FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember.
*/
enum {
FULLMEMBER_JOIN,
NONMEMBER_JOIN,
SENDONLY_NONMEBER_JOIN,
SENDONLY_FULLMEMBER_JOIN,
NUM_JOIN_MEMBERSHIP_TYPES,
};
struct mcast_group {
struct ib_sa_mcmember_rec rec;
struct rb_node node;
...
@@ -65,10 +65,17 @@ struct ib_sa_sm_ah {
u8 src_path_mask;
};
struct ib_sa_classport_cache {
bool valid;
struct ib_class_port_info data;
};
struct ib_sa_port {
struct ib_mad_agent *agent;
struct ib_sa_sm_ah *sm_ah;
struct work_struct update_task;
struct ib_sa_classport_cache classport_info;
spinlock_t classport_lock; /* protects class port info set */
spinlock_t ah_lock;
u8 port_num;
};
@@ -998,6 +1005,13 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
port->sm_ah = NULL;
spin_unlock_irqrestore(&port->ah_lock, flags);
if (event->event == IB_EVENT_SM_CHANGE ||
event->event == IB_EVENT_CLIENT_REREGISTER ||
event->event == IB_EVENT_LID_CHANGE) {
spin_lock_irqsave(&port->classport_lock, flags);
port->classport_info.valid = false;
spin_unlock_irqrestore(&port->classport_lock, flags);
}
queue_work(ib_wq, &sa_dev->port[event->element.port_num -
sa_dev->start_port].update_task);
}
@@ -1719,6 +1733,7 @@ static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
int status,
struct ib_sa_mad *mad)
{
unsigned long flags;
struct ib_sa_classport_info_query *query =
container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
@@ -1728,6 +1743,16 @@ static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
ib_unpack(classport_info_rec_table,
ARRAY_SIZE(classport_info_rec_table),
mad->data, &rec);
spin_lock_irqsave(&sa_query->port->classport_lock, flags);
if (!status && !sa_query->port->classport_info.valid) {
memcpy(&sa_query->port->classport_info.data, &rec,
sizeof(sa_query->port->classport_info.data));
sa_query->port->classport_info.valid = true;
}
spin_unlock_irqrestore(&sa_query->port->classport_lock, flags);
query->callback(status, &rec, query->context);
} else {
query->callback(status, NULL, query->context);
@@ -1754,7 +1779,9 @@ int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
struct ib_sa_port *port;
struct ib_mad_agent *agent;
struct ib_sa_mad *mad;
struct ib_class_port_info cached_class_port_info;
int ret;
unsigned long flags;
if (!sa_dev)
return -ENODEV;
@@ -1762,6 +1789,17 @@ int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
port = &sa_dev->port[port_num - sa_dev->start_port];
agent = port->agent;
/* Use cached ClassPortInfo attribute if valid instead of sending mad */
spin_lock_irqsave(&port->classport_lock, flags);
if (port->classport_info.valid && callback) {
memcpy(&cached_class_port_info, &port->classport_info.data,
sizeof(cached_class_port_info));
spin_unlock_irqrestore(&port->classport_lock, flags);
callback(0, &cached_class_port_info, context);
return 0;
}
spin_unlock_irqrestore(&port->classport_lock, flags);
query = kzalloc(sizeof(*query), gfp_mask);
if (!query)
return -ENOMEM;
@@ -1885,6 +1923,9 @@ static void ib_sa_add_one(struct ib_device *device)
sa_dev->port[i].sm_ah = NULL;
sa_dev->port[i].port_num = i + s;
spin_lock_init(&sa_dev->port[i].classport_lock);
sa_dev->port[i].classport_info.valid = false;
sa_dev->port[i].agent =
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
NULL, 0, send_handler,
...
@@ -106,6 +106,7 @@ struct ucma_multicast {
int events_reported;
u64 uid;
u8 join_state;
struct list_head list;
struct sockaddr_storage addr;
};
@@ -1317,12 +1318,20 @@ static ssize_t ucma_process_join(struct ucma_file *file,
struct ucma_multicast *mc;
struct sockaddr *addr;
int ret;
u8 join_state;
if (out_len < sizeof(resp))
return -ENOSPC;
addr = (struct sockaddr *) &cmd->addr;
-if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
return -EINVAL;
if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
join_state = BIT(FULLMEMBER_JOIN);
else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
else
return -EINVAL;
ctx = ucma_get_ctx(file, cmd->id);
@@ -1335,10 +1344,11 @@ static ssize_t ucma_process_join(struct ucma_file *file,
ret = -ENOMEM;
goto err1;
}
mc->join_state = join_state;
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
-ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
+ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
+			  join_state, mc);
if (ret)
goto err2;
@@ -1382,7 +1392,7 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
join_cmd.uid = cmd.uid;
join_cmd.id = cmd.id;
join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
-join_cmd.reserved = 0;
+join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
return ucma_process_join(file, &join_cmd, out_len);
...
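The ucma_process_join() change above maps the userspace join_flags value onto the SA MCMemberRecord JoinState bitmask via the join-membership enum that this series moves out of multicast.c. A quick worked sketch of the resulting values (flags_to_join_state is a hypothetical wrapper for illustration; the enum and flags are the real ones):

#include <linux/bitops.h>   /* BIT() */
#include <linux/types.h>    /* u8, u32 */

enum {
	FULLMEMBER_JOIN,           /* 0 */
	NONMEMBER_JOIN,            /* 1 */
	SENDONLY_NONMEBER_JOIN,    /* 2 (spelled as in the source) */
	SENDONLY_FULLMEMBER_JOIN,  /* 3 */
	NUM_JOIN_MEMBERSHIP_TYPES,
};

static u8 flags_to_join_state(u32 join_flags)
{
	/* BIT(n) is (1UL << (n)), so the two supported flags become: */
	if (join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		return BIT(FULLMEMBER_JOIN);           /* 0x01 */
	if (join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		return BIT(SENDONLY_FULLMEMBER_JOIN);  /* 0x08 */
	return 0;   /* caller rejects anything else with -EINVAL */
}

rec.join_state then carries exactly one of these bits to the SA, which is what the SendOnly Full Member capability check in cma_join_ib_multicast() tests for.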
@@ -116,6 +116,7 @@ struct ib_uverbs_event_file {
struct ib_uverbs_file {
struct kref ref;
struct mutex mutex;
struct mutex cleanup_mutex; /* protect cleanup */
struct ib_uverbs_device *device;
struct ib_ucontext *ucontext;
struct ib_event_handler event_handler;
...
@@ -969,6 +969,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
file->async_file = NULL;
kref_init(&file->ref);
mutex_init(&file->mutex);
mutex_init(&file->cleanup_mutex);
filp->private_data = file;
kobject_get(&dev->kobj);
@@ -994,18 +995,20 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
struct ib_uverbs_file *file = filp->private_data;
struct ib_uverbs_device *dev = file->device;
-struct ib_ucontext *ucontext = NULL;
-mutex_lock(&file->device->lists_mutex);
-ucontext = file->ucontext;
-file->ucontext = NULL;
+mutex_lock(&file->cleanup_mutex);
+if (file->ucontext) {
+	ib_uverbs_cleanup_ucontext(file, file->ucontext);
+	file->ucontext = NULL;
+}
+mutex_unlock(&file->cleanup_mutex);
+mutex_lock(&file->device->lists_mutex);
if (!file->is_closed) {
list_del(&file->list);
file->is_closed = 1;
}
mutex_unlock(&file->device->lists_mutex);
-if (ucontext)
-	ib_uverbs_cleanup_ucontext(file, ucontext);
if (file->async_file)
kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
@@ -1219,22 +1222,30 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
mutex_lock(&uverbs_dev->lists_mutex);
while (!list_empty(&uverbs_dev->uverbs_file_list)) {
struct ib_ucontext *ucontext;
file = list_first_entry(&uverbs_dev->uverbs_file_list,
struct ib_uverbs_file, list);
file->is_closed = 1;
-ucontext = file->ucontext;
list_del(&file->list);
-file->ucontext = NULL;
kref_get(&file->ref);
mutex_unlock(&uverbs_dev->lists_mutex);
-/* We must release the mutex before going ahead and calling
- * disassociate_ucontext. disassociate_ucontext might end up
- * indirectly calling uverbs_close, for example due to freeing
- * the resources (e.g mmput).
- */
ib_uverbs_event_handler(&file->event_handler, &event);
+mutex_lock(&file->cleanup_mutex);
+ucontext = file->ucontext;
+file->ucontext = NULL;
+mutex_unlock(&file->cleanup_mutex);
+/* At this point ib_uverbs_close cannot be running
+ * ib_uverbs_cleanup_ucontext
+ */
if (ucontext) {
+	/* We must release the mutex before going ahead and
+	 * calling disassociate_ucontext. disassociate_ucontext
+	 * might end up indirectly calling uverbs_close,
+	 * for example due to freeing the resources
+	 * (e.g mmput).
+	 */
ib_dev->disassociate_ucontext(ucontext);
ib_uverbs_cleanup_ucontext(file, ucontext);
}
...
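Both uverbs teardown paths above now race for the same ucontext, and the new cleanup_mutex guarantees that exactly one of them performs the cleanup. Reduced to its core, the idiom is claim-under-lock: whichever path runs first atomically takes the pointer and NULLs it, and the loser sees nothing to do. A generic sketch under hypothetical names (struct my_res, free_my_res), not the uverbs symbols:

#include <linux/mutex.h>

struct my_res;                          /* hypothetical resource type */
extern void free_my_res(struct my_res *res);   /* hypothetical cleanup */

struct owner {
	struct mutex lock;
	struct my_res *res;
};

static void teardown_once(struct owner *o)
{
	struct my_res *res;

	mutex_lock(&o->lock);
	res = o->res;      /* claim the resource... */
	o->res = NULL;     /* ...and publish that it is gone */
	mutex_unlock(&o->lock);

	if (res)                  /* at most one caller sees non-NULL */
		free_my_res(res); /* heavy cleanup runs outside the lock */
}

Running the actual cleanup outside the lock matters here because, as the in-tree comment notes, disassociate_ucontext can re-enter uverbs_close indirectly.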
@@ -3,7 +3,6 @@ config INFINIBAND_HFI1
depends on X86_64 && INFINIBAND_RDMAVT
select MMU_NOTIFIER
select CRC32
default m
---help---
This is a low-level driver for Intel OPA Gen1 adapter.
config HFI1_DEBUG_SDMA_ORDER
...
@@ -225,7 +225,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
sizeof(struct hfi1_base_info));
break;
case HFI1_IOCTL_CREDIT_UPD:
-if (uctxt && uctxt->sc)
+if (uctxt)
sc_return_credits(uctxt->sc);
break;
...
@@ -288,7 +288,7 @@ static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
if (cq->resize_buf)
return -EBUSY;
-cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
if (!cq->resize_buf)
return -ENOMEM;
@@ -316,7 +316,7 @@ static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq
if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
return -EFAULT;
-cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
if (!cq->resize_buf)
return -ENOMEM;
...
@@ -2049,6 +2049,195 @@ static struct device_attribute *mlx4_class_attributes[] = {
&dev_attr_board_id
};
struct diag_counter {
const char *name;
u32 offset;
};
#define DIAG_COUNTER(_name, _offset) \
{ .name = #_name, .offset = _offset }
static const struct diag_counter diag_basic[] = {
DIAG_COUNTER(rq_num_lle, 0x00),
DIAG_COUNTER(sq_num_lle, 0x04),
DIAG_COUNTER(rq_num_lqpoe, 0x08),
DIAG_COUNTER(sq_num_lqpoe, 0x0C),
DIAG_COUNTER(rq_num_lpe, 0x18),
DIAG_COUNTER(sq_num_lpe, 0x1C),
DIAG_COUNTER(rq_num_wrfe, 0x20),
DIAG_COUNTER(sq_num_wrfe, 0x24),
DIAG_COUNTER(sq_num_mwbe, 0x2C),
DIAG_COUNTER(sq_num_bre, 0x34),
DIAG_COUNTER(sq_num_rire, 0x44),
DIAG_COUNTER(rq_num_rire, 0x48),
DIAG_COUNTER(sq_num_rae, 0x4C),
DIAG_COUNTER(rq_num_rae, 0x50),
DIAG_COUNTER(sq_num_roe, 0x54),
DIAG_COUNTER(sq_num_tree, 0x5C),
DIAG_COUNTER(sq_num_rree, 0x64),
DIAG_COUNTER(rq_num_rnr, 0x68),
DIAG_COUNTER(sq_num_rnr, 0x6C),
DIAG_COUNTER(rq_num_oos, 0x100),
DIAG_COUNTER(sq_num_oos, 0x104),
};
static const struct diag_counter diag_ext[] = {
DIAG_COUNTER(rq_num_dup, 0x130),
DIAG_COUNTER(sq_num_to, 0x134),
};
static const struct diag_counter diag_device_only[] = {
DIAG_COUNTER(num_cqovf, 0x1A0),
DIAG_COUNTER(rq_num_udsdprd, 0x118),
};
static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
u8 port_num)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
if (!diag[!!port_num].name)
return NULL;
return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
diag[!!port_num].num_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u8 port, int index)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
u32 hw_value[ARRAY_SIZE(diag_device_only) +
ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
int ret;
int i;
ret = mlx4_query_diag_counters(dev->dev,
MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
diag[!!port].offset, hw_value,
diag[!!port].num_counters, port);
if (ret)
return ret;
for (i = 0; i < diag[!!port].num_counters; i++)
stats->value[i] = hw_value[i];
return diag[!!port].num_counters;
}
static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
const char ***name,
u32 **offset,
u32 *num,
bool port)
{
u32 num_counters;
num_counters = ARRAY_SIZE(diag_basic);
if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
num_counters += ARRAY_SIZE(diag_ext);
if (!port)
num_counters += ARRAY_SIZE(diag_device_only);
*name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
if (!*name)
return -ENOMEM;
*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
if (!*offset)
goto err_name;
*num = num_counters;
return 0;
err_name:
kfree(*name);
return -ENOMEM;
}
static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
const char **name,
u32 *offset,
bool port)
{
int i;
int j;
for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
name[i] = diag_basic[i].name;
offset[i] = diag_basic[i].offset;
}
if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
name[j] = diag_ext[i].name;
offset[j] = diag_ext[i].offset;
}
}
if (!port) {
for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
name[j] = diag_device_only[i].name;
offset[j] = diag_device_only[i].offset;
}
}
}
static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
{
struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
int i;
int ret;
bool per_port = !!(ibdev->dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
/* i == 1 means we are building port counters */
if (i && !per_port)
continue;
ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
&diag[i].offset,
&diag[i].num_counters, i);
if (ret)
goto err_alloc;
mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
diag[i].offset, i);
}
ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats;
ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats;
return 0;
err_alloc:
if (i) {
kfree(diag[i - 1].name);
kfree(diag[i - 1].offset);
}
return ret;
}
static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
{
int i;
for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
kfree(ibdev->diag_counters[i].offset);
kfree(ibdev->diag_counters[i].name);
}
}
#define MLX4_IB_INVALID_MAC ((u64)-1)
static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
struct net_device *dev,
@@ -2552,9 +2741,12 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
-if (ib_register_device(&ibdev->ib_dev, NULL))
+if (mlx4_ib_alloc_diag_counters(ibdev))
goto err_steer_free_bitmap;
if (ib_register_device(&ibdev->ib_dev, NULL))
goto err_diag_counters;
if (mlx4_ib_mad_init(ibdev))
goto err_reg;
@@ -2620,6 +2812,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
err_reg:
ib_unregister_device(&ibdev->ib_dev);
err_diag_counters:
mlx4_ib_diag_cleanup(ibdev);
err_steer_free_bitmap:
kfree(ibdev->ib_uc_qpns_bitmap);
@@ -2723,6 +2918,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
mlx4_ib_close_sriov(ibdev);
mlx4_ib_mad_cleanup(ibdev);
ib_unregister_device(&ibdev->ib_dev);
mlx4_ib_diag_cleanup(ibdev);
if (ibdev->iboe.nb.notifier_call) {
if (unregister_netdevice_notifier(&ibdev->iboe.nb))
pr_warn("failure unregistering notifier\n");
...
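Two details in the mlx4 counter hunks above are easy to miss: DIAG_COUNTER relies on the preprocessor's stringification operator, and the diag[!!port] indexing folds every real port number onto entry 1 of the two-entry diag_counters array, keeping entry 0 for the device-wide set. A short sketch of both, using nothing beyond standard C:

/* #_name stringifies the token, so DIAG_COUNTER(rq_num_lle, 0x00)
 * expands to:
 *
 *	{ .name = "rq_num_lle", .offset = 0x00 }
 */

/* !! normalizes any non-zero value to 1: */
int idx0 = !!0;   /* 0: device-wide counters (hw_stats allocated with port 0) */
int idx1 = !!1;   /* 1: per-port counter set */
int idx2 = !!2;   /* 1: port 2 shares the same per-port descriptors */

This is also why MLX4_DIAG_COUNTERS_TYPES is 2, and why the per-port set is only populated when the device advertises MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT.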
@@ -549,6 +549,14 @@ struct mlx4_ib_counters {
u32 default_counter;
};
#define MLX4_DIAG_COUNTERS_TYPES 2
struct mlx4_ib_diag_counters {
const char **name;
u32 *offset;
u32 num_counters;
};
struct mlx4_ib_dev {
struct ib_device ib_dev;
struct mlx4_dev *dev;
@@ -585,6 +593,7 @@ struct mlx4_ib_dev {
/* protect resources needed as part of reset flow */
spinlock_t reset_flow_resource_lock;
struct list_head qp_list;
struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES];
};
struct ib_event_work {
...
@@ -98,7 +98,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENOMEM;
mthca_err(mdev, "Couldn't allocate memory to save HCA "
"PCI header, aborting.\n");
-goto out;
+goto put_dev;
}
for (i = 0; i < 64; ++i) {
@@ -108,7 +108,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't save HCA "
"PCI header, aborting.\n");
-goto out;
+goto free_hca;
}
}
@@ -121,7 +121,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENOMEM;
mthca_err(mdev, "Couldn't allocate memory to save HCA "
"bridge PCI header, aborting.\n");
-goto out;
+goto free_hca;
}
for (i = 0; i < 64; ++i) {
@@ -131,7 +131,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't save HCA bridge "
"PCI header, aborting.\n");
-goto out;
+goto free_bh;
}
}
bridge_pcix_cap = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
@@ -139,7 +139,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't locate HCA bridge "
"PCI-X capability, aborting.\n");
-goto out;
+goto free_bh;
}
}
@@ -152,7 +152,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENOMEM;
mthca_err(mdev, "Couldn't map HCA reset register, "
"aborting.\n");
-goto out;
+goto free_bh;
}
writel(MTHCA_RESET_VALUE, reset);
@@ -172,7 +172,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't access HCA after reset, "
"aborting.\n");
-goto out;
+goto free_bh;
}
if (v != 0xffffffff)
@@ -184,7 +184,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "PCI device did not come back after reset, "
"aborting.\n");
-goto out;
+goto free_bh;
}
good:
@@ -195,14 +195,14 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge Upstream "
"split transaction control, aborting.\n");
-goto out;
+goto free_bh;
}
if (pci_write_config_dword(bridge, bridge_pcix_cap + 0xc,
bridge_header[(bridge_pcix_cap + 0xc) / 4])) {
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge Downstream "
"split transaction control, aborting.\n");
-goto out;
+goto free_bh;
}
/*
* Bridge control register is at 0x3e, so we'll
@@ -216,7 +216,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge reg %x, "
"aborting.\n", i);
-goto out;
+goto free_bh;
}
}
@@ -225,7 +225,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, "
"aborting.\n");
-goto out;
+goto free_bh;
}
}
@@ -235,7 +235,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI-X "
"command register, aborting.\n");
-goto out;
+goto free_bh;
}
}
@@ -246,7 +246,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI Express "
"Device Control register, aborting.\n");
-goto out;
+goto free_bh;
}
linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
if (pcie_capability_write_word(mdev->pdev, PCI_EXP_LNKCTL,
@@ -254,7 +254,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA PCI Express "
"Link control register, aborting.\n");
-goto out;
+goto free_bh;
}
}
@@ -266,7 +266,7 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA reg %x, "
"aborting.\n", i);
-goto out;
+goto free_bh;
}
}
@@ -275,14 +275,12 @@ int mthca_reset(struct mthca_dev *mdev)
err = -ENODEV;
mthca_err(mdev, "Couldn't restore HCA COMMAND, "
"aborting.\n");
-goto out;
}
-out:
-if (bridge)
-	pci_dev_put(bridge);
+free_bh:
kfree(bridge_header);
+free_hca:
kfree(hca_header);
+put_dev:
+pci_dev_put(bridge);
return err;
}
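The mthca_reset() rework above replaces a single catch-all out: label (which had to test whether bridge was valid) with the kernel's layered-unwind convention: each label undoes exactly the state acquired before the corresponding failure point, and both the success path and every error path fall through the same ladder. The dropped if (bridge) test is safe to lose because pci_dev_put() ignores a NULL pointer, and kfree(NULL) is likewise a no-op. A generic sketch with hypothetical names (use_buffers stands in for the real work):

#include <linux/slab.h>

extern int use_buffers(char *a, char *b);   /* hypothetical work step */

static int ladder_example(void)
{
	char *first, *second;
	int err = 0;

	first = kmalloc(64, GFP_KERNEL);
	if (!first)
		return -ENOMEM;             /* nothing to unwind yet */

	second = kmalloc(64, GFP_KERNEL);
	if (!second) {
		err = -ENOMEM;
		goto free_first;            /* skip the layer that never existed */
	}

	err = use_buffers(first, second);   /* failure falls into the full unwind */

	kfree(second);                      /* success unwinds here too */
free_first:
	kfree(first);
	return err;
}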
obj-$(CONFIG_INFINIBAND_RDMAVT) += rdmavt/
obj-$(CONFIG_RDMA_RXE) += rxe/
config INFINIBAND_RDMAVT
tristate "RDMA verbs transport library"
depends on 64BIT
default m
---help---
This is a common software verbs provider for RDMA networks.
config RDMA_RXE
tristate "Software RDMA over Ethernet (RoCE) driver"
depends on INET && PCI && INFINIBAND
depends on NET_UDP_TUNNEL
---help---
This driver implements the InfiniBand RDMA transport over
the Linux network stack. It enables a system with a
standard Ethernet adapter to interoperate with a RoCE
adapter or with another system running the RXE driver.
Documentation on InfiniBand and RoCE can be downloaded at
www.infinibandta.org and www.openfabrics.org. (See also
siw which is a similar software driver for iWARP.)
The driver is split into two layers, one interfaces with the
Linux RDMA stack and implements a kernel or user space
verbs API. The user space verbs API requires a support
library named librxe which is loaded by the generic user
space verbs API, libibverbs. The other layer interfaces
with the Linux network stack at layer 3.
To configure and work with soft-RoCE driver please use the
following wiki page under "configure Soft-RoCE (RXE)" section:
https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
obj-$(CONFIG_RDMA_RXE) += rdma_rxe.o
rdma_rxe-y := \
rxe.o \
rxe_comp.o \
rxe_req.o \
rxe_resp.o \
rxe_recv.o \
rxe_pool.o \
rxe_queue.o \
rxe_verbs.o \
rxe_av.o \
rxe_srq.o \
rxe_qp.o \
rxe_cq.o \
rxe_mr.o \
rxe_dma.o \
rxe_opcode.o \
rxe_mmap.o \
rxe_icrc.o \
rxe_mcast.o \
rxe_task.o \
rxe_net.o \
rxe_sysfs.o
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef RXE_H
#define RXE_H
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include "rxe_net.h"
#include "rxe_opcode.h"
#include "rxe_hdr.h"
#include "rxe_param.h"
#include "rxe_verbs.h"
#define RXE_UVERBS_ABI_VERSION (1)
#define IB_PHYS_STATE_LINK_UP (5)
#define IB_PHYS_STATE_LINK_DOWN (3)
#define RXE_ROCE_V2_SPORT (0xc000)
int rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu);
void rxe_remove(struct rxe_dev *rxe);
void rxe_remove_all(void);
int rxe_rcv(struct sk_buff *skb);
void rxe_dev_put(struct rxe_dev *rxe);
struct rxe_dev *net_to_rxe(struct net_device *ndev);
struct rxe_dev *get_rxe_by_name(const char* name);
void rxe_port_up(struct rxe_dev *rxe);
void rxe_port_down(struct rxe_dev *rxe);
#endif /* RXE_H */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "rxe.h"
#include "rxe_loc.h"
int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr)
{
struct rxe_port *port;
if (attr->port_num != 1) {
pr_info("rxe: invalid port_num = %d\n", attr->port_num);
return -EINVAL;
}
port = &rxe->port;
if (attr->ah_flags & IB_AH_GRH) {
if (attr->grh.sgid_index > port->attr.gid_tbl_len) {
pr_info("rxe: invalid sgid index = %d\n",
attr->grh.sgid_index);
return -EINVAL;
}
}
return 0;
}
int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num,
struct rxe_av *av, struct ib_ah_attr *attr)
{
memset(av, 0, sizeof(*av));
memcpy(&av->grh, &attr->grh, sizeof(attr->grh));
av->port_num = port_num;
return 0;
}
int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av,
struct ib_ah_attr *attr)
{
memcpy(&attr->grh, &av->grh, sizeof(av->grh));
attr->port_num = av->port_num;
return 0;
}
int rxe_av_fill_ip_info(struct rxe_dev *rxe,
struct rxe_av *av,
struct ib_ah_attr *attr,
struct ib_gid_attr *sgid_attr,
union ib_gid *sgid)
{
rdma_gid2ip(&av->sgid_addr._sockaddr, sgid);
rdma_gid2ip(&av->dgid_addr._sockaddr, &attr->grh.dgid);
av->network_type = ib_gid_to_network_type(sgid_attr->gid_type, sgid);
return 0;
}
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
{
if (!pkt || !pkt->qp)
return NULL;
if (qp_type(pkt->qp) == IB_QPT_RC || qp_type(pkt->qp) == IB_QPT_UC)
return &pkt->qp->pri_av;
return (pkt->wqe) ? &pkt->wqe->av : NULL;
}
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
int cqe, int comp_vector, struct ib_udata *udata)
{
int count;
if (cqe <= 0) {
pr_warn("cqe(%d) <= 0\n", cqe);
goto err1;
}
if (cqe > rxe->attr.max_cqe) {
pr_warn("cqe(%d) > max_cqe(%d)\n",
cqe, rxe->attr.max_cqe);
goto err1;
}
if (cq) {
count = queue_count(cq->queue);
if (cqe < count) {
pr_warn("cqe(%d) < current # elements in queue (%d)",
cqe, count);
goto err1;
}
}
return 0;
err1:
return -EINVAL;
}
static void rxe_send_complete(unsigned long data)
{
struct rxe_cq *cq = (struct rxe_cq *)data;
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
int comp_vector, struct ib_ucontext *context,
struct ib_udata *udata)
{
int err;
cq->queue = rxe_queue_init(rxe, &cqe,
sizeof(struct rxe_cqe));
if (!cq->queue) {
pr_warn("unable to create cq\n");
return -ENOMEM;
}
err = do_mmap_info(rxe, udata, false, context, cq->queue->buf,
cq->queue->buf_size, &cq->queue->ip);
if (err) {
kvfree(cq->queue->buf);
kfree(cq->queue);
return err;
}
if (udata)
cq->is_user = 1;
tasklet_init(&cq->comp_task, rxe_send_complete, (unsigned long)cq);
spin_lock_init(&cq->cq_lock);
cq->ibcq.cqe = cqe;
return 0;
}
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, struct ib_udata *udata)
{
int err;
err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
sizeof(struct rxe_cqe),
cq->queue->ip ? cq->queue->ip->context : NULL,
udata, NULL, &cq->cq_lock);
if (!err)
cq->ibcq.cqe = cqe;
return err;
}
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
struct ib_event ev;
unsigned long flags;
spin_lock_irqsave(&cq->cq_lock, flags);
if (unlikely(queue_full(cq->queue))) {
spin_unlock_irqrestore(&cq->cq_lock, flags);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;
ev.element.cq = &cq->ibcq;
ev.event = IB_EVENT_CQ_ERR;
cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
}
return -EBUSY;
}
memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));
/* make sure all changes to the CQ are written before we update the
* producer pointer
*/
smp_wmb();
advance_producer(cq->queue);
spin_unlock_irqrestore(&cq->cq_lock, flags);
if ((cq->notify == IB_CQ_NEXT_COMP) ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
cq->notify = 0;
tasklet_schedule(&cq->comp_task);
}
return 0;
}
void rxe_cq_cleanup(void *arg)
{
struct rxe_cq *cq = arg;
if (cq->queue)
rxe_queue_cleanup(cq->queue);
}
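rxe_cq_post() above writes the CQE into the queue and only then publishes it by advancing the producer index behind smp_wmb(). For that ordering to mean anything, the consumer must pair it with a read barrier before touching the slot. A kernel-style sketch of the two halves under a simplified ring (generic code, not the actual rxe_queue helpers, which keep their indices in a shared mmap'ed buffer):

#include <linux/types.h>

struct ring {
	unsigned int prod, cons, size;
	struct rxe_cqe *slot;
};

/* producer side, mirroring rxe_cq_post() */
static void ring_push(struct ring *r, const struct rxe_cqe *cqe)
{
	r->slot[r->prod % r->size] = *cqe;
	smp_wmb();          /* CQE contents must be visible before the index */
	r->prod++;
}

/* consumer side: the matching barrier, pairing with the smp_wmb() above */
static bool ring_pop(struct ring *r, struct rxe_cqe *cqe)
{
	if (r->cons == r->prod)
		return false;   /* empty */
	smp_rmb();          /* read the index before reading the slot data */
	*cqe = r->slot[r->cons % r->size];
	r->cons++;
	return true;
}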
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "rxe.h"
#include "rxe_loc.h"
#define DMA_BAD_ADDER ((u64)0)
static int rxe_mapping_error(struct ib_device *dev, u64 dma_addr)
{
return dma_addr == DMA_BAD_ADDER;
}
static u64 rxe_dma_map_single(struct ib_device *dev,
void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
WARN_ON(!valid_dma_direction(direction));
return (uintptr_t)cpu_addr;
}
static void rxe_dma_unmap_single(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
WARN_ON(!valid_dma_direction(direction));
}
static u64 rxe_dma_map_page(struct ib_device *dev,
struct page *page,
unsigned long offset,
size_t size, enum dma_data_direction direction)
{
u64 addr;
WARN_ON(!valid_dma_direction(direction));
if (offset + size > PAGE_SIZE) {
addr = DMA_BAD_ADDER;
goto done;
}
addr = (uintptr_t)page_address(page);
if (addr)
addr += offset;
done:
return addr;
}
static void rxe_dma_unmap_page(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction)
{
WARN_ON(!valid_dma_direction(direction));
}
static int rxe_map_sg(struct ib_device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction)
{
struct scatterlist *sg;
u64 addr;
int i;
int ret = nents;
WARN_ON(!valid_dma_direction(direction));
for_each_sg(sgl, sg, nents, i) {
addr = (uintptr_t)page_address(sg_page(sg));
if (!addr) {
ret = 0;
break;
}
sg->dma_address = addr + sg->offset;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
sg->dma_length = sg->length;
#endif
}
return ret;
}
static void rxe_unmap_sg(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
WARN_ON(!valid_dma_direction(direction));
}
static void rxe_sync_single_for_cpu(struct ib_device *dev,
u64 addr,
size_t size, enum dma_data_direction dir)
{
}
static void rxe_sync_single_for_device(struct ib_device *dev,
u64 addr,
size_t size, enum dma_data_direction dir)
{
}
static void *rxe_dma_alloc_coherent(struct ib_device *dev, size_t size,
u64 *dma_handle, gfp_t flag)
{
struct page *p;
void *addr = NULL;
p = alloc_pages(flag, get_order(size));
if (p)
addr = page_address(p);
if (dma_handle)
*dma_handle = (uintptr_t)addr;
return addr;
}
static void rxe_dma_free_coherent(struct ib_device *dev, size_t size,
void *cpu_addr, u64 dma_handle)
{
free_pages((unsigned long)cpu_addr, get_order(size));
}
struct ib_dma_mapping_ops rxe_dma_mapping_ops = {
.mapping_error = rxe_mapping_error,
.map_single = rxe_dma_map_single,
.unmap_single = rxe_dma_unmap_single,
.map_page = rxe_dma_map_page,
.unmap_page = rxe_dma_unmap_page,
.map_sg = rxe_map_sg,
.unmap_sg = rxe_unmap_sg,
.sync_single_for_cpu = rxe_sync_single_for_cpu,
.sync_single_for_device = rxe_sync_single_for_device,
.alloc_coherent = rxe_dma_alloc_coherent,
.free_coherent = rxe_dma_free_coherent
};
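The table above is registered as rxe's ib_dma_mapping_ops. Because rxe transmits through the network stack rather than a DMA engine, every "mapping" is just the kernel virtual address, and the sync operations are empty. In this kernel generation, the ib_dma_* wrappers in rdma/ib_verbs.h dispatch to such a table when a device installs one; paraphrased (not copied verbatim) the single-buffer case looks roughly like:

static inline u64 ib_dma_map_single(struct ib_device *dev, void *cpu_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)   /* rxe points this at &rxe_dma_mapping_ops */
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

This is why rxe_dma_map_single() can simply cast cpu_addr to u64: the value is only ever handed back to rxe itself, which treats it as a pointer again.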
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "rxe.h"
#include "rxe_loc.h"
/* Compute a partial ICRC for all the IB transport headers. */
u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
unsigned int bth_offset = 0;
struct iphdr *ip4h = NULL;
struct ipv6hdr *ip6h = NULL;
struct udphdr *udph;
struct rxe_bth *bth;
int crc;
int length;
int hdr_size = sizeof(struct udphdr) +
(skb->protocol == htons(ETH_P_IP) ?
sizeof(struct iphdr) : sizeof(struct ipv6hdr));
/* pseudo header buffer size is calculate using ipv6 header size since
* it is bigger than ipv4
*/
u8 pshdr[sizeof(struct udphdr) +
sizeof(struct ipv6hdr) +
RXE_BTH_BYTES];
/* This seed is the result of computing a CRC with a seed of
* 0xfffffff and 8 bytes of 0xff representing a masked LRH.
*/
crc = 0xdebb20e3;
if (skb->protocol == htons(ETH_P_IP)) { /* IPv4 */
memcpy(pshdr, ip_hdr(skb), hdr_size);
ip4h = (struct iphdr *)pshdr;
udph = (struct udphdr *)(ip4h + 1);
ip4h->ttl = 0xff;
ip4h->check = CSUM_MANGLED_0;
ip4h->tos = 0xff;
} else { /* IPv6 */
memcpy(pshdr, ipv6_hdr(skb), hdr_size);
ip6h = (struct ipv6hdr *)pshdr;
udph = (struct udphdr *)(ip6h + 1);
memset(ip6h->flow_lbl, 0xff, sizeof(ip6h->flow_lbl));
ip6h->priority = 0xf;
ip6h->hop_limit = 0xff;
}
udph->check = CSUM_MANGLED_0;
bth_offset += hdr_size;
memcpy(&pshdr[bth_offset], pkt->hdr, RXE_BTH_BYTES);
bth = (struct rxe_bth *)&pshdr[bth_offset];
/* exclude bth.resv8a */
bth->qpn |= cpu_to_be32(~BTH_QPN_MASK);
length = hdr_size + RXE_BTH_BYTES;
crc = crc32_le(crc, pshdr, length);
/* And finish to compute the CRC on the remainder of the headers. */
crc = crc32_le(crc, pkt->hdr + RXE_BTH_BYTES,
rxe_opcode[pkt->opcode].length - RXE_BTH_BYTES);
return crc;
}
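rxe_icrc_hdr() above works because crc32_le() can be chained: feeding the running CRC of one buffer in as the seed for the next buffer yields the CRC of the concatenation. That is what lets rxe compute a partial ICRC over the (masked) headers here and let the send/receive paths finish it over the payload separately. A small sketch of the property, using the in-kernel crc32_le() and hypothetical buffer arguments:

#include <linux/crc32.h>
#include <linux/types.h>

/* returns true: chaining partial CRCs equals one pass over the whole run */
static bool icrc_chaining_holds(const u8 *pkt, size_t hdr_len, size_t pay_len)
{
	u32 seed = 0xdebb20e3;   /* the masked-LRH seed used above */
	u32 one_pass, hdr_part;

	one_pass = crc32_le(seed, pkt, hdr_len + pay_len);
	hdr_part = crc32_le(seed, pkt, hdr_len);   /* the rxe_icrc_hdr() part */
	return crc32_le(hdr_part, pkt + hdr_len, pay_len) == one_pass;
}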
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef RXE_LOC_H
#define RXE_LOC_H
/* rxe_av.c */
int rxe_av_chk_attr(struct rxe_dev *rxe, struct ib_ah_attr *attr);
int rxe_av_from_attr(struct rxe_dev *rxe, u8 port_num,
struct rxe_av *av, struct ib_ah_attr *attr);
int rxe_av_to_attr(struct rxe_dev *rxe, struct rxe_av *av,
struct ib_ah_attr *attr);
int rxe_av_fill_ip_info(struct rxe_dev *rxe,
struct rxe_av *av,
struct ib_ah_attr *attr,
struct ib_gid_attr *sgid_attr,
union ib_gid *sgid);
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt);
/* rxe_cq.c */
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
int cqe, int comp_vector, struct ib_udata *udata);
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
int comp_vector, struct ib_ucontext *context,
struct ib_udata *udata);
int rxe_cq_resize_queue(struct rxe_cq *cq, int new_cqe, struct ib_udata *udata);
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
void rxe_cq_cleanup(void *arg);
/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
struct rxe_mc_grp **grp_p);
int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
struct rxe_mc_grp *grp);
int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
union ib_gid *mgid);
void rxe_drop_all_mcast_groups(struct rxe_qp *qp);
void rxe_mc_cleanup(void *arg);
/* rxe_mmap.c */
struct rxe_mmap_info {
struct list_head pending_mmaps;
struct ib_ucontext *context;
struct kref ref;
void *obj;
struct mminfo info;
};
void rxe_mmap_release(struct kref *ref);
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *dev,
u32 size,
struct ib_ucontext *context,
void *obj);
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
enum copy_direction {
to_mem_obj,
from_mem_obj,
};
int rxe_mem_init_dma(struct rxe_dev *rxe, struct rxe_pd *pd,
int access, struct rxe_mem *mem);
int rxe_mem_init_user(struct rxe_dev *rxe, struct rxe_pd *pd, u64 start,
u64 length, u64 iova, int access, struct ib_udata *udata,
struct rxe_mem *mr);
int rxe_mem_init_fast(struct rxe_dev *rxe, struct rxe_pd *pd,
int max_pages, struct rxe_mem *mem);
int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
int length, enum copy_direction dir, u32 *crcp);
int copy_data(struct rxe_dev *rxe, struct rxe_pd *pd, int access,
struct rxe_dma_info *dma, void *addr, int length,
enum copy_direction dir, u32 *crcp);
void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
enum lookup_type {
lookup_local,
lookup_remote,
};
struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
enum lookup_type type);
int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
u64 *page, int num_pages, u64 iova);
void rxe_mem_cleanup(void *arg);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
/* rxe_qp.c */
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init);
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
struct ib_qp_init_attr *init, struct ib_udata *udata,
struct ib_pd *ibpd);
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init);
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
struct ib_qp_attr *attr, int mask);
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr,
int mask, struct ib_udata *udata);
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask);
void rxe_qp_error(struct rxe_qp *qp);
void rxe_qp_destroy(struct rxe_qp *qp);
void rxe_qp_cleanup(void *arg);
static inline int qp_num(struct rxe_qp *qp)
{
return qp->ibqp.qp_num;
}
static inline enum ib_qp_type qp_type(struct rxe_qp *qp)
{
return qp->ibqp.qp_type;
}
static inline enum ib_qp_state qp_state(struct rxe_qp *qp)
{
return qp->attr.qp_state;
}
static inline int qp_mtu(struct rxe_qp *qp)
{
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
return qp->attr.path_mtu;
else
return RXE_PORT_MAX_MTU;
}
static inline int rcv_wqe_size(int max_sge)
{
return sizeof(struct rxe_recv_wqe) +
max_sge * sizeof(struct ib_sge);
}
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res);
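/* The responder's read/atomic resources form a fixed-size ring indexed by
 * res_head; advancing past max_rd_atomic wraps back to slot zero.
 */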
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
{
qp->resp.res_head++;
if (unlikely(qp->resp.res_head == qp->attr.max_rd_atomic))
qp->resp.res_head = 0;
}
void retransmit_timer(unsigned long data);
void rnr_nak_timer(unsigned long data);
void dump_qp(struct rxe_qp *qp);
/* rxe_srq.c */
#define IB_SRQ_INIT_MASK (~IB_SRQ_LIMIT)
int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask);
int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_init_attr *init,
struct ib_ucontext *context, struct ib_udata *udata);
int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
struct ib_udata *udata);
extern struct ib_dma_mapping_ops rxe_dma_mapping_ops;
void rxe_release(struct kref *kref);
int rxe_completer(void *arg);
int rxe_requester(void *arg);
int rxe_responder(void *arg);
u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb);
void rxe_resp_queue_pkt(struct rxe_dev *rxe,
struct rxe_qp *qp, struct sk_buff *skb);
void rxe_comp_queue_pkt(struct rxe_dev *rxe,
struct rxe_qp *qp, struct sk_buff *skb);
static inline unsigned wr_opcode_mask(int opcode, struct rxe_qp *qp)
{
return rxe_wr_opcode_info[opcode].mask[qp->ibqp.qp_type];
}
static inline int rxe_xmit_packet(struct rxe_dev *rxe, struct rxe_qp *qp,
struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
int err;
int is_request = pkt->mask & RXE_REQ_MASK;
if ((is_request && (qp->req.state != QP_STATE_READY)) ||
(!is_request && (qp->resp.state != QP_STATE_READY))) {
pr_info("Packet dropped. QP is not in ready state\n");
goto drop;
}
if (pkt->mask & RXE_LOOPBACK_MASK) {
memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
err = rxe->ifc_ops->loopback(skb);
} else {
err = rxe->ifc_ops->send(rxe, pkt, skb);
}
if (err) {
rxe->xmit_errors++;
return err;
}
atomic_inc(&qp->skb_out);
if ((qp_type(qp) != IB_QPT_RC) &&
(pkt->mask & RXE_END_MASK)) {
pkt->wqe->state = wqe_state_done;
rxe_run_task(&qp->comp.task, 1);
}
goto done;
drop:
kfree_skb(skb);
err = 0;
done:
return err;
}
#endif /* RXE_LOC_H */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "rxe.h"
#include "rxe_loc.h"
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
struct rxe_mc_grp **grp_p)
{
int err;
struct rxe_mc_grp *grp;
if (rxe->attr.max_mcast_qp_attach == 0) {
err = -EINVAL;
goto err1;
}
grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
if (grp)
goto done;
grp = rxe_alloc(&rxe->mc_grp_pool);
if (!grp) {
err = -ENOMEM;
goto err1;
}
INIT_LIST_HEAD(&grp->qp_list);
spin_lock_init(&grp->mcg_lock);
grp->rxe = rxe;
rxe_add_key(grp, mgid);
err = rxe->ifc_ops->mcast_add(rxe, mgid);
if (err)
goto err2;
done:
*grp_p = grp;
return 0;
err2:
rxe_drop_ref(grp);
err1:
return err;
}
int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
struct rxe_mc_grp *grp)
{
int err;
struct rxe_mc_elem *elem;
	/* check to see if the qp is already a member of the group */
spin_lock_bh(&qp->grp_lock);
spin_lock_bh(&grp->mcg_lock);
list_for_each_entry(elem, &grp->qp_list, qp_list) {
if (elem->qp == qp) {
err = 0;
goto out;
}
}
if (grp->num_qp >= rxe->attr.max_mcast_qp_attach) {
err = -ENOMEM;
goto out;
}
elem = rxe_alloc(&rxe->mc_elem_pool);
if (!elem) {
err = -ENOMEM;
goto out;
}
/* each qp holds a ref on the grp */
rxe_add_ref(grp);
grp->num_qp++;
elem->qp = qp;
elem->grp = grp;
list_add(&elem->qp_list, &grp->qp_list);
list_add(&elem->grp_list, &qp->grp_list);
err = 0;
out:
spin_unlock_bh(&grp->mcg_lock);
spin_unlock_bh(&qp->grp_lock);
return err;
}
int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
union ib_gid *mgid)
{
struct rxe_mc_grp *grp;
struct rxe_mc_elem *elem, *tmp;
grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
if (!grp)
goto err1;
spin_lock_bh(&qp->grp_lock);
spin_lock_bh(&grp->mcg_lock);
list_for_each_entry_safe(elem, tmp, &grp->qp_list, qp_list) {
if (elem->qp == qp) {
list_del(&elem->qp_list);
list_del(&elem->grp_list);
grp->num_qp--;
spin_unlock_bh(&grp->mcg_lock);
spin_unlock_bh(&qp->grp_lock);
rxe_drop_ref(elem);
rxe_drop_ref(grp); /* ref held by QP */
rxe_drop_ref(grp); /* ref from get_key */
return 0;
}
}
spin_unlock_bh(&grp->mcg_lock);
spin_unlock_bh(&qp->grp_lock);
rxe_drop_ref(grp); /* ref from get_key */
err1:
return -EINVAL;
}
void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
{
struct rxe_mc_grp *grp;
struct rxe_mc_elem *elem;
while (1) {
spin_lock_bh(&qp->grp_lock);
if (list_empty(&qp->grp_list)) {
spin_unlock_bh(&qp->grp_lock);
break;
}
elem = list_first_entry(&qp->grp_list, struct rxe_mc_elem,
grp_list);
list_del(&elem->grp_list);
spin_unlock_bh(&qp->grp_lock);
grp = elem->grp;
spin_lock_bh(&grp->mcg_lock);
list_del(&elem->qp_list);
grp->num_qp--;
spin_unlock_bh(&grp->mcg_lock);
rxe_drop_ref(grp);
rxe_drop_ref(elem);
}
}
void rxe_mc_cleanup(void *arg)
{
struct rxe_mc_grp *grp = arg;
struct rxe_dev *rxe = grp->rxe;
rxe_drop_key(grp);
rxe->ifc_ops->mcast_delete(rxe, &grp->mgid);
}
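For context, this is roughly how the verbs layer is expected to drive the attach path; the rxe_attach_mcast() below is a sketch reconstructed from the rest of the driver, not part of this file, so treat it as illustrative rather than authoritative.

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	/* drop the lookup ref; on success the elem holds its own ref */
	rxe_drop_ref(grp);
	return err;
}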
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <asm/pgtable.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
void rxe_mmap_release(struct kref *ref)
{
struct rxe_mmap_info *ip = container_of(ref,
struct rxe_mmap_info, ref);
struct rxe_dev *rxe = to_rdev(ip->context->device);
spin_lock_bh(&rxe->pending_lock);
if (!list_empty(&ip->pending_mmaps))
list_del(&ip->pending_mmaps);
spin_unlock_bh(&rxe->pending_lock);
vfree(ip->obj); /* buf */
kfree(ip);
}
/*
 * open and close keep track of how many times the memory region is mapped,
 * so that it is not released while a mapping still exists.
 */
static void rxe_vma_open(struct vm_area_struct *vma)
{
struct rxe_mmap_info *ip = vma->vm_private_data;
kref_get(&ip->ref);
}
static void rxe_vma_close(struct vm_area_struct *vma)
{
struct rxe_mmap_info *ip = vma->vm_private_data;
kref_put(&ip->ref, rxe_mmap_release);
}
static const struct vm_operations_struct rxe_vm_ops = {
.open = rxe_vma_open,
.close = rxe_vma_close,
};
/**
* rxe_mmap - create a new mmap region
* @context: the IB user context of the process making the mmap() call
* @vma: the VMA to be initialized
* Return zero if the mmap is OK. Otherwise, return an errno.
*/
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct rxe_dev *rxe = to_rdev(context->device);
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
unsigned long size = vma->vm_end - vma->vm_start;
struct rxe_mmap_info *ip, *pp;
int ret;
/*
* Search the device's list of objects waiting for a mmap call.
* Normally, this list is very short since a call to create a
* CQ, QP, or SRQ is soon followed by a call to mmap().
*/
spin_lock_bh(&rxe->pending_lock);
list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
if (context != ip->context || (__u64)offset != ip->info.offset)
continue;
/* Don't allow a mmap larger than the object. */
if (size > ip->info.size) {
pr_err("mmap region is larger than the object!\n");
spin_unlock_bh(&rxe->pending_lock);
ret = -EINVAL;
goto done;
}
goto found_it;
}
pr_warn("unable to find pending mmap info\n");
spin_unlock_bh(&rxe->pending_lock);
ret = -EINVAL;
goto done;
found_it:
list_del_init(&ip->pending_mmaps);
spin_unlock_bh(&rxe->pending_lock);
ret = remap_vmalloc_range(vma, ip->obj, 0);
if (ret) {
pr_err("rxe: err %d from remap_vmalloc_range\n", ret);
goto done;
}
vma->vm_ops = &rxe_vm_ops;
vma->vm_private_data = ip;
rxe_vma_open(vma);
done:
return ret;
}
/*
* Allocate information for rxe_mmap
*/
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe,
u32 size,
struct ib_ucontext *context,
void *obj)
{
struct rxe_mmap_info *ip;
ip = kmalloc(sizeof(*ip), GFP_KERNEL);
if (!ip)
return NULL;
size = PAGE_ALIGN(size);
spin_lock_bh(&rxe->mmap_offset_lock);
if (rxe->mmap_offset == 0)
rxe->mmap_offset = PAGE_SIZE;
ip->info.offset = rxe->mmap_offset;
rxe->mmap_offset += size;
spin_unlock_bh(&rxe->mmap_offset_lock);
INIT_LIST_HEAD(&ip->pending_mmaps);
ip->info.size = size;
ip->context = context;
ip->obj = obj;
kref_init(&ip->ref);
return ip;
}
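The consumer of this machinery lives in userspace: the provider library receives the mminfo (offset and size) in the create-CQ/QP/SRQ response and mmap()s it against the verbs context fd, which lands in rxe_mmap() above. A hedged sketch follows; the struct layout shown and the cmd_fd name are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

/* Assumed mirror of the kernel's struct mminfo: offset acts as the cookie
 * that rxe_mmap() matches against ip->info.offset. */
struct mminfo {
	uint64_t offset;
	uint32_t size;
	uint32_t pad;
};

/* Map a queue buffer the kernel left on its pending_mmaps list; cmd_fd is
 * the verbs context file descriptor (name assumed for this sketch). */
static void *map_rxe_queue(int cmd_fd, const struct mminfo *info)
{
	void *q = mmap(NULL, info->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, cmd_fd, info->offset);
	if (q == MAP_FAILED) {
		perror("mmap");
		return NULL;
	}
	return q;
}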
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef RXE_NET_H
#define RXE_NET_H
#include <net/sock.h>
#include <net/if_inet6.h>
#include <linux/module.h>
struct rxe_recv_sockets {
struct socket *sk4;
struct socket *sk6;
};
extern struct rxe_recv_sockets recv_sockets;
struct rxe_dev *rxe_net_add(struct net_device *ndev);
int rxe_net_init(void);
void rxe_net_exit(void);
#endif /* RXE_NET_H */
@@ -1967,8 +1967,7 @@ int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
 	priv->hca_caps = hca->attrs.device_cap_flags;

 	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
-		priv->dev->hw_features = NETIF_F_SG |
-			NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+		priv->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

 		if (priv->hca_caps & IB_DEVICE_UD_TSO)
 			priv->dev->hw_features |= NETIF_F_TSO;
...
@@ -135,7 +135,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 		.cap = {
 			.max_send_wr  = ipoib_sendq_size,
 			.max_recv_wr  = ipoib_recvq_size,
-			.max_send_sge = 1,
+			.max_send_sge = min_t(u32, priv->ca->attrs.max_sge,
+					      MAX_SKB_FRAGS + 1),
 			.max_recv_sge = IPOIB_UD_RX_SG
 		},
 		.sq_sig_type = IB_SIGNAL_ALL_WR,
@@ -205,10 +206,6 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
 		init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;

-	if (dev->features & NETIF_F_SG)
-		init_attr.cap.max_send_sge =
-			min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
-
 	priv->qp = ib_create_qp(priv->pd, &init_attr);
 	if (IS_ERR(priv->qp)) {
 		printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
@@ -234,6 +231,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	priv->rx_wr.next = NULL;
 	priv->rx_wr.sg_list = priv->rx_sge;

+	if (init_attr.cap.max_send_sge > 1)
+		dev->features |= NETIF_F_SG;
+
 	priv->max_send_sge = init_attr.cap.max_send_sge;

 	return 0;
...
@@ -220,6 +220,7 @@ enum {
 	MLX4_DEV_CAP_FLAG2_LB_SRC_CHK		= 1ULL << 32,
 	MLX4_DEV_CAP_FLAG2_ROCE_V1_V2		= 1ULL << 33,
 	MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER	= 1ULL << 34,
+	MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT	= 1ULL << 35,
 };

 enum {
@@ -1340,6 +1341,9 @@ enum {
 	VXLAN_STEER_BY_INNER_VLAN	= 1 << 4,
 };

+enum {
+	MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS = 0x2,
+};
+
 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
 				enum mlx4_net_trans_promisc_mode mode);
@@ -1380,6 +1384,9 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
 int mlx4_test_interrupts(struct mlx4_dev *dev);
+int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
+			     const u32 offset[], u32 value[],
+			     size_t array_len, u8 port);
 u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
 bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
 struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
...
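A sketch of how the newly exported helper might be called; the two offsets below are invented placeholders, since the real counter offsets are firmware-defined and known only to callers such as the mlx4_ib counter code.

/* Sketch only: reads two per-port transport CI error counters. */
static int query_ci_errors_sketch(struct mlx4_dev *dev, u8 port)
{
	const u32 offs[] = { 0x10, 0x14 };	/* placeholder offsets */
	u32 vals[ARRAY_SIZE(offs)];

	return mlx4_query_diag_counters(dev,
					MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
					offs, vals, ARRAY_SIZE(offs), port);
}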
@@ -333,11 +333,13 @@ int rdma_disconnect(struct rdma_cm_id *id);
  * address.
  * @id: Communication identifier associated with the request.
  * @addr: Multicast address identifying the group to join.
+ * @join_state: Multicast JoinState bitmap requested by port.
+ *	Bitmap is based on IB_SA_MCMEMBER_REC_JOIN_STATE bits.
  * @context: User-defined context associated with the join request, returned
  * to the user through the private_data pointer in multicast events.
  */
 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
-			void *context);
+			u8 join_state, void *context);

 /**
  * rdma_leave_multicast - Leave the multicast group specified by the given
...
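Callers now pass a JoinState bitmap. A hedged fragment under the new signature (bit 0 requests a full-member join per the IBA MCMemberRecord definition; id, mc_addr, and mc_context are assumed to exist in the caller):

	/* JoinState bitmap per IB_SA_MCMEMBER_REC_JOIN_STATE. */
	err = rdma_join_multicast(id, (struct sockaddr *)&mc_addr,
				  BIT(0) /* full member */, mc_context);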