Commit db392219 authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband: (65 commits)
  IB: Fix typo in kerneldoc for ib_set_client_data()
  IPoIB: Add some likely/unlikely annotations in hot path
  IPoIB: Remove unused include of vmalloc.h
  IPoIB: Rejoin all multicast groups after a port event
  IPoIB: Create MCGs with all attributes required by RFC
  IB/sa: fix ib_sa_selector names
  IB/iser: INFINIBAND_ISER depends on INET
  IB/mthca: Simplify calls to mthca_cq_clean()
  RDMA/cma: Document rdma_accept() error handling
  IB/mthca: Recover from catastrophic errors
  RDMA/cma: Document rdma_destroy_id() function
  IB/cm: Do not track remote QPN in timewait state
  IB/sa: Require SA registration
  IPoIB: Refactor completion handling
  IB/iser: Do not use FMR for a single dma entry sg
  IB/iser: fix some debug prints
  IB/iser: make FMR "page size" be 4K and not PAGE_SIZE
  IB/iser: Limit the max size of a scsi command
  IB/iser: fix a check of SG alignment for RDMA
  RDMA/cma: Protect against adding device during destruction
  ...
parents 3e2ab46d 9cd330d3
......@@ -298,6 +298,14 @@ L: info-linux@geode.amd.com
W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
S: Supported
AMSO1100 RNIC DRIVER
P: Tom Tucker
M: tom@opengridcomputing.com
P: Steve Wise
M: swise@opengridcomputing.com
L: openib-general@openib.org
S: Maintained
AOA (Apple Onboard Audio) ALSA DRIVER
P: Johannes Berg
M: johannes@sipsolutions.net
......@@ -991,6 +999,14 @@ EFS FILESYSTEM
W: http://aeschi.ch.eu.org/efs/
S: Orphan
EHCA (IBM GX bus InfiniBand adapter) DRIVER:
P: Hoang-Nam Nguyen
M: hnguyen@de.ibm.com
P: Christoph Raisch
M: raisch@de.ibm.com
L: openib-general@openib.org
S: Supported
EMU10K1 SOUND DRIVER
P: James Courtier-Dutton
M: James@superbug.demon.co.uk
......
......@@ -36,6 +36,8 @@ config INFINIBAND_ADDR_TRANS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/ipath/Kconfig"
source "drivers/infiniband/hw/ehca/Kconfig"
source "drivers/infiniband/hw/amso1100/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
......
obj-$(CONFIG_INFINIBAND) += core/
obj-$(CONFIG_INFINIBAND_MTHCA) += hw/mthca/
obj-$(CONFIG_IPATH_CORE) += hw/ipath/
obj-$(CONFIG_INFINIBAND_IPATH) += hw/ipath/
obj-$(CONFIG_INFINIBAND_EHCA) += hw/ehca/
obj-$(CONFIG_INFINIBAND_AMSO1100) += hw/amso1100/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o
obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
ib_cm.o $(infiniband-y)
ib_cm.o iw_cm.o $(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o
......@@ -14,6 +14,8 @@ ib_sa-y := sa_query.o
ib_cm-y := cm.o
iw_cm-y := iwcm.o
rdma_cm-y := cma.o
ib_addr-y := addr.o
......
......@@ -61,12 +61,15 @@ static LIST_HEAD(req_list);
static DECLARE_WORK(work, process_req, NULL);
static struct workqueue_struct *addr_wq;
static int copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
unsigned char *dst_dev_addr)
int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
const unsigned char *dst_dev_addr)
{
switch (dev->type) {
case ARPHRD_INFINIBAND:
dev_addr->dev_type = IB_NODE_CA;
dev_addr->dev_type = RDMA_NODE_IB_CA;
break;
case ARPHRD_ETHER:
dev_addr->dev_type = RDMA_NODE_RNIC;
break;
default:
return -EADDRNOTAVAIL;
......@@ -78,6 +81,7 @@ static int copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
return 0;
}
EXPORT_SYMBOL(rdma_copy_addr);
int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
......@@ -89,7 +93,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
if (!dev)
return -EADDRNOTAVAIL;
ret = copy_addr(dev_addr, dev, NULL);
ret = rdma_copy_addr(dev_addr, dev, NULL);
dev_put(dev);
return ret;
}
......@@ -161,7 +165,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in,
/* If the device does ARP internally, return 'done' */
if (rt->idev->dev->flags & IFF_NOARP) {
copy_addr(addr, rt->idev->dev, NULL);
rdma_copy_addr(addr, rt->idev->dev, NULL);
goto put;
}
......@@ -181,7 +185,7 @@ static int addr_resolve_remote(struct sockaddr_in *src_in,
src_in->sin_addr.s_addr = rt->rt_src;
}
ret = copy_addr(addr, neigh->dev, neigh->ha);
ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
release:
neigh_release(neigh);
put:
......@@ -245,7 +249,7 @@ static int addr_resolve_local(struct sockaddr_in *src_in,
if (ZERONET(src_ip)) {
src_in->sin_family = dst_in->sin_family;
src_in->sin_addr.s_addr = dst_ip;
ret = copy_addr(addr, dev, dev->dev_addr);
ret = rdma_copy_addr(addr, dev, dev->dev_addr);
} else if (LOOPBACK(src_ip)) {
ret = rdma_translate_ip((struct sockaddr *)dst_in, addr);
if (!ret)
......
......@@ -62,12 +62,13 @@ struct ib_update_work {
static inline int start_port(struct ib_device *device)
{
return device->node_type == IB_NODE_SWITCH ? 0 : 1;
return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}
static inline int end_port(struct ib_device *device)
{
return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt;
return (device->node_type == RDMA_NODE_IB_SWITCH) ?
0 : device->phys_port_cnt;
}
int ib_get_cached_gid(struct ib_device *device,
......
/*
* Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2004-2006 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
......@@ -41,6 +41,7 @@
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
......@@ -73,6 +74,7 @@ static struct ib_cm {
struct rb_root remote_id_table;
struct rb_root remote_sidr_table;
struct idr local_id_table;
__be32 random_id_operand;
struct workqueue_struct *wq;
} cm;
......@@ -299,15 +301,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
int ret;
int ret, id;
static int next_id;
do {
spin_lock_irqsave(&cm.lock, flags);
ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, next_id++,
(__force int *) &cm_id_priv->id.local_id);
ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
next_id++, &id);
spin_unlock_irqrestore(&cm.lock, flags);
} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
cm_id_priv->id.local_id = (__force __be32) (id ^ cm.random_id_operand);
return ret;
}
......@@ -316,7 +320,8 @@ static void cm_free_id(__be32 local_id)
unsigned long flags;
spin_lock_irqsave(&cm.lock, flags);
idr_remove(&cm.local_id_table, (__force int) local_id);
idr_remove(&cm.local_id_table,
(__force int) (local_id ^ cm.random_id_operand));
spin_unlock_irqrestore(&cm.lock, flags);
}
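For illustration only, not part of this commit: the local_id handed out on the wire is the sequential IDR index XORed with a per-boot random operand, and because XOR is its own inverse the same operand recovers the index on lookup, as cm_free_id() above and cm_get_id() below do. A stand-alone C sketch with made-up values:
	#include <assert.h>
	#include <stdint.h>
	int main(void)
	{
		uint32_t random_id_operand = 0x5a17c3e9; /* stand-in for get_random_bytes() */
		uint32_t idr_index = 42;                 /* stand-in for the id from idr_get_new_above() */
		uint32_t local_id = idr_index ^ random_id_operand; /* value stored in cm_id.local_id */
		/* lookup side: XOR with the same operand recovers the IDR index */
		assert((local_id ^ random_id_operand) == idr_index);
		return 0;
	}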
......@@ -324,7 +329,8 @@ static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
struct cm_id_private *cm_id_priv;
cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
cm_id_priv = idr_find(&cm.local_id_table,
(__force int) (local_id ^ cm.random_id_operand));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
atomic_inc(&cm_id_priv->refcount);
......@@ -679,6 +685,8 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
int wait_time;
cm_cleanup_timewait(cm_id_priv->timewait_info);
/*
* The cm_id could be destroyed by the user before we exit timewait.
* To protect against this, we search for the cm_id after exiting
......@@ -1354,7 +1362,7 @@ static int cm_req_handler(struct cm_work *work)
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
goto error1;
goto destroy;
}
cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
......@@ -1363,7 +1371,8 @@ static int cm_req_handler(struct cm_work *work)
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
ret = -EINVAL;
goto error2;
kfree(cm_id_priv->timewait_info);
goto destroy;
}
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
......@@ -1373,12 +1382,22 @@ static int cm_req_handler(struct cm_work *work)
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
if (ret)
goto error3;
if (ret) {
ib_get_cached_gid(work->port->cm_dev->device,
work->port->port_num, 0, &work->path[0].sgid);
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
&work->path[0].sgid, sizeof work->path[0].sgid,
NULL, 0);
goto rejected;
}
if (req_msg->alt_local_lid) {
ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
if (ret)
goto error3;
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
sizeof work->path[0].sgid, NULL, 0);
goto rejected;
}
}
cm_id_priv->tid = req_msg->hdr.tid;
cm_id_priv->timeout_ms = cm_convert_to_ms(
......@@ -1400,12 +1419,11 @@ static int cm_req_handler(struct cm_work *work)
cm_deref_id(listen_cm_id_priv);
return 0;
error3: atomic_dec(&cm_id_priv->refcount);
rejected:
atomic_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
cm_cleanup_timewait(cm_id_priv->timewait_info);
error2: kfree(cm_id_priv->timewait_info);
cm_id_priv->timewait_info = NULL;
error1: ib_destroy_cm_id(&cm_id_priv->id);
destroy:
ib_destroy_cm_id(cm_id);
return ret;
}
......@@ -2072,8 +2090,9 @@ static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
spin_unlock_irqrestore(&cm.lock, flags);
return NULL;
}
cm_id_priv = idr_find(&cm.local_id_table,
(__force int) timewait_info->work.local_id);
cm_id_priv = idr_find(&cm.local_id_table, (__force int)
(timewait_info->work.local_id ^
cm.random_id_operand));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
atomic_inc(&cm_id_priv->refcount);
......@@ -3125,7 +3144,8 @@ static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE;
if (cm_id_priv->responder_resources)
qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_ATOMIC;
qp_attr->pkey_index = cm_id_priv->av.pkey_index;
qp_attr->port_num = cm_id_priv->av.port->port_num;
ret = 0;
......@@ -3262,6 +3282,9 @@ static void cm_add_one(struct ib_device *device)
int ret;
u8 i;
if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
return;
cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
device->phys_port_cnt, GFP_KERNEL);
if (!cm_dev)
......@@ -3349,6 +3372,7 @@ static int __init ib_cm_init(void)
cm.remote_qp_table = RB_ROOT;
cm.remote_sidr_table = RB_ROOT;
idr_init(&cm.local_id_table);
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
idr_pre_get(&cm.local_id_table, GFP_KERNEL);
cm.wq = create_workqueue("ib_cm");
......
......@@ -385,7 +385,7 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
EXPORT_SYMBOL(ib_get_client_data);
/**
* ib_set_client_data - Get IB client context
* ib_set_client_data - Set IB client context
* @device:Device to set context for
* @client:Client to set context for
* @data:Context to set
......@@ -505,7 +505,7 @@ int ib_query_port(struct ib_device *device,
u8 port_num,
struct ib_port_attr *port_attr)
{
if (device->node_type == IB_NODE_SWITCH) {
if (device->node_type == RDMA_NODE_IB_SWITCH) {
if (port_num)
return -EINVAL;
} else if (port_num < 1 || port_num > device->phys_port_cnt)
......@@ -580,7 +580,7 @@ int ib_modify_port(struct ib_device *device,
u8 port_num, int port_modify_mask,
struct ib_port_modify *port_modify)
{
if (device->node_type == IB_NODE_SWITCH) {
if (device->node_type == RDMA_NODE_IB_SWITCH) {
if (port_num)
return -EINVAL;
} else if (port_num < 1 || port_num > device->phys_port_cnt)
......
/*
* Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef IWCM_H
#define IWCM_H
enum iw_cm_state {
IW_CM_STATE_IDLE, /* unbound, inactive */
IW_CM_STATE_LISTEN, /* listen waiting for connect */
IW_CM_STATE_CONN_RECV, /* inbound waiting for user accept */
IW_CM_STATE_CONN_SENT, /* outbound waiting for peer accept */
IW_CM_STATE_ESTABLISHED, /* established */
IW_CM_STATE_CLOSING, /* disconnect */
IW_CM_STATE_DESTROYING /* object being deleted */
};
struct iwcm_id_private {
struct iw_cm_id id;
enum iw_cm_state state;
unsigned long flags;
struct ib_qp *qp;
struct completion destroy_comp;
wait_queue_head_t connect_wait;
struct list_head work_list;
spinlock_t lock;
atomic_t refcount;
struct list_head work_free_list;
};
#define IWCM_F_CALLBACK_DESTROY 1
#define IWCM_F_CONNECT_WAIT 2
#endif /* IWCM_H */
......@@ -2876,7 +2876,10 @@ static void ib_mad_init_device(struct ib_device *device)
{
int start, end, i;
if (device->node_type == IB_NODE_SWITCH) {
if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
return;
if (device->node_type == RDMA_NODE_IB_SWITCH) {
start = 0;
end = 0;
} else {
......@@ -2923,7 +2926,7 @@ static void ib_mad_remove_device(struct ib_device *device)
{
int i, num_ports, cur_port;
if (device->node_type == IB_NODE_SWITCH) {
if (device->node_type == RDMA_NODE_IB_SWITCH) {
num_ports = 1;
cur_port = 0;
} else {
......
......@@ -39,7 +39,6 @@
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
......
......@@ -33,8 +33,6 @@
* $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
*/
#include <linux/dma-mapping.h>
#include "mad_priv.h"
#include "mad_rmpp.h"
......@@ -60,6 +58,7 @@ struct mad_rmpp_recv {
int last_ack;
int seg_num;
int newwin;
int repwin;
__be64 tid;
u32 src_qp;
......@@ -170,6 +169,32 @@ static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
return msg;
}
static void ack_ds_ack(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *recv_wc)
{
struct ib_mad_send_buf *msg;
struct ib_rmpp_mad *rmpp_mad;
int ret;
msg = alloc_response_msg(&agent->agent, recv_wc);
if (IS_ERR(msg))
return;
rmpp_mad = msg->mad;
memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);
rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
rmpp_mad->rmpp_hdr.seg_num = 0;
rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
ib_destroy_ah(msg->ah);
ib_free_send_mad(msg);
}
}
void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;
......@@ -271,6 +296,7 @@ create_rmpp_recv(struct ib_mad_agent_private *agent,
rmpp_recv->newwin = 1;
rmpp_recv->seg_num = 1;
rmpp_recv->last_ack = 0;
rmpp_recv->repwin = 1;
mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
rmpp_recv->tid = mad_hdr->tid;
......@@ -591,6 +617,16 @@ static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
break;
}
static void process_ds_ack(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
struct mad_rmpp_recv *rmpp_recv;
rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
rmpp_recv->repwin = newwin;
}
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
......@@ -616,8 +652,18 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
spin_lock_irqsave(&agent->lock, flags);
mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
if (!mad_send_wr)
goto out; /* Unmatched ACK */
if (!mad_send_wr) {
if (!seg_num)
process_ds_ack(agent, mad_recv_wc, newwin);
goto out; /* Unmatched or DS RMPP ACK */
}
if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
(mad_send_wr->timeout)) {
spin_unlock_irqrestore(&agent->lock, flags);
ack_ds_ack(agent, mad_recv_wc);
return; /* Repeated ACK for DS RMPP transaction */
}
if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
(!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
......@@ -656,6 +702,9 @@ static void process_rmpp_ack(struct ib_mad_agent_private *agent,
if (mad_send_wr->refcount == 1)
ib_reset_mad_timeout(mad_send_wr,
mad_send_wr->send_buf.timeout_ms);
spin_unlock_irqrestore(&agent->lock, flags);
ack_ds_ack(agent, mad_recv_wc);
return;
} else if (mad_send_wr->refcount == 1 &&
mad_send_wr->seg_num < mad_send_wr->newwin &&
mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
......@@ -772,6 +821,39 @@ ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
return NULL;
}
static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
struct mad_rmpp_recv *rmpp_recv;
struct ib_ah_attr ah_attr;
unsigned long flags;
int newwin = 1;
if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
goto out;
spin_lock_irqsave(&agent->lock, flags);
list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
if (rmpp_recv->tid != mad_hdr->tid ||
rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
rmpp_recv->class_version != mad_hdr->class_version ||
(rmpp_recv->method & IB_MGMT_METHOD_RESP))
continue;
if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
continue;
if (rmpp_recv->slid == ah_attr.dlid) {
newwin = rmpp_recv->repwin;
break;
}
}
spin_unlock_irqrestore(&agent->lock, flags);
out:
return newwin;
}
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
struct ib_rmpp_mad *rmpp_mad;
......@@ -787,7 +869,7 @@ int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
return IB_RMPP_RESULT_INTERNAL;
}
mad_send_wr->newwin = 1;
mad_send_wr->newwin = init_newwin(mad_send_wr);
/* We need to wait for the final ACK even if there isn't a response */
mad_send_wr->refcount += (mad_send_wr->timeout == 0);
......
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
* Copyright (c) 2006 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
......@@ -75,6 +76,7 @@ struct ib_sa_device {
struct ib_sa_query {
void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
void (*release)(struct ib_sa_query *);
struct ib_sa_client *client;
struct ib_sa_port *port;
struct ib_mad_send_buf *mad_buf;
struct ib_sa_sm_ah *sm_ah;
......@@ -415,6 +417,31 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event
}
}
void ib_sa_register_client(struct ib_sa_client *client)
{
atomic_set(&client->users, 1);
init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);
static inline void ib_sa_client_get(struct ib_sa_client *client)
{
atomic_inc(&client->users);
}
static inline void ib_sa_client_put(struct ib_sa_client *client)
{
if (atomic_dec_and_test(&client->users))
complete(&client->comp);
}
void ib_sa_unregister_client(struct ib_sa_client *client)
{
ib_sa_client_put(client);
wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
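Usage sketch, not taken from this commit: with the new registration API a ULP keeps a struct ib_sa_client for its lifetime and passes it as the first argument of each query (e.g. ib_sa_path_rec_get()); unregistering drops the initial reference and waits on the completion for outstanding queries. Hypothetical module hooks:
	static struct ib_sa_client my_sa_client;	/* hypothetical client owned by the ULP */
	static int __init my_ulp_init(void)
	{
		ib_sa_register_client(&my_sa_client);	/* users = 1, completion initialized */
		return 0;
	}
	static void __exit my_ulp_exit(void)
	{
		/* blocks until every query issued with &my_sa_client has
		 * called ib_sa_client_put() from its send handler */
		ib_sa_unregister_client(&my_sa_client);
	}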
/**
* ib_sa_cancel_query - try to cancel an SA query
* @id:ID of query to cancel
......@@ -557,6 +584,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
/**
* ib_sa_path_rec_get - Start a Path get query
* @client:SA client
* @device:device to send query on
* @port_num: port number to send query on
* @rec:Path Record to send in query
......@@ -579,7 +607,8 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
* error code. Otherwise it is a query ID that can be used to cancel
* the query.
*/
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
int ib_sa_path_rec_get(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
struct ib_sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
int timeout_ms, gfp_t gfp_mask,
......@@ -614,6 +643,8 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
goto err1;
}
ib_sa_client_get(client);
query->sa_query.client = client;
query->callback = callback;
query->context = context;
......@@ -639,6 +670,7 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
err2:
*sa_query = NULL;
ib_sa_client_put(query->sa_query.client);
ib_free_send_mad(query->sa_query.mad_buf);
err1:
......@@ -671,6 +703,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
/**
* ib_sa_service_rec_query - Start Service Record operation
* @client:SA client
* @device:device to send request on
* @port_num: port number to send request on
* @method:SA method - should be get, set, or delete
......@@ -695,7 +728,8 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
* error code. Otherwise it is a request ID that can be used to cancel
* the query.
*/
int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
int ib_sa_service_rec_query(struct ib_sa_client *client,
struct ib_device *device, u8 port_num, u8 method,
struct ib_sa_service_rec *rec,
ib_sa_comp_mask comp_mask,
int timeout_ms, gfp_t gfp_mask,
......@@ -735,6 +769,8 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
goto err1;
}
ib_sa_client_get(client);
query->sa_query.client = client;
query->callback = callback;
query->context = context;
......@@ -761,6 +797,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
err2:
*sa_query = NULL;
ib_sa_client_put(query->sa_query.client);
ib_free_send_mad(query->sa_query.mad_buf);
err1:
......@@ -791,7 +828,8 @@ static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}
int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
......@@ -827,6 +865,8 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
goto err1;
}
ib_sa_client_get(client);
query->sa_query.client = client;
query->callback = callback;
query->context = context;
......@@ -853,6 +893,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
err2:
*sa_query = NULL;
ib_sa_client_put(query->sa_query.client);
ib_free_send_mad(query->sa_query.mad_buf);
err1:
......@@ -889,6 +930,7 @@ static void send_handler(struct ib_mad_agent *agent,
ib_free_send_mad(mad_send_wc->send_buf);
kref_put(&query->sm_ah->ref, free_sm_ah);
ib_sa_client_put(query->client);
query->release(query);
}
......@@ -919,7 +961,10 @@ static void ib_sa_add_one(struct ib_device *device)
struct ib_sa_device *sa_dev;
int s, e, i;
if (device->node_type == IB_NODE_SWITCH)
if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
return;
if (device->node_type == RDMA_NODE_IB_SWITCH)
s = e = 0;
else {
s = 1;
......
......@@ -64,7 +64,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
/* C14-9:2 */
if (hop_ptr && hop_ptr < hop_cnt) {
if (node_type != IB_NODE_SWITCH)
if (node_type != RDMA_NODE_IB_SWITCH)
return 0;
/* smp->return_path set when received */
......@@ -77,7 +77,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
if (hop_ptr == hop_cnt) {
/* smp->return_path set when received */
smp->hop_ptr++;
return (node_type == IB_NODE_SWITCH ||
return (node_type == RDMA_NODE_IB_SWITCH ||
smp->dr_dlid == IB_LID_PERMISSIVE);
}
......@@ -95,7 +95,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
/* C14-13:2 */
if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
if (node_type != IB_NODE_SWITCH)
if (node_type != RDMA_NODE_IB_SWITCH)
return 0;
smp->hop_ptr--;
......@@ -107,7 +107,7 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
if (hop_ptr == 1) {
smp->hop_ptr--;
/* C14-13:3 -- SMPs destined for SM shouldn't be here */
return (node_type == IB_NODE_SWITCH ||
return (node_type == RDMA_NODE_IB_SWITCH ||
smp->dr_slid == IB_LID_PERMISSIVE);
}
......@@ -142,7 +142,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
/* C14-9:2 -- intermediate hop */
if (hop_ptr && hop_ptr < hop_cnt) {
if (node_type != IB_NODE_SWITCH)
if (node_type != RDMA_NODE_IB_SWITCH)
return 0;
smp->return_path[hop_ptr] = port_num;
......@@ -156,7 +156,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
smp->return_path[hop_ptr] = port_num;
/* smp->hop_ptr updated when sending */
return (node_type == IB_NODE_SWITCH ||
return (node_type == RDMA_NODE_IB_SWITCH ||
smp->dr_dlid == IB_LID_PERMISSIVE);
}
......@@ -175,7 +175,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
/* C14-13:2 */
if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
if (node_type != IB_NODE_SWITCH)
if (node_type != RDMA_NODE_IB_SWITCH)
return 0;
/* smp->hop_ptr updated when sending */
......@@ -190,7 +190,7 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
return 1;
}
/* smp->hop_ptr updated when sending */
return (node_type == IB_NODE_SWITCH);
return (node_type == RDMA_NODE_IB_SWITCH);
}
/* C14-13:4 -- hop_ptr = 0 -> give to SM */
......
......@@ -589,9 +589,10 @@ static ssize_t show_node_type(struct class_device *cdev, char *buf)
return -ENODEV;
switch (dev->node_type) {
case IB_NODE_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
case IB_NODE_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
case IB_NODE_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
case RDMA_NODE_IB_CA: return sprintf(buf, "%d: CA\n", dev->node_type);
case RDMA_NODE_RNIC: return sprintf(buf, "%d: RNIC\n", dev->node_type);
case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
}
}
......@@ -708,7 +709,7 @@ int ib_device_register_sysfs(struct ib_device *device)
if (ret)
goto err_put;
if (device->node_type == IB_NODE_SWITCH) {
if (device->node_type == RDMA_NODE_IB_SWITCH) {
ret = add_port(device, 0);
if (ret)
goto err_put;
......
......@@ -1247,7 +1247,8 @@ static void ib_ucm_add_one(struct ib_device *device)
{
struct ib_ucm_device *ucm_dev;
if (!device->alloc_ucontext)
if (!device->alloc_ucontext ||
rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
return;
ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
......
......@@ -1032,7 +1032,10 @@ static void ib_umad_add_one(struct ib_device *device)
struct ib_umad_device *umad_dev;
int s, e, i;
if (device->node_type == IB_NODE_SWITCH)
if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
return;
if (device->node_type == RDMA_NODE_IB_SWITCH)
s = e = 0;
else {
s = 1;
......
......@@ -155,7 +155,7 @@ static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
}
static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
struct ib_ucontext *context)
struct ib_ucontext *context, int nested)
{
struct ib_uobject *uobj;
......@@ -163,6 +163,9 @@ static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
if (!uobj)
return NULL;
if (nested)
down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
else
down_read(&uobj->mutex);
if (!uobj->live) {
put_uobj_read(uobj);
......@@ -190,17 +193,18 @@ static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
return uobj;
}
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context)
static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
int nested)
{
struct ib_uobject *uobj;
uobj = idr_read_uobj(idr, id, context);
uobj = idr_read_uobj(idr, id, context, nested);
return uobj ? uobj->object : NULL;
}
static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context);
return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}
static void put_pd_read(struct ib_pd *pd)
......@@ -208,9 +212,9 @@ static void put_pd_read(struct ib_pd *pd)
put_uobj_read(pd->uobject);
}
static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context)
static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context);
return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}
static void put_cq_read(struct ib_cq *cq)
......@@ -220,7 +224,7 @@ static void put_cq_read(struct ib_cq *cq)
static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context);
return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}
static void put_ah_read(struct ib_ah *ah)
......@@ -230,7 +234,7 @@ static void put_ah_read(struct ib_ah *ah)
static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context);
return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
static void put_qp_read(struct ib_qp *qp)
......@@ -240,7 +244,7 @@ static void put_qp_read(struct ib_qp *qp)
static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context);
return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}
static void put_srq_read(struct ib_srq *srq)
......@@ -837,7 +841,6 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
err_copy:
idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
err_free:
ib_destroy_cq(cq);
......@@ -867,7 +870,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
(unsigned long) cmd.response + sizeof resp,
in_len - sizeof cmd, out_len - sizeof resp);
cq = idr_read_cq(cmd.cq_handle, file->ucontext);
cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
if (!cq)
return -EINVAL;
......@@ -875,11 +878,10 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
if (ret)
goto out;
memset(&resp, 0, sizeof resp);
resp.cqe = cq->cqe;
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
&resp, sizeof resp.cqe))
ret = -EFAULT;
out:
......@@ -894,7 +896,6 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
{
struct ib_uverbs_poll_cq cmd;
struct ib_uverbs_poll_cq_resp *resp;
struct ib_uobject *uobj;
struct ib_cq *cq;
struct ib_wc *wc;
int ret = 0;
......@@ -915,16 +916,15 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
goto out_wc;
}
uobj = idr_read_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
if (!uobj) {
cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
if (!cq) {
ret = -EINVAL;
goto out;
}
cq = uobj->object;
resp->count = ib_poll_cq(cq, cmd.ne, wc);
put_uobj_read(uobj);
put_cq_read(cq);
for (i = 0; i < resp->count; i++) {
resp->wc[i].wr_id = wc[i].wr_id;
......@@ -959,21 +959,19 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
int out_len)
{
struct ib_uverbs_req_notify_cq cmd;
struct ib_uobject *uobj;
struct ib_cq *cq;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
uobj = idr_read_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
if (!uobj)
cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
if (!cq)
return -EINVAL;
cq = uobj->object;
ib_req_notify_cq(cq, cmd.solicited_only ?
IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
put_uobj_read(uobj);
put_cq_read(cq);
return in_len;
}
......@@ -1064,9 +1062,9 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
pd = idr_read_pd(cmd.pd_handle, file->ucontext);
scq = idr_read_cq(cmd.send_cq_handle, file->ucontext);
scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext);
scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
ret = -EINVAL;
......@@ -1274,6 +1272,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
int out_len)
{
struct ib_uverbs_modify_qp cmd;
struct ib_udata udata;
struct ib_qp *qp;
struct ib_qp_attr *attr;
int ret;
......@@ -1281,6 +1280,9 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
out_len);
attr = kmalloc(sizeof *attr, GFP_KERNEL);
if (!attr)
return -ENOMEM;
......@@ -1337,7 +1339,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
ret = ib_modify_qp(qp, attr, cmd.attr_mask);
ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata);
put_qp_read(qp);
......@@ -1674,7 +1676,6 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
break;
}
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
ret = -EFAULT;
......@@ -1724,7 +1725,6 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
break;
}
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp))
ret = -EFAULT;
......@@ -2055,6 +2055,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
int out_len)
{
struct ib_uverbs_modify_srq cmd;
struct ib_udata udata;
struct ib_srq *srq;
struct ib_srq_attr attr;
int ret;
......@@ -2062,6 +2063,9 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
out_len);
srq = idr_read_srq(cmd.srq_handle, file->ucontext);
if (!srq)
return -EINVAL;
......@@ -2069,7 +2073,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
attr.max_wr = cmd.max_wr;
attr.srq_limit = cmd.srq_limit;
ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
put_srq_read(srq);
......
......@@ -79,6 +79,23 @@ enum ib_rate mult_to_ib_rate(int mult)
}
EXPORT_SYMBOL(mult_to_ib_rate);
enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
switch (node_type) {
case RDMA_NODE_IB_CA:
case RDMA_NODE_IB_SWITCH:
case RDMA_NODE_IB_ROUTER:
return RDMA_TRANSPORT_IB;
case RDMA_NODE_RNIC:
return RDMA_TRANSPORT_IWARP;
default:
BUG();
return 0;
}
}
EXPORT_SYMBOL(rdma_node_get_transport);
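A hedged usage sketch (the function name example_add_one is made up): this helper lets IB-only clients bail out of their add-one callbacks for iWARP devices, which is the same pattern the hunks above add to cm_add_one(), ib_mad_init_device(), ib_sa_add_one() and ib_umad_add_one().
	#include <rdma/ib_verbs.h>
	static void example_add_one(struct ib_device *device)
	{
		/* RNICs report RDMA_TRANSPORT_IWARP; this client only handles IB */
		if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
			return;
		/* IB-specific per-device setup would follow here */
	}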
/* Protection domains */
struct ib_pd *ib_alloc_pd(struct ib_device *device)
......@@ -231,7 +248,7 @@ int ib_modify_srq(struct ib_srq *srq,
struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask)
{
return srq->device->modify_srq(srq, srq_attr, srq_attr_mask);
return srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_srq);
......@@ -547,7 +564,7 @@ int ib_modify_qp(struct ib_qp *qp,
struct ib_qp_attr *qp_attr,
int qp_attr_mask)
{
return qp->device->modify_qp(qp, qp_attr, qp_attr_mask);
return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
......
ifdef CONFIG_INFINIBAND_AMSO1100_DEBUG
EXTRA_CFLAGS += -DDEBUG
endif
obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
config INFINIBAND_AMSO1100
tristate "Ammasso 1100 HCA support"
depends on PCI && INET && INFINIBAND
---help---
This is a low-level driver for the Ammasso 1100 host
channel adapter (HCA).
config INFINIBAND_AMSO1100_DEBUG
bool "Verbose debugging output"
depends on INFINIBAND_AMSO1100
default n
---help---
This option causes the amso1100 driver to produce a bunch of
debug messages. Select this if you are developing the driver
or trying to diagnose a problem.
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "c2.h"
#include <rdma/iw_cm.h>
#include "c2_status.h"
#include "c2_ae.h"
static int c2_convert_cm_status(u32 c2_status)
{
switch (c2_status) {
case C2_CONN_STATUS_SUCCESS:
return 0;
case C2_CONN_STATUS_REJECTED:
return -ENETRESET;
case C2_CONN_STATUS_REFUSED:
return -ECONNREFUSED;
case C2_CONN_STATUS_TIMEDOUT:
return -ETIMEDOUT;
case C2_CONN_STATUS_NETUNREACH:
return -ENETUNREACH;
case C2_CONN_STATUS_HOSTUNREACH:
return -EHOSTUNREACH;
case C2_CONN_STATUS_INVALID_RNIC:
return -EINVAL;
case C2_CONN_STATUS_INVALID_QP:
return -EINVAL;
case C2_CONN_STATUS_INVALID_QP_STATE:
return -EINVAL;
case C2_CONN_STATUS_ADDR_NOT_AVAIL:
return -EADDRNOTAVAIL;
default:
printk(KERN_ERR PFX
"%s - Unable to convert CM status: %d\n",
__FUNCTION__, c2_status);
return -EIO;
}
}
#ifdef DEBUG
static const char* to_event_str(int event)
{
static const char* event_str[] = {
"CCAE_REMOTE_SHUTDOWN",
"CCAE_ACTIVE_CONNECT_RESULTS",
"CCAE_CONNECTION_REQUEST",
"CCAE_LLP_CLOSE_COMPLETE",
"CCAE_TERMINATE_MESSAGE_RECEIVED",
"CCAE_LLP_CONNECTION_RESET",
"CCAE_LLP_CONNECTION_LOST",
"CCAE_LLP_SEGMENT_SIZE_INVALID",
"CCAE_LLP_INVALID_CRC",
"CCAE_LLP_BAD_FPDU",
"CCAE_INVALID_DDP_VERSION",
"CCAE_INVALID_RDMA_VERSION",
"CCAE_UNEXPECTED_OPCODE",
"CCAE_INVALID_DDP_QUEUE_NUMBER",
"CCAE_RDMA_READ_NOT_ENABLED",
"CCAE_RDMA_WRITE_NOT_ENABLED",
"CCAE_RDMA_READ_TOO_SMALL",
"CCAE_NO_L_BIT",
"CCAE_TAGGED_INVALID_STAG",
"CCAE_TAGGED_BASE_BOUNDS_VIOLATION",
"CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION",
"CCAE_TAGGED_INVALID_PD",
"CCAE_WRAP_ERROR",
"CCAE_BAD_CLOSE",
"CCAE_BAD_LLP_CLOSE",
"CCAE_INVALID_MSN_RANGE",
"CCAE_INVALID_MSN_GAP",
"CCAE_IRRQ_OVERFLOW",
"CCAE_IRRQ_MSN_GAP",
"CCAE_IRRQ_MSN_RANGE",
"CCAE_IRRQ_INVALID_STAG",
"CCAE_IRRQ_BASE_BOUNDS_VIOLATION",
"CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION",
"CCAE_IRRQ_INVALID_PD",
"CCAE_IRRQ_WRAP_ERROR",
"CCAE_CQ_SQ_COMPLETION_OVERFLOW",
"CCAE_CQ_RQ_COMPLETION_ERROR",
"CCAE_QP_SRQ_WQE_ERROR",
"CCAE_QP_LOCAL_CATASTROPHIC_ERROR",
"CCAE_CQ_OVERFLOW",
"CCAE_CQ_OPERATION_ERROR",
"CCAE_SRQ_LIMIT_REACHED",
"CCAE_QP_RQ_LIMIT_REACHED",
"CCAE_SRQ_CATASTROPHIC_ERROR",
"CCAE_RNIC_CATASTROPHIC_ERROR"
};
if (event < CCAE_REMOTE_SHUTDOWN ||
event > CCAE_RNIC_CATASTROPHIC_ERROR)
return "<invalid event>";
event -= CCAE_REMOTE_SHUTDOWN;
return event_str[event];
}
static const char *to_qp_state_str(int state)
{
switch (state) {
case C2_QP_STATE_IDLE:
return "C2_QP_STATE_IDLE";
case C2_QP_STATE_CONNECTING:
return "C2_QP_STATE_CONNECTING";
case C2_QP_STATE_RTS:
return "C2_QP_STATE_RTS";
case C2_QP_STATE_CLOSING:
return "C2_QP_STATE_CLOSING";
case C2_QP_STATE_TERMINATE:
return "C2_QP_STATE_TERMINATE";
case C2_QP_STATE_ERROR:
return "C2_QP_STATE_ERROR";
default:
return "<invalid QP state>";
};
}
#endif
void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
{
struct c2_mq *mq = c2dev->qptr_array[mq_index];
union c2wr *wr;
void *resource_user_context;
struct iw_cm_event cm_event;
struct ib_event ib_event;
enum c2_resource_indicator resource_indicator;
enum c2_event_id event_id;
unsigned long flags;
int status;
/*
* retrieve the message
*/
wr = c2_mq_consume(mq);
if (!wr)
return;
memset(&ib_event, 0, sizeof(ib_event));
memset(&cm_event, 0, sizeof(cm_event));
event_id = c2_wr_get_id(wr);
resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
resource_user_context =
(void *) (unsigned long) wr->ae.ae_generic.user_context;
status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));
pr_debug("event received c2_dev=%p, event_id=%d, "
"resource_indicator=%d, user_context=%p, status = %d\n",
c2dev, event_id, resource_indicator, resource_user_context,
status);
switch (resource_indicator) {
case C2_RES_IND_QP:{
struct c2_qp *qp = (struct c2_qp *)resource_user_context;
struct iw_cm_id *cm_id = qp->cm_id;
struct c2wr_ae_active_connect_results *res;
if (!cm_id) {
pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
qp);
goto ignore_it;
}
pr_debug("%s: event = %s, user_context=%llx, "
"resource_type=%x, "
"resource=%x, qp_state=%s\n",
__FUNCTION__,
to_event_str(event_id),
be64_to_cpu(wr->ae.ae_generic.user_context),
be32_to_cpu(wr->ae.ae_generic.resource_type),
be32_to_cpu(wr->ae.ae_generic.resource),
to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
switch (event_id) {
case CCAE_ACTIVE_CONNECT_RESULTS:
res = &wr->ae.ae_active_connect_results;
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
cm_event.local_addr.sin_addr.s_addr = res->laddr;
cm_event.remote_addr.sin_addr.s_addr = res->raddr;
cm_event.local_addr.sin_port = res->lport;
cm_event.remote_addr.sin_port = res->rport;
if (status == 0) {
cm_event.private_data_len =
be32_to_cpu(res->private_data_length);
cm_event.private_data = res->private_data;
} else {
spin_lock_irqsave(&qp->lock, flags);
if (qp->cm_id) {
qp->cm_id->rem_ref(qp->cm_id);
qp->cm_id = NULL;
}
spin_unlock_irqrestore(&qp->lock, flags);
cm_event.private_data_len = 0;
cm_event.private_data = NULL;
}
if (cm_id->event_handler)
cm_id->event_handler(cm_id, &cm_event);
break;
case CCAE_TERMINATE_MESSAGE_RECEIVED:
case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
ib_event.device = &c2dev->ibdev;
ib_event.element.qp = &qp->ibqp;
ib_event.event = IB_EVENT_QP_REQ_ERR;
if (qp->ibqp.event_handler)
qp->ibqp.event_handler(&ib_event,
qp->ibqp.
qp_context);
break;
case CCAE_BAD_CLOSE:
case CCAE_LLP_CLOSE_COMPLETE:
case CCAE_LLP_CONNECTION_RESET:
case CCAE_LLP_CONNECTION_LOST:
BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b);
spin_lock_irqsave(&qp->lock, flags);
if (qp->cm_id) {
qp->cm_id->rem_ref(qp->cm_id);
qp->cm_id = NULL;
}
spin_unlock_irqrestore(&qp->lock, flags);
cm_event.event = IW_CM_EVENT_CLOSE;
cm_event.status = 0;
if (cm_id->event_handler)
cm_id->event_handler(cm_id, &cm_event);
break;
default:
BUG_ON(1);
pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
"CM_ID=%p\n",
__FUNCTION__, __LINE__,
event_id, qp, cm_id);
break;
}
break;
}
case C2_RES_IND_EP:{
struct c2wr_ae_connection_request *req =
&wr->ae.ae_connection_request;
struct iw_cm_id *cm_id =
(struct iw_cm_id *)resource_user_context;
pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
if (event_id != CCAE_CONNECTION_REQUEST) {
pr_debug("%s: Invalid event_id: %d\n",
__FUNCTION__, event_id);
break;
}
cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
cm_event.local_addr.sin_addr.s_addr = req->laddr;
cm_event.remote_addr.sin_addr.s_addr = req->raddr;
cm_event.local_addr.sin_port = req->lport;
cm_event.remote_addr.sin_port = req->rport;
cm_event.private_data_len =
be32_to_cpu(req->private_data_length);
cm_event.private_data = req->private_data;
if (cm_id->event_handler)
cm_id->event_handler(cm_id, &cm_event);
break;
}
case C2_RES_IND_CQ:{
struct c2_cq *cq =
(struct c2_cq *) resource_user_context;
pr_debug("IB_EVENT_CQ_ERR\n");
ib_event.device = &c2dev->ibdev;
ib_event.element.cq = &cq->ibcq;
ib_event.event = IB_EVENT_CQ_ERR;
if (cq->ibcq.event_handler)
cq->ibcq.event_handler(&ib_event,
cq->ibcq.cq_context);
}
default:
printk("Bad resource indicator = %d\n",
resource_indicator);
break;
}
ignore_it:
c2_mq_free(mq);
}
/*
* Copyright (c) 2005 Ammasso, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _C2_AE_H_
#define _C2_AE_H_
/*
* WARNING: If you change this file, also bump C2_IVN_BASE
* in common/include/clustercore/c2_ivn.h.
*/
/*
* Asynchronous Event Identifiers
*
* These start at 0x80 only so it's obvious from inspection that
* they are not work-request statuses. This isn't critical.
*
* NOTE: these event id's must fit in eight bits.
*/
enum c2_event_id {
CCAE_REMOTE_SHUTDOWN = 0x80,
CCAE_ACTIVE_CONNECT_RESULTS,
CCAE_CONNECTION_REQUEST,
CCAE_LLP_CLOSE_COMPLETE,
CCAE_TERMINATE_MESSAGE_RECEIVED,
CCAE_LLP_CONNECTION_RESET,
CCAE_LLP_CONNECTION_LOST,
CCAE_LLP_SEGMENT_SIZE_INVALID,
CCAE_LLP_INVALID_CRC,
CCAE_LLP_BAD_FPDU,
CCAE_INVALID_DDP_VERSION,
CCAE_INVALID_RDMA_VERSION,
CCAE_UNEXPECTED_OPCODE,
CCAE_INVALID_DDP_QUEUE_NUMBER,
CCAE_RDMA_READ_NOT_ENABLED,
CCAE_RDMA_WRITE_NOT_ENABLED,
CCAE_RDMA_READ_TOO_SMALL,
CCAE_NO_L_BIT,
CCAE_TAGGED_INVALID_STAG,
CCAE_TAGGED_BASE_BOUNDS_VIOLATION,
CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION,
CCAE_TAGGED_INVALID_PD,
CCAE_WRAP_ERROR,
CCAE_BAD_CLOSE,
CCAE_BAD_LLP_CLOSE,
CCAE_INVALID_MSN_RANGE,
CCAE_INVALID_MSN_GAP,
CCAE_IRRQ_OVERFLOW,
CCAE_IRRQ_MSN_GAP,
CCAE_IRRQ_MSN_RANGE,
CCAE_IRRQ_INVALID_STAG,
CCAE_IRRQ_BASE_BOUNDS_VIOLATION,
CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION,
CCAE_IRRQ_INVALID_PD,
CCAE_IRRQ_WRAP_ERROR,
CCAE_CQ_SQ_COMPLETION_OVERFLOW,
CCAE_CQ_RQ_COMPLETION_ERROR,
CCAE_QP_SRQ_WQE_ERROR,
CCAE_QP_LOCAL_CATASTROPHIC_ERROR,
CCAE_CQ_OVERFLOW,
CCAE_CQ_OPERATION_ERROR,
CCAE_SRQ_LIMIT_REACHED,
CCAE_QP_RQ_LIMIT_REACHED,
CCAE_SRQ_CATASTROPHIC_ERROR,
CCAE_RNIC_CATASTROPHIC_ERROR
/* WARNING If you add more id's, make sure their values fit in eight bits. */
};
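Not part of the driver, only a sketch of how the eight-bit constraint noted above could be enforced at build time with the kernel's BUILD_BUG_ON():
	#include <linux/kernel.h>
	static inline void c2_event_id_fits_in_u8(void)
	{
		/* fails the build if the largest event id no longer fits in eight bits */
		BUILD_BUG_ON(CCAE_RNIC_CATASTROPHIC_ERROR > 0xff);
	}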
/*
* Resource Indicators and Identifiers
*/
enum c2_resource_indicator {
C2_RES_IND_QP = 1,
C2_RES_IND_EP,
C2_RES_IND_CQ,
C2_RES_IND_SRQ,
};
#endif /* _C2_AE_H_ */
config INFINIBAND_EHCA
tristate "eHCA support"
depends on IBMEBUS && INFINIBAND
---help---
This driver supports the IBM pSeries eHCA InfiniBand adapter.
To compile the driver as a module, choose M here. The module
will be called ib_ehca.
config INFINIBAND_EHCA_SCALING
bool "Scaling support (EXPERIMENTAL)"
depends on IBMEBUS && INFINIBAND_EHCA && HOTPLUG_CPU && EXPERIMENTAL
---help---
eHCA scaling support schedules the CQ callbacks to different CPUs.
To enable this feature choose Y here.