Commit ba77df57 authored Nov 04, 2005 by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

Parents: 602d4a7e, d09e3276
Showing 25 changed files with 178 additions and 124 deletions (+178 -124):

drivers/infiniband/core/agent.c                 +1  -2
drivers/infiniband/core/cm.c                    +2  -4
drivers/infiniband/core/device.c                +1  -9
drivers/infiniband/core/mad.c                   +12 -19
drivers/infiniband/core/sysfs.c                 +2  -4
drivers/infiniband/core/ucm.c                   +3  -6
drivers/infiniband/core/user_mad.c              +64 -16
drivers/infiniband/core/uverbs.h                +1  -0
drivers/infiniband/core/uverbs_cmd.c            +1  -0
drivers/infiniband/core/uverbs_main.c           +4  -9
drivers/infiniband/hw/mthca/mthca_cq.c          +30 -1
drivers/infiniband/hw/mthca/mthca_dev.h         +3  -1
drivers/infiniband/hw/mthca/mthca_eq.c          +3  -1
drivers/infiniband/hw/mthca/mthca_main.c        +1  -1
drivers/infiniband/hw/mthca/mthca_mr.c          +1  -3
drivers/infiniband/hw/mthca/mthca_profile.c     +1  -3
drivers/infiniband/hw/mthca/mthca_provider.c    +1  -1
drivers/infiniband/hw/mthca/mthca_qp.c          +7  -0
drivers/infiniband/hw/mthca/mthca_srq.c         +7  -6
drivers/infiniband/ulp/ipoib/ipoib.h            +3  -0
drivers/infiniband/ulp/ipoib/ipoib_ib.c         +6  -7
drivers/infiniband/ulp/ipoib/ipoib_main.c       +5  -19
drivers/infiniband/ulp/ipoib/ipoib_multicast.c  +5  -3
drivers/infiniband/ulp/ipoib/ipoib_verbs.c      +1  -3
include/rdma/ib_user_cm.h                       +13 -6
drivers/infiniband/core/agent.c

@@ -155,13 +155,12 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
         int ret;
 
         /* Create new device info */
-        port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
+        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
         if (!port_priv) {
                 printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
                 ret = -ENOMEM;
                 goto error1;
         }
-        memset(port_priv, 0, sizeof *port_priv);
 
         /* Obtain send only MAD agent for SMI QP */
         port_priv->agent[0] = ib_register_mad_agent(device, port_num,
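Most hunks in this commit make the same substitution: a kmalloc() immediately followed by memset(..., 0, ...) collapses into a single kzalloc() call, which returns already-zeroed memory or NULL. A minimal sketch of the equivalence, assuming a kernel context with <linux/slab.h> and <linux/string.h> available (the helper names here are made up for illustration):

#include <linux/slab.h>
#include <linux/string.h>

/* Old pattern: allocate, check for failure, then clear by hand. */
static void *alloc_cleared_old(size_t size)
{
        void *p = kmalloc(size, GFP_KERNEL);

        if (!p)
                return NULL;
        memset(p, 0, size);
        return p;
}

/* New pattern: kzalloc() does the allocation and the clearing in one call. */
static void *alloc_cleared_new(size_t size)
{
        return kzalloc(size, GFP_KERNEL);
}

Besides being shorter, the kzalloc() form removes the risk of a code path that allocates but forgets to clear.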
drivers/infiniband/core/cm.c

@@ -544,11 +544,10 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
         struct cm_id_private *cm_id_priv;
         int ret;
 
-        cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
+        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
         if (!cm_id_priv)
                 return ERR_PTR(-ENOMEM);
 
-        memset(cm_id_priv, 0, sizeof *cm_id_priv);
         cm_id_priv->id.state = IB_CM_IDLE;
         cm_id_priv->id.device = device;
         cm_id_priv->id.cm_handler = cm_handler;
@@ -621,10 +620,9 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
 {
         struct cm_timewait_info *timewait_info;
 
-        timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
+        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
         if (!timewait_info)
                 return ERR_PTR(-ENOMEM);
 
-        memset(timewait_info, 0, sizeof *timewait_info);
         timewait_info->work.local_id = local_id;
         INIT_WORK(&timewait_info->work.work, cm_work_handler,
drivers/infiniband/core/device.c

@@ -161,17 +161,9 @@ static int alloc_name(char *name)
  */
 struct ib_device *ib_alloc_device(size_t size)
 {
-        void *dev;
-
         BUG_ON(size < sizeof (struct ib_device));
 
-        dev = kmalloc(size, GFP_KERNEL);
-        if (!dev)
-                return NULL;
-
-        memset(dev, 0, size);
-
-        return dev;
+        return kzalloc(size, GFP_KERNEL);
 }
 EXPORT_SYMBOL(ib_alloc_device);
drivers/infiniband/core/mad.c

@@ -255,12 +255,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
         }
 
         /* Allocate structures */
-        mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
+        mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
         if (!mad_agent_priv) {
                 ret = ERR_PTR(-ENOMEM);
                 goto error1;
         }
-        memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
 
         mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
                                                  IB_ACCESS_LOCAL_WRITE);
@@ -448,14 +447,13 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
                 goto error1;
         }
         /* Allocate structures */
-        mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
+        mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
         if (!mad_snoop_priv) {
                 ret = ERR_PTR(-ENOMEM);
                 goto error1;
         }
 
         /* Now, fill in the various structures */
-        memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
         mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
         mad_snoop_priv->agent.device = device;
         mad_snoop_priv->agent.recv_handler = recv_handler;
@@ -794,10 +792,9 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
             (!rmpp_active && buf_size > sizeof(struct ib_mad)))
                 return ERR_PTR(-EINVAL);
 
-        buf = kmalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
+        buf = kzalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
         if (!buf)
                 return ERR_PTR(-ENOMEM);
-        memset(buf, 0, sizeof *mad_send_wr + buf_size);
 
         mad_send_wr = buf + buf_size;
         mad_send_wr->send_buf.mad = buf;
@@ -1039,14 +1036,12 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
 {
         /* Allocate management method table */
-        *method = kmalloc(sizeof **method, GFP_ATOMIC);
+        *method = kzalloc(sizeof **method, GFP_ATOMIC);
         if (!*method) {
                 printk(KERN_ERR PFX "No memory for "
                        "ib_mad_mgmt_method_table\n");
                 return -ENOMEM;
         }
-        /* Clear management method table */
-        memset(*method, 0, sizeof **method);
 
         return 0;
 }
@@ -1137,15 +1132,14 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
         class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
         if (!*class) {
                 /* Allocate management class table for "new" class version */
-                *class = kmalloc(sizeof **class, GFP_ATOMIC);
+                *class = kzalloc(sizeof **class, GFP_ATOMIC);
                 if (!*class) {
                         printk(KERN_ERR PFX "No memory for "
                                "ib_mad_mgmt_class_table\n");
                         ret = -ENOMEM;
                         goto error1;
                 }
-                /* Clear management class table */
-                memset(*class, 0, sizeof(**class));
+
                 /* Allocate method table for this management class */
                 method = &(*class)->method_table[mgmt_class];
                 if ((ret = allocate_method_table(method)))
@@ -1209,25 +1203,24 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
                        mad_reg_req->mgmt_class_version].vendor;
         if (!*vendor_table) {
                 /* Allocate mgmt vendor class table for "new" class version */
-                vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
+                vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
                 if (!vendor) {
                         printk(KERN_ERR PFX "No memory for "
                                "ib_mad_mgmt_vendor_class_table\n");
                         goto error1;
                 }
-                /* Clear management vendor class table */
-                memset(vendor, 0, sizeof(*vendor));
+
                 *vendor_table = vendor;
         }
         if (!(*vendor_table)->vendor_class[vclass]) {
                 /* Allocate table for this management vendor class */
-                vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
+                vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
                 if (!vendor_class) {
                         printk(KERN_ERR PFX "No memory for "
                                "ib_mad_mgmt_vendor_class\n");
                         goto error2;
                 }
-                memset(vendor_class, 0, sizeof(*vendor_class));
+
                 (*vendor_table)->vendor_class[vclass] = vendor_class;
         }
         for (i = 0; i < MAX_MGMT_OUI; i++) {
@@ -2524,12 +2517,12 @@ static int ib_mad_port_open(struct ib_device *device,
         char name[sizeof "ib_mad123"];
 
         /* Create new device info */
-        port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
+        port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
         if (!port_priv) {
                 printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
                 return -ENOMEM;
         }
-        memset(port_priv, 0, sizeof *port_priv);
+
         port_priv->device = device;
         port_priv->port_num = port_num;
         spin_lock_init(&port_priv->reg_lock);
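Note that the MAD management tables above are allocated with GFP_ATOMIC rather than GFP_KERNEL: those paths run with a spinlock held, where sleeping is not allowed, and kzalloc() takes the same gfp flags as kmalloc(). A stripped-down sketch of that constraint (hypothetical lock and helper names; kernel context assumed):

#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(table_lock);

/* Inside a spinlock we must not sleep, so use GFP_ATOMIC; the
 * allocation may fail more readily, but it never blocks. */
static void *alloc_table_entry(size_t size)
{
        void *entry;

        spin_lock(&table_lock);
        entry = kzalloc(size, GFP_ATOMIC);
        spin_unlock(&table_lock);

        return entry;
}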
drivers/infiniband/core/sysfs.c

@@ -307,14 +307,13 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
         if (!p->ibdev->process_mad)
                 return sprintf(buf, "N/A (no PMA)\n");
 
-        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
+        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
         out_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
         if (!in_mad || !out_mad) {
                 ret = -ENOMEM;
                 goto out;
         }
 
-        memset(in_mad, 0, sizeof *in_mad);
         in_mad->mad_hdr.base_version  = 1;
         in_mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
         in_mad->mad_hdr.class_version = 1;
@@ -508,10 +507,9 @@ static int add_port(struct ib_device *device, int port_num)
         if (ret)
                 return ret;
 
-        p = kmalloc(sizeof *p, GFP_KERNEL);
+        p = kzalloc(sizeof *p, GFP_KERNEL);
         if (!p)
                 return -ENOMEM;
-        memset(p, 0, sizeof *p);
 
         p->ibdev      = device;
         p->port_num   = port_num;
drivers/infiniband/core/ucm.c

@@ -172,11 +172,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
         struct ib_ucm_context *ctx;
         int result;
 
-        ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+        ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
         if (!ctx)
                 return NULL;
 
-        memset(ctx, 0, sizeof *ctx);
         atomic_set(&ctx->ref, 1);
         init_waitqueue_head(&ctx->wait);
         ctx->file = file;
@@ -386,11 +385,10 @@ static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
         ctx = cm_id->context;
 
-        uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
+        uevent = kzalloc(sizeof *uevent, GFP_KERNEL);
         if (!uevent)
                 goto err1;
 
-        memset(uevent, 0, sizeof(*uevent));
         uevent->ctx = ctx;
         uevent->cm_id = cm_id;
         uevent->resp.uid = ctx->uid;
@@ -1345,11 +1343,10 @@ static void ib_ucm_add_one(struct ib_device *device)
         if (!device->alloc_ucontext)
                 return;
 
-        ucm_dev = kmalloc(sizeof *ucm_dev, GFP_KERNEL);
+        ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
         if (!ucm_dev)
                 return;
 
-        memset(ucm_dev, 0, sizeof *ucm_dev);
         ucm_dev->ib_dev = device;
 
         ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
drivers/infiniband/core/user_mad.c

@@ -94,6 +94,9 @@ struct ib_umad_port {
         struct class_device   *sm_class_dev;
         struct semaphore       sm_sem;
 
+        struct rw_semaphore    mutex;
+        struct list_head       file_list;
+
         struct ib_device      *ib_dev;
         struct ib_umad_device *umad_dev;
         int                    dev_num;
@@ -108,10 +111,10 @@ struct ib_umad_device {
 
 struct ib_umad_file {
         struct ib_umad_port    *port;
-        spinlock_t              recv_lock;
         struct list_head        recv_list;
+        struct list_head        port_list;
+        spinlock_t              recv_lock;
         wait_queue_head_t       recv_wait;
-        struct rw_semaphore     agent_mutex;
         struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
         struct ib_mr           *mr[IB_UMAD_MAX_AGENTS];
 };
@@ -148,7 +151,7 @@ static int queue_packet(struct ib_umad_file *file,
 {
         int ret = 1;
 
-        down_read(&file->agent_mutex);
+        down_read(&file->port->mutex);
         for (packet->mad.hdr.id = 0;
              packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
              packet->mad.hdr.id++)
@@ -161,7 +164,7 @@ static int queue_packet(struct ib_umad_file *file,
                         break;
                 }
 
-        up_read(&file->agent_mutex);
+        up_read(&file->port->mutex);
 
         return ret;
 }
@@ -322,7 +325,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
                 goto err;
         }
 
-        down_read(&file->agent_mutex);
+        down_read(&file->port->mutex);
 
         agent = file->agent[packet->mad.hdr.id];
         if (!agent) {
@@ -419,7 +422,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
         if (ret)
                 goto err_msg;
 
-        up_read(&file->agent_mutex);
+        up_read(&file->port->mutex);
 
         return count;
@@ -430,7 +433,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
         ib_destroy_ah(ah);
 
 err_up:
-        up_read(&file->agent_mutex);
+        up_read(&file->port->mutex);
 
 err:
         kfree(packet);
@@ -460,7 +463,12 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
         int agent_id;
         int ret;
 
-        down_write(&file->agent_mutex);
+        down_write(&file->port->mutex);
+
+        if (!file->port->ib_dev) {
+                ret = -EPIPE;
+                goto out;
+        }
 
         if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
                 ret = -EFAULT;
@@ -522,7 +530,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
         ib_unregister_mad_agent(agent);
 
 out:
-        up_write(&file->agent_mutex);
+        up_write(&file->port->mutex);
         return ret;
 }
@@ -531,7 +539,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
         u32 id;
         int ret = 0;
 
-        down_write(&file->agent_mutex);
+        down_write(&file->port->mutex);
 
         if (get_user(id, (u32 __user *) arg)) {
                 ret = -EFAULT;
@@ -548,7 +556,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
         file->agent[id] = NULL;
 
 out:
-        up_write(&file->agent_mutex);
+        up_write(&file->port->mutex);
         return ret;
 }
@@ -569,6 +577,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 {
         struct ib_umad_port *port;
         struct ib_umad_file *file;
+        int ret = 0;
 
         spin_lock(&port_lock);
         port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
@@ -579,21 +588,32 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
         if (!port)
                 return -ENXIO;
 
+        down_write(&port->mutex);
+
+        if (!port->ib_dev) {
+                ret = -ENXIO;
+                goto out;
+        }
+
         file = kzalloc(sizeof *file, GFP_KERNEL);
         if (!file) {
                 kref_put(&port->umad_dev->ref, ib_umad_release_dev);
-                return -ENOMEM;
+                ret = -ENOMEM;
+                goto out;
         }
 
         spin_lock_init(&file->recv_lock);
-        init_rwsem(&file->agent_mutex);
         INIT_LIST_HEAD(&file->recv_list);
         init_waitqueue_head(&file->recv_wait);
 
         file->port = port;
         filp->private_data = file;
 
-        return 0;
+        list_add_tail(&file->port_list, &port->file_list);
+
+out:
+        up_write(&port->mutex);
+        return ret;
 }
 
 static int ib_umad_close(struct inode *inode, struct file *filp)
@@ -603,6 +623,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
         struct ib_umad_packet *packet, *tmp;
         int i;
 
+        down_write(&file->port->mutex);
         for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
                 if (file->agent[i]) {
                         ib_dereg_mr(file->mr[i]);
@@ -612,6 +633,9 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
         list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
                 kfree(packet);
 
+        list_del(&file->port_list);
+        up_write(&file->port->mutex);
+
         kfree(file);
 
         kref_put(&dev->ref, ib_umad_release_dev);
@@ -680,9 +704,13 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
         struct ib_port_modify props = {
                 .clr_port_cap_mask = IB_PORT_SM
         };
-        int ret;
+        int ret = 0;
+
+        down_write(&port->mutex);
+        if (port->ib_dev)
+                ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
+        up_write(&port->mutex);
 
-        ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
         up(&port->sm_sem);
 
         kref_put(&port->umad_dev->ref, ib_umad_release_dev);
@@ -745,6 +773,8 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
         port->ib_dev   = device;
         port->port_num = port_num;
         init_MUTEX(&port->sm_sem);
+        init_rwsem(&port->mutex);
+        INIT_LIST_HEAD(&port->file_list);
 
         port->dev = cdev_alloc();
         if (!port->dev)
@@ -813,6 +843,9 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
 
 static void ib_umad_kill_port(struct ib_umad_port *port)
 {
+        struct ib_umad_file *file;
+        int id;
+
         class_set_devdata(port->class_dev,    NULL);
         class_set_devdata(port->sm_class_dev, NULL);
@@ -826,6 +859,21 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
         umad_port[port->dev_num] = NULL;
         spin_unlock(&port_lock);
 
+        down_write(&port->mutex);
+
+        port->ib_dev = NULL;
+
+        list_for_each_entry(file, &port->file_list, port_list)
+                for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) {
+                        if (!file->agent[id])
+                                continue;
+                        ib_dereg_mr(file->mr[id]);
+                        ib_unregister_mad_agent(file->agent[id]);
+                        file->agent[id] = NULL;
+                }
+
+        up_write(&port->mutex);
+
         clear_bit(port->dev_num, dev_map);
 }
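The user_mad.c changes replace the per-file agent_mutex with a per-port rw_semaphore plus a list of open files, so that ib_umad_kill_port() can clear port->ib_dev and unregister every open file's MAD agents when the device goes away. A simplified, hypothetical mirror of that scheme (names invented for illustration; kernel context assumed, with the rwsem and list initialized at port-creation time as in ib_umad_init_port()):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/rwsem.h>

struct demo_port {
        struct rw_semaphore mutex;
        struct list_head    file_list;
        void               *ib_dev;   /* set to NULL once the device is gone */
};

struct demo_file {
        struct demo_port *port;
        struct list_head  port_list;
};

/* Open path: take the port lock, refuse if the device is already
 * gone, otherwise register this file on the port's list. */
static int demo_open(struct demo_port *port, struct demo_file *file)
{
        int ret = 0;

        down_write(&port->mutex);
        if (!port->ib_dev) {
                ret = -ENXIO;
                goto out;
        }
        file->port = port;
        list_add_tail(&file->port_list, &port->file_list);
out:
        up_write(&port->mutex);
        return ret;
}

Because every I/O path now takes port->mutex, the removal path can walk file_list under the same lock and tear down agents without racing an open file descriptor.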
drivers/infiniband/core/uverbs.h

@@ -113,6 +113,7 @@ struct ib_uevent_object {
 
 struct ib_ucq_object {
         struct ib_uobject       uobject;
+        struct ib_uverbs_file  *uverbs_file;
         struct list_head        comp_list;
         struct list_head        async_list;
         u32                     comp_events_reported;
drivers/infiniband/core/uverbs_cmd.c

@@ -602,6 +602,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
         uobj->uobject.user_handle   = cmd.user_handle;
         uobj->uobject.context       = file->ucontext;
+        uobj->uverbs_file           = file;
         uobj->comp_events_reported  = 0;
         uobj->async_events_reported = 0;
         INIT_LIST_HEAD(&uobj->comp_list);
drivers/infiniband/core/uverbs_main.c

@@ -442,13 +442,10 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
 
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
 {
-        struct ib_uverbs_event_file *ev_file = context_ptr;
-        struct ib_ucq_object        *uobj;
+        struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
+                                                  struct ib_ucq_object, uobject);
 
-        uobj = container_of(event->element.cq->uobject,
-                            struct ib_ucq_object, uobject);
-
-        ib_uverbs_async_handler(ev_file->uverbs_file, uobj->uobject.user_handle,
+        ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
                                 event->event, &uobj->async_list,
                                 &uobj->async_events_reported);
@@ -728,12 +725,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
         if (!device->alloc_ucontext)
                 return;
 
-        uverbs_dev = kmalloc(sizeof *uverbs_dev, GFP_KERNEL);
+        uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
         if (!uverbs_dev)
                 return;
 
-        memset(uverbs_dev, 0, sizeof *uverbs_dev);
-
         kref_init(&uverbs_dev->ref);
 
         spin_lock(&map_lock);
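With the uverbs_file pointer stored in ib_ucq_object, the CQ event handler above only needs container_of() to get from the embedded ib_uobject back to its containing object. A small userspace-compilable illustration of that pattern, with hypothetical struct names standing in for the uverbs ones:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct uobject {
        int handle;
};

struct ucq_object {
        struct uobject uobject;   /* embedded, as in ib_ucq_object */
        int            comp_events_reported;
};

int main(void)
{
        struct ucq_object cq = { .uobject = { .handle = 7 } };
        struct uobject *uobj = &cq.uobject;

        /* Walk back from the embedded member to the containing struct. */
        struct ucq_object *owner = container_of(uobj, struct ucq_object, uobject);

        printf("handle=%d, recovered the right object: %d\n",
               owner->uobject.handle, owner == &cq);
        return 0;
}

(The kernel's container_of adds a type check; this simplified definition shows only the pointer arithmetic.)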
drivers/infiniband/hw/mthca/mthca_cq.c

@@ -208,7 +208,7 @@ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
         }
 }
 
-void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
 {
         struct mthca_cq *cq;
@@ -224,6 +224,35 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
         cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
 
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
+                    enum ib_event_type event_type)
+{
+        struct mthca_cq *cq;
+        struct ib_event event;
+
+        spin_lock(&dev->cq_table.lock);
+
+        cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
+
+        if (cq)
+                atomic_inc(&cq->refcount);
+        spin_unlock(&dev->cq_table.lock);
+
+        if (!cq) {
+                mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+                return;
+        }
+
+        event.device     = &dev->ib_dev;
+        event.event      = event_type;
+        event.element.cq = &cq->ibcq;
+        if (cq->ibcq.event_handler)
+                cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
+
+        if (atomic_dec_and_test(&cq->refcount))
+                wake_up(&cq->wait);
+}
+
 void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
                     struct mthca_srq *srq)
 {
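The new mthca_cq_event() follows the usual mthca lookup idiom: find the CQ under the table lock, bump its refcount so it cannot be freed while the handler runs, and let the final atomic_dec_and_test() wake anyone waiting in the destroy path. A stripped-down sketch of that idiom (hypothetical object name; kernel context assumed, include paths as in kernels of that era):

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <asm/atomic.h>

struct demo_obj {
        atomic_t          refcount;
        wait_queue_head_t wait;
};

/* Pin the object while delivering an event, then drop the reference;
 * the final put wakes a destroyer sleeping on obj->wait. */
static void demo_deliver(spinlock_t *table_lock, struct demo_obj *obj)
{
        spin_lock(table_lock);
        if (obj)
                atomic_inc(&obj->refcount);
        spin_unlock(table_lock);

        if (!obj)
                return;

        /* ... call the consumer's event handler here ... */

        if (atomic_dec_and_test(&obj->refcount))
                wake_up(&obj->wait);
}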
drivers/infiniband/hw/mthca/mthca_dev.h

@@ -460,7 +460,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
                   struct mthca_cq *cq);
 void mthca_free_cq(struct mthca_dev *dev,
                    struct mthca_cq *cq);
-void mthca_cq_event(struct mthca_dev *dev, u32 cqn);
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
+                    enum ib_event_type event_type);
 void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
                     struct mthca_srq *srq);
drivers/infiniband/hw/mthca/mthca_eq.c

@@ -292,7 +292,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
                 case MTHCA_EVENT_TYPE_COMP:
                         disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                         disarm_cq(dev, eq->eqn, disarm_cqn);
-                        mthca_cq_event(dev, disarm_cqn);
+                        mthca_cq_completion(dev, disarm_cqn);
                         break;
 
                 case MTHCA_EVENT_TYPE_PATH_MIG:
@@ -364,6 +364,8 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
                                    eqe->event.cq_err.syndrome == 1 ?
                                    "overrun" : "access violation",
                                    be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
+                        mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+                                       IB_EVENT_CQ_ERR);
                         break;
 
                 case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
drivers/infiniband/hw/mthca/mthca_main.c

@@ -1057,7 +1057,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
                 goto err_cmd;
 
         if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) {
-                mthca_warn(mdev, "HCA FW version %x.%x.%x is old (%x.%x.%x is current).\n",
+                mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n",
                            (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
                            (int) (mdev->fw_ver & 0xffff),
                            (int) (mthca_hca_table[id->driver_data].latest_fw >> 32),
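The format-string change above only affects how the three components of the 64-bit firmware version are printed: major in bits 63:32, minor in bits 31:16, subminor in bits 15:0, now shown as decimal rather than hex. A small userspace check of that decoding (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* "3.3.2" packed the same way mdev->fw_ver is. */
        uint64_t fw_ver = ((uint64_t)3 << 32) | (3 << 16) | 2;

        printf("%d.%d.%d\n",
               (int)(fw_ver >> 32),
               (int)(fw_ver >> 16) & 0xffff,
               (int)(fw_ver & 0xffff));
        return 0;
}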
drivers/infiniband/hw/mthca/mthca_mr.c

@@ -140,13 +140,11 @@ static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
         buddy->max_order = max_order;
         spin_lock_init(&buddy->lock);
 
-        buddy->bits = kmalloc((buddy->max_order + 1) * sizeof (long *),
+        buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
                               GFP_KERNEL);
         if (!buddy->bits)
                 goto err_out;
 
-        memset(buddy->bits, 0, (buddy->max_order + 1) * sizeof (long *));
-
         for (i = 0; i <= buddy->max_order; ++i) {
                 s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                 buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
drivers/infiniband/hw/mthca/mthca_profile.c

@@ -82,12 +82,10 @@ u64 mthca_make_profile(struct mthca_dev *dev,
         struct mthca_resource tmp;
         int i, j;
 
-        profile = kmalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
+        profile = kzalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
         if (!profile)
                 return -ENOMEM;
 
-        memset(profile, 0, MTHCA_RES_NUM * sizeof *profile);
-
         profile[MTHCA_RES_QP].size   = dev_lim->qpc_entry_sz;
         profile[MTHCA_RES_EEC].size  = dev_lim->eec_entry_sz;
         profile[MTHCA_RES_SRQ].size  = dev_lim->srq_entry_sz;
drivers/infiniband/hw/mthca/mthca_provider.c

@@ -1028,7 +1028,7 @@ static ssize_t show_rev(struct class_device *cdev, char *buf)
 static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
 {
         struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
-        return sprintf(buf, "%x.%x.%x\n", (int) (dev->fw_ver >> 32),
+        return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
                        (int) (dev->fw_ver >> 16) & 0xffff,
                        (int) dev->fw_ver & 0xffff);
 }
drivers/infiniband/hw/mthca/mthca_qp.c

@@ -584,6 +584,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                 return -EINVAL;
         }
 
+        if ((attr_mask & IB_QP_PKEY_INDEX) &&
+             attr->pkey_index >= dev->limits.pkey_table_len) {
+                mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
+                          attr->pkey_index, dev->limits.pkey_table_len-1);
+                return -EINVAL;
+        }
+
         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
         if (IS_ERR(mailbox))
                 return PTR_ERR(mailbox);
drivers/infiniband/hw/mthca/mthca_srq.c

@@ -75,15 +75,16 @@ static void *get_wqe(struct mthca_srq *srq, int n)
 /*
  * Return a pointer to the location within a WQE that we're using as a
- * link when the WQE is in the free list. We use an offset of 4
- * because in the Tavor case, posting a WQE may overwrite the first
- * four bytes of the previous WQE. The offset avoids corrupting our
- * free list if the WQE has already completed and been put on the free
- * list when we post the next WQE.
+ * link when the WQE is in the free list. We use the imm field
+ * because in the Tavor case, posting a WQE may overwrite the next
+ * segment of the previous WQE, but a receive WQE will never touch the
+ * imm field. This avoids corrupting our free list if the previous
+ * WQE has already completed and been put on the free list when we
+ * post the next WQE.
  */
 static inline int *wqe_to_link(void *wqe)
 {
-        return (int *) (wqe + 4);
+        return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
 }
 
 static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
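The wqe_to_link() change replaces a hard-coded "+ 4" with offsetof(struct mthca_next_seg, imm), so the free-list link lives in a field that a receive WQE never touches. A userspace-compilable illustration of the offsetof() idiom, using a stand-in struct rather than the real mthca_next_seg layout:

#include <stddef.h>
#include <stdio.h>

/* Stand-in only; field names chosen for illustration. */
struct demo_next_seg {
        unsigned int nda_op;
        unsigned int ee_nds;
        unsigned int flags;
        unsigned int imm;
};

/* Point at the imm field by name instead of hard-coding a byte offset. */
static int *wqe_to_link(void *wqe)
{
        return (int *)((char *)wqe + offsetof(struct demo_next_seg, imm));
}

int main(void)
{
        struct demo_next_seg seg = { 0 };

        printf("imm offset: %zu bytes\n", offsetof(struct demo_next_seg, imm));
        printf("link slot : %p (wqe at %p)\n",
               (void *)wqe_to_link(&seg), (void *)&seg);
        return 0;
}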
drivers/infiniband/ulp/ipoib/ipoib.h

@@ -235,6 +235,7 @@ static inline void ipoib_put_ah(struct ipoib_ah *ah)
         kref_put(&ah->ref, ipoib_free_ah);
 }
 
+int ipoib_open(struct net_device *dev);
 int ipoib_add_pkey_attr(struct net_device *dev);
 
 void ipoib_send(struct net_device *dev, struct sk_buff *skb,
@@ -267,6 +268,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
 void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter);
 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
@@ -276,6 +278,7 @@ void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
                                unsigned int *queuelen,
                                unsigned int *complete,
                                unsigned int *send_only);
+#endif
 
 int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
                        union ib_gid *mgid);
drivers/infiniband/ulp/ipoib/ipoib_ib.c

@@ -486,15 +486,16 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ib_qp_attr qp_attr;
-        int attr_mask;
         unsigned long begin;
         struct ipoib_tx_buf *tx_req;
         int i;
 
-        /* Kill the existing QP and allocate a new one */
+        /*
+         * Move our QP to the error state and then reinitialize in
+         * when all work requests have completed or have been flushed.
+         */
         qp_attr.qp_state = IB_QPS_ERR;
-        attr_mask = IB_QP_STATE;
-        if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
+        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                 ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
 
         /* Wait for all sends and receives to complete */
@@ -541,8 +542,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 timeout:
         qp_attr.qp_state = IB_QPS_RESET;
-        attr_mask = IB_QP_STATE;
-        if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
+        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                 ipoib_warn(priv, "Failed to modify QP to RESET state\n");
 
         /* Wait for all AHs to be reaped */
@@ -636,7 +636,6 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
  * Bug #2507. This implementation will probably be removed when the P_Key
  * change async notification is available.
  */
-int ipoib_open(struct net_device *dev);
 
 static void ipoib_pkey_dev_check_presence(struct net_device *dev)
 {
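ipoib_ib_dev_stop() moves the QP to the error state, waits for all work requests to be flushed, and then resets it; the hunks above also drop the attr_mask temporary in favour of passing IB_QP_STATE directly. A sketch of that drain sequence with the waiting and error handling trimmed (kernel context assumed):

#include <rdma/ib_verbs.h>

static void demo_drain_qp(struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;

        /* Error state: the HCA flushes all posted work requests. */
        qp_attr.qp_state = IB_QPS_ERR;
        ib_modify_qp(qp, &qp_attr, IB_QP_STATE);

        /* ... wait until every send and receive has completed ... */

        /* Back to reset, ready to be re-initialized. */
        qp_attr.qp_state = IB_QPS_RESET;
        ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}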
drivers/infiniband/ulp/ipoib/ipoib_main.c

@@ -356,18 +356,15 @@ static struct ipoib_path *path_rec_create(struct net_device *dev,
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ipoib_path *path;
 
-        path = kmalloc(sizeof *path, GFP_ATOMIC);
+        path = kzalloc(sizeof *path, GFP_ATOMIC);
         if (!path)
                 return NULL;
 
-        path->dev          = dev;
-        path->pathrec.dlid = 0;
-        path->ah           = NULL;
+        path->dev = dev;
 
         skb_queue_head_init(&path->queue);
 
         INIT_LIST_HEAD(&path->neigh_list);
-        path->query = NULL;
         init_completion(&path->done);
 
         memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
@@ -551,11 +548,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
         struct ipoib_neigh *neigh;
         unsigned long flags;
 
-        local_irq_save(flags);
-        if (!spin_trylock(&priv->tx_lock)) {
-                local_irq_restore(flags);
+        if (!spin_trylock_irqsave(&priv->tx_lock, flags))
                 return NETDEV_TX_LOCKED;
-        }
 
         /*
          * Check if our queue is stopped. Since we have the LLTX bit
@@ -732,25 +726,21 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
         /* Allocate RX/TX "rings" to hold queued skbs */
-        priv->rx_ring = kmalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
+        priv->rx_ring = kzalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
                                 GFP_KERNEL);
         if (!priv->rx_ring) {
                 printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                        ca->name, IPOIB_RX_RING_SIZE);
                 goto out;
         }
-        memset(priv->rx_ring, 0,
-               IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf));
 
-        priv->tx_ring = kmalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
+        priv->tx_ring = kzalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
                                 GFP_KERNEL);
         if (!priv->tx_ring) {
                 printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                        ca->name, IPOIB_TX_RING_SIZE);
                 goto out_rx_ring_cleanup;
         }
-        memset(priv->tx_ring, 0, IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf));
 
         /* priv->tx_head & tx_tail are already 0 */
@@ -807,10 +797,6 @@ static void ipoib_setup(struct net_device *dev)
         dev->watchdog_timeo      = HZ;
 
-        dev->rebuild_header      = NULL;
-        dev->set_mac_address     = NULL;
-        dev->header_cache_update = NULL;
-
         dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;
 
         /*
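The ipoib_start_xmit() hunk swaps the manual local_irq_save()/spin_trylock()/local_irq_restore() sequence for spin_trylock_irqsave(), which either takes the lock with interrupts saved or leaves the interrupt state untouched and returns 0. A sketch of the resulting hard_start_xmit idiom (hypothetical private struct; kernel context assumed):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct demo_priv {
        spinlock_t tx_lock;
};

static int demo_start_xmit(struct demo_priv *priv)
{
        unsigned long flags;

        /* LLTX drivers take their own TX lock; if it is contended,
         * tell the stack to retry rather than spin. */
        if (!spin_trylock_irqsave(&priv->tx_lock, flags))
                return NETDEV_TX_LOCKED;

        /* ... post the packet to the send queue ... */

        spin_unlock_irqrestore(&priv->tx_lock, flags);
        return NETDEV_TX_OK;
}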
drivers/infiniband/ulp/ipoib/ipoib_multicast.c

@@ -135,12 +135,10 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
 {
         struct ipoib_mcast *mcast;
 
-        mcast = kmalloc(sizeof (*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+        mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
         if (!mcast)
                 return NULL;
 
-        memset(mcast, 0, sizeof (*mcast));
-
         init_completion(&mcast->done);
 
         mcast->dev = dev;
@@ -919,6 +917,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
                 ipoib_mcast_start_thread(dev);
 }
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+
 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
 {
         struct ipoib_mcast_iter *iter;
@@ -991,3 +991,5 @@ void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
         *complete = iter->complete;
         *send_only = iter->send_only;
 }
+
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
drivers/infiniband/ulp/ipoib/ipoib_verbs.c

@@ -41,7 +41,6 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ib_qp_attr *qp_attr;
-        int attr_mask;
         int ret;
         u16 pkey_index;
@@ -59,8 +58,7 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
         /* set correct QKey for QP */
         qp_attr->qkey = priv->qkey;
-        attr_mask = IB_QP_QKEY;
-        ret = ib_modify_qp(priv->qp, qp_attr, attr_mask);
+        ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
         if (ret) {
                 ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
                 goto out;
include/rdma/ib_user_cm.h

@@ -38,7 +38,7 @@
 #include <linux/types.h>
 
-#define IB_USER_CM_ABI_VERSION 3
+#define IB_USER_CM_ABI_VERSION 4
 
 enum {
         IB_USER_CM_CMD_CREATE_ID,
@@ -84,6 +84,7 @@ struct ib_ucm_create_id_resp {
 struct ib_ucm_destroy_id {
         __u64 response;
         __u32 id;
+        __u32 reserved;
 };
 
 struct ib_ucm_destroy_id_resp {
@@ -93,6 +94,7 @@ struct ib_ucm_destroy_id_resp {
 struct ib_ucm_attr_id {
         __u64 response;
         __u32 id;
+        __u32 reserved;
 };
 
 struct ib_ucm_attr_id_resp {
@@ -164,6 +166,7 @@ struct ib_ucm_listen {
         __be64 service_id;
         __be64 service_mask;
         __u32 id;
+        __u32 reserved;
 };
 
 struct ib_ucm_establish {
@@ -219,7 +222,7 @@ struct ib_ucm_req {
         __u8 rnr_retry_count;
         __u8 max_cm_retries;
         __u8 srq;
-        __u8 reserved[1];
+        __u8 reserved[5];
 };
 
 struct ib_ucm_rep {
@@ -236,6 +239,7 @@ struct ib_ucm_rep {
         __u8 flow_control;
         __u8 rnr_retry_count;
         __u8 srq;
+        __u8 reserved[4];
 };
 
 struct ib_ucm_info {
@@ -245,7 +249,7 @@ struct ib_ucm_info {
         __u64 data;
         __u8  info_len;
         __u8  data_len;
-        __u8  reserved[2];
+        __u8  reserved[6];
 };
 
 struct ib_ucm_mra {
@@ -273,6 +277,7 @@ struct ib_ucm_sidr_req {
         __u16 pkey;
         __u8  len;
         __u8  max_cm_retries;
+        __u8  reserved[4];
 };
 
 struct ib_ucm_sidr_rep {
@@ -284,7 +289,7 @@ struct ib_ucm_sidr_rep {
         __u64 data;
         __u8  info_len;
         __u8  data_len;
-        __u8  reserved[2];
+        __u8  reserved[6];
 };
 /*
  * event notification ABI structures.
@@ -295,7 +300,7 @@ struct ib_ucm_event_get {
         __u64 info;
         __u8  data_len;
         __u8  info_len;
-        __u8  reserved[2];
+        __u8  reserved[6];
 };
 
 struct ib_ucm_req_event_resp {
@@ -315,6 +320,7 @@ struct ib_ucm_req_event_resp {
         __u8  rnr_retry_count;
         __u8  srq;
         __u8  port;
+        __u8  reserved[7];
 };
 
 struct ib_ucm_rep_event_resp {
@@ -329,7 +335,7 @@ struct ib_ucm_rep_event_resp {
         __u8  flow_control;
         __u8  rnr_retry_count;
         __u8  srq;
-        __u8  reserved[1];
+        __u8  reserved[5];
 };
 
 struct ib_ucm_rej_event_resp {
@@ -374,6 +380,7 @@ struct ib_ucm_event_resp {
         __u32 id;
         __u32 event;
         __u32 present;
+        __u32 reserved;
         union {
                 struct ib_ucm_req_event_resp req_resp;
                 struct ib_ucm_rep_event_resp rep_resp;
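The ib_user_cm.h changes bump the userspace ABI version and add or widen reserved fields so each structure's size and layout no longer depend on implicit compiler padding. The issue is that implicit tail padding differs between 32-bit and 64-bit compilers (a __u64 member is 4-byte aligned on i386 but 8-byte aligned on x86_64), so a 32-bit userspace and a 64-bit kernel could disagree about structure sizes; spelling the padding out as reserved fields makes the layout identical everywhere. A userspace-compilable illustration with hypothetical stand-in structs:

#include <stdint.h>
#include <stdio.h>

/* Without the trailing reserved word, this is 12 bytes where uint64_t
 * is 4-byte aligned inside structs (i386) but 16 bytes where it is
 * 8-byte aligned (most 64-bit ABIs) -- and the extra bytes are
 * invisible padding. */
struct demo_implicit {
        uint64_t response;
        uint32_t id;
};

/* Explicit reserved field: 16 bytes everywhere, no hidden padding. */
struct demo_explicit {
        uint64_t response;
        uint32_t id;
        uint32_t reserved;
};

int main(void)
{
        printf("implicit: %zu bytes, explicit: %zu bytes\n",
               sizeof(struct demo_implicit), sizeof(struct demo_explicit));
        return 0;
}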