Commit ba77df57 authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

parents 602d4a7e d09e3276
@@ -155,13 +155,12 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	int ret;
 
 	/* Create new device info */
-	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
+	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
 	if (!port_priv) {
 		printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
 		ret = -ENOMEM;
 		goto error1;
 	}
-	memset(port_priv, 0, sizeof *port_priv);
 
 	/* Obtain send only MAD agent for SMI QP */
 	port_priv->agent[0] = ib_register_mad_agent(device, port_num,
...
@@ -544,11 +544,10 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
 	struct cm_id_private *cm_id_priv;
 	int ret;
 
-	cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
+	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
 	if (!cm_id_priv)
 		return ERR_PTR(-ENOMEM);
 
-	memset(cm_id_priv, 0, sizeof *cm_id_priv);
 	cm_id_priv->id.state = IB_CM_IDLE;
 	cm_id_priv->id.device = device;
 	cm_id_priv->id.cm_handler = cm_handler;
@@ -621,10 +620,9 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
 {
 	struct cm_timewait_info *timewait_info;
 
-	timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
+	timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
 	if (!timewait_info)
 		return ERR_PTR(-ENOMEM);
 
-	memset(timewait_info, 0, sizeof *timewait_info);
 	timewait_info->work.local_id = local_id;
 	INIT_WORK(&timewait_info->work.work, cm_work_handler,
...
@@ -161,17 +161,9 @@ static int alloc_name(char *name)
  */
 struct ib_device *ib_alloc_device(size_t size)
 {
-	void *dev;
-
 	BUG_ON(size < sizeof (struct ib_device));
 
-	dev = kmalloc(size, GFP_KERNEL);
-	if (!dev)
-		return NULL;
-
-	memset(dev, 0, size);
-
-	return dev;
+	return kzalloc(size, GFP_KERNEL);
 }
 EXPORT_SYMBOL(ib_alloc_device);
...
@@ -255,12 +255,11 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	}
 
 	/* Allocate structures */
-	mad_agent_priv = kmalloc(sizeof *mad_agent_priv, GFP_KERNEL);
+	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
 	if (!mad_agent_priv) {
 		ret = ERR_PTR(-ENOMEM);
 		goto error1;
 	}
-	memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
 
 	mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
 						 IB_ACCESS_LOCAL_WRITE);
@@ -448,14 +447,13 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
 		goto error1;
 	}
 	/* Allocate structures */
-	mad_snoop_priv = kmalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
+	mad_snoop_priv = kzalloc(sizeof *mad_snoop_priv, GFP_KERNEL);
 	if (!mad_snoop_priv) {
 		ret = ERR_PTR(-ENOMEM);
 		goto error1;
 	}
 
 	/* Now, fill in the various structures */
-	memset(mad_snoop_priv, 0, sizeof *mad_snoop_priv);
 	mad_snoop_priv->qp_info = &port_priv->qp_info[qpn];
 	mad_snoop_priv->agent.device = device;
 	mad_snoop_priv->agent.recv_handler = recv_handler;
@@ -794,10 +792,9 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 	    (!rmpp_active && buf_size > sizeof(struct ib_mad)))
 		return ERR_PTR(-EINVAL);
 
-	buf = kmalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
+	buf = kzalloc(sizeof *mad_send_wr + buf_size, gfp_mask);
 	if (!buf)
 		return ERR_PTR(-ENOMEM);
-	memset(buf, 0, sizeof *mad_send_wr + buf_size);
 
 	mad_send_wr = buf + buf_size;
 	mad_send_wr->send_buf.mad = buf;
@@ -1039,14 +1036,12 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
 static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
 {
 	/* Allocate management method table */
-	*method = kmalloc(sizeof **method, GFP_ATOMIC);
+	*method = kzalloc(sizeof **method, GFP_ATOMIC);
 	if (!*method) {
 		printk(KERN_ERR PFX "No memory for "
 		       "ib_mad_mgmt_method_table\n");
 		return -ENOMEM;
 	}
-	/* Clear management method table */
-	memset(*method, 0, sizeof **method);
 
 	return 0;
 }
@@ -1137,15 +1132,14 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
 	if (!*class) {
 		/* Allocate management class table for "new" class version */
-		*class = kmalloc(sizeof **class, GFP_ATOMIC);
+		*class = kzalloc(sizeof **class, GFP_ATOMIC);
 		if (!*class) {
 			printk(KERN_ERR PFX "No memory for "
 			       "ib_mad_mgmt_class_table\n");
 			ret = -ENOMEM;
 			goto error1;
 		}
-		/* Clear management class table */
-		memset(*class, 0, sizeof(**class));
+
 		/* Allocate method table for this management class */
 		method = &(*class)->method_table[mgmt_class];
 		if ((ret = allocate_method_table(method)))
@@ -1209,25 +1203,24 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
 				mad_reg_req->mgmt_class_version].vendor;
 	if (!*vendor_table) {
 		/* Allocate mgmt vendor class table for "new" class version */
-		vendor = kmalloc(sizeof *vendor, GFP_ATOMIC);
+		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
 		if (!vendor) {
 			printk(KERN_ERR PFX "No memory for "
 			       "ib_mad_mgmt_vendor_class_table\n");
 			goto error1;
 		}
-		/* Clear management vendor class table */
-		memset(vendor, 0, sizeof(*vendor));
+
 		*vendor_table = vendor;
 	}
 	if (!(*vendor_table)->vendor_class[vclass]) {
 		/* Allocate table for this management vendor class */
-		vendor_class = kmalloc(sizeof *vendor_class, GFP_ATOMIC);
+		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
 		if (!vendor_class) {
 			printk(KERN_ERR PFX "No memory for "
 			       "ib_mad_mgmt_vendor_class\n");
 			goto error2;
 		}
-		memset(vendor_class, 0, sizeof(*vendor_class));
+
 		(*vendor_table)->vendor_class[vclass] = vendor_class;
 	}
 	for (i = 0; i < MAX_MGMT_OUI; i++) {
@@ -2524,12 +2517,12 @@ static int ib_mad_port_open(struct ib_device *device,
 	char name[sizeof "ib_mad123"];
 
 	/* Create new device info */
-	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
+	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
 	if (!port_priv) {
 		printk(KERN_ERR PFX "No memory for ib_mad_port_private\n");
 		return -ENOMEM;
 	}
-	memset(port_priv, 0, sizeof *port_priv);
+
 	port_priv->device = device;
 	port_priv->port_num = port_num;
 	spin_lock_init(&port_priv->reg_lock);
...
@@ -307,14 +307,13 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
 	if (!p->ibdev->process_mad)
 		return sprintf(buf, "N/A (no PMA)\n");
 
-	in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
+	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
 	out_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
 	if (!in_mad || !out_mad) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	memset(in_mad, 0, sizeof *in_mad);
 	in_mad->mad_hdr.base_version = 1;
 	in_mad->mad_hdr.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
 	in_mad->mad_hdr.class_version = 1;
@@ -508,10 +507,9 @@ static int add_port(struct ib_device *device, int port_num)
 	if (ret)
 		return ret;
 
-	p = kmalloc(sizeof *p, GFP_KERNEL);
+	p = kzalloc(sizeof *p, GFP_KERNEL);
 	if (!p)
 		return -ENOMEM;
 
-	memset(p, 0, sizeof *p);
 	p->ibdev = device;
 	p->port_num = port_num;
...
@@ -172,11 +172,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
 	struct ib_ucm_context *ctx;
 	int result;
 
-	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
 	if (!ctx)
 		return NULL;
 
-	memset(ctx, 0, sizeof *ctx);
 	atomic_set(&ctx->ref, 1);
 	init_waitqueue_head(&ctx->wait);
 	ctx->file = file;
@@ -386,11 +385,10 @@ static int ib_ucm_event_handler(struct ib_cm_id *cm_id,
 	ctx = cm_id->context;
 
-	uevent = kmalloc(sizeof(*uevent), GFP_KERNEL);
+	uevent = kzalloc(sizeof *uevent, GFP_KERNEL);
 	if (!uevent)
 		goto err1;
 
-	memset(uevent, 0, sizeof(*uevent));
 	uevent->ctx = ctx;
 	uevent->cm_id = cm_id;
 	uevent->resp.uid = ctx->uid;
@@ -1345,11 +1343,10 @@ static void ib_ucm_add_one(struct ib_device *device)
 	if (!device->alloc_ucontext)
 		return;
 
-	ucm_dev = kmalloc(sizeof *ucm_dev, GFP_KERNEL);
+	ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL);
 	if (!ucm_dev)
 		return;
 
-	memset(ucm_dev, 0, sizeof *ucm_dev);
 	ucm_dev->ib_dev = device;
 
 	ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
...
@@ -94,6 +94,9 @@ struct ib_umad_port {
 	struct class_device *sm_class_dev;
 	struct semaphore sm_sem;
 
+	struct rw_semaphore mutex;
+	struct list_head file_list;
+
 	struct ib_device *ib_dev;
 	struct ib_umad_device *umad_dev;
 	int dev_num;
@@ -108,10 +111,10 @@ struct ib_umad_device {
 struct ib_umad_file {
 	struct ib_umad_port *port;
-	spinlock_t recv_lock;
 	struct list_head recv_list;
+	struct list_head port_list;
+	spinlock_t recv_lock;
 	wait_queue_head_t recv_wait;
-	struct rw_semaphore agent_mutex;
 	struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
 	struct ib_mr *mr[IB_UMAD_MAX_AGENTS];
 };
@@ -148,7 +151,7 @@ static int queue_packet(struct ib_umad_file *file,
 {
 	int ret = 1;
 
-	down_read(&file->agent_mutex);
+	down_read(&file->port->mutex);
 	for (packet->mad.hdr.id = 0;
 	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
 	     packet->mad.hdr.id++)
@@ -161,7 +164,7 @@ static int queue_packet(struct ib_umad_file *file,
 			break;
 		}
 
-	up_read(&file->agent_mutex);
+	up_read(&file->port->mutex);
 
 	return ret;
 }
@@ -322,7 +325,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		goto err;
 	}
 
-	down_read(&file->agent_mutex);
+	down_read(&file->port->mutex);
 
 	agent = file->agent[packet->mad.hdr.id];
 	if (!agent) {
@@ -419,7 +422,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	if (ret)
 		goto err_msg;
 
-	up_read(&file->agent_mutex);
+	up_read(&file->port->mutex);
 
 	return count;
@@ -430,7 +433,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	ib_destroy_ah(ah);
 
 err_up:
-	up_read(&file->agent_mutex);
+	up_read(&file->port->mutex);
 
 err:
 	kfree(packet);
@@ -460,7 +463,12 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
 	int agent_id;
 	int ret;
 
-	down_write(&file->agent_mutex);
+	down_write(&file->port->mutex);
+
+	if (!file->port->ib_dev) {
+		ret = -EPIPE;
+		goto out;
+	}
 
 	if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
 		ret = -EFAULT;
@@ -522,7 +530,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
 	ib_unregister_mad_agent(agent);
 
 out:
-	up_write(&file->agent_mutex);
+	up_write(&file->port->mutex);
 	return ret;
 }
@@ -531,7 +539,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
 	u32 id;
 	int ret = 0;
 
-	down_write(&file->agent_mutex);
+	down_write(&file->port->mutex);
 
 	if (get_user(id, (u32 __user *) arg)) {
 		ret = -EFAULT;
@@ -548,7 +556,7 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
 	file->agent[id] = NULL;
 
 out:
-	up_write(&file->agent_mutex);
+	up_write(&file->port->mutex);
 	return ret;
 }
@@ -569,6 +577,7 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 {
 	struct ib_umad_port *port;
 	struct ib_umad_file *file;
+	int ret = 0;
 
 	spin_lock(&port_lock);
 	port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
@@ -579,21 +588,32 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
 	if (!port)
 		return -ENXIO;
 
+	down_write(&port->mutex);
+
+	if (!port->ib_dev) {
+		ret = -ENXIO;
+		goto out;
+	}
+
 	file = kzalloc(sizeof *file, GFP_KERNEL);
 	if (!file) {
 		kref_put(&port->umad_dev->ref, ib_umad_release_dev);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 
 	spin_lock_init(&file->recv_lock);
-	init_rwsem(&file->agent_mutex);
 	INIT_LIST_HEAD(&file->recv_list);
 	init_waitqueue_head(&file->recv_wait);
 
 	file->port = port;
 	filp->private_data = file;
 
-	return 0;
+	list_add_tail(&file->port_list, &port->file_list);
+
+out:
+	up_write(&port->mutex);
+	return ret;
 }
 
 static int ib_umad_close(struct inode *inode, struct file *filp)
@@ -603,6 +623,7 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
 	struct ib_umad_packet *packet, *tmp;
 	int i;
 
+	down_write(&file->port->mutex);
 	for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
 		if (file->agent[i]) {
 			ib_dereg_mr(file->mr[i]);
@@ -612,6 +633,9 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
 	list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
 		kfree(packet);
 
+	list_del(&file->port_list);
+	up_write(&file->port->mutex);
+
 	kfree(file);
 	kref_put(&dev->ref, ib_umad_release_dev);
@@ -680,9 +704,13 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
 	struct ib_port_modify props = {
 		.clr_port_cap_mask = IB_PORT_SM
 	};
-	int ret;
+	int ret = 0;
 
-	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
+	down_write(&port->mutex);
+	if (port->ib_dev)
+		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
+	up_write(&port->mutex);
+
 	up(&port->sm_sem);
 
 	kref_put(&port->umad_dev->ref, ib_umad_release_dev);
@@ -745,6 +773,8 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
 	port->ib_dev = device;
 	port->port_num = port_num;
 	init_MUTEX(&port->sm_sem);
+	init_rwsem(&port->mutex);
+	INIT_LIST_HEAD(&port->file_list);
 
 	port->dev = cdev_alloc();
 	if (!port->dev)
@@ -813,6 +843,9 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
 
 static void ib_umad_kill_port(struct ib_umad_port *port)
 {
+	struct ib_umad_file *file;
+	int id;
+
 	class_set_devdata(port->class_dev, NULL);
 	class_set_devdata(port->sm_class_dev, NULL);
@@ -826,6 +859,21 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
 	umad_port[port->dev_num] = NULL;
 	spin_unlock(&port_lock);
 
+	down_write(&port->mutex);
+
+	port->ib_dev = NULL;
+
+	list_for_each_entry(file, &port->file_list, port_list)
+		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) {
+			if (!file->agent[id])
+				continue;
+
+			ib_dereg_mr(file->mr[id]);
+			ib_unregister_mad_agent(file->agent[id]);
+			file->agent[id] = NULL;
+		}
+
+	up_write(&port->mutex);
+
 	clear_bit(port->dev_num, dev_map);
 }
...
@@ -113,6 +113,7 @@ struct ib_uevent_object {
 struct ib_ucq_object {
 	struct ib_uobject uobject;
+	struct ib_uverbs_file *uverbs_file;
 	struct list_head comp_list;
 	struct list_head async_list;
 	u32 comp_events_reported;
...
@@ -602,6 +602,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
 	uobj->uobject.user_handle = cmd.user_handle;
 	uobj->uobject.context = file->ucontext;
+	uobj->uverbs_file = file;
 	uobj->comp_events_reported = 0;
 	uobj->async_events_reported = 0;
 	INIT_LIST_HEAD(&uobj->comp_list);
...
@@ -442,13 +442,10 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
 void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
 {
-	struct ib_uverbs_event_file *ev_file = context_ptr;
-	struct ib_ucq_object *uobj;
-
-	uobj = container_of(event->element.cq->uobject,
-			    struct ib_ucq_object, uobject);
+	struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
+						  struct ib_ucq_object, uobject);
 
-	ib_uverbs_async_handler(ev_file->uverbs_file, uobj->uobject.user_handle,
+	ib_uverbs_async_handler(uobj->uverbs_file, uobj->uobject.user_handle,
 				event->event, &uobj->async_list,
 				&uobj->async_events_reported);
@@ -728,12 +725,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
 	if (!device->alloc_ucontext)
 		return;
 
-	uverbs_dev = kmalloc(sizeof *uverbs_dev, GFP_KERNEL);
+	uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL);
 	if (!uverbs_dev)
 		return;
 
-	memset(uverbs_dev, 0, sizeof *uverbs_dev);
-
 	kref_init(&uverbs_dev->ref);
 
 	spin_lock(&map_lock);
...
@@ -208,7 +208,7 @@ static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
 	}
 }
 
-void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
 {
 	struct mthca_cq *cq;
@@ -224,6 +224,35 @@ void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
 	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
 
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
+		    enum ib_event_type event_type)
+{
+	struct mthca_cq *cq;
+	struct ib_event event;
+
+	spin_lock(&dev->cq_table.lock);
+
+	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
+	if (cq)
+		atomic_inc(&cq->refcount);
+	spin_unlock(&dev->cq_table.lock);
+
+	if (!cq) {
+		mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+		return;
+	}
+
+	event.device = &dev->ib_dev;
+	event.event = event_type;
+	event.element.cq = &cq->ibcq;
+	if (cq->ibcq.event_handler)
+		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);
+
+	if (atomic_dec_and_test(&cq->refcount))
+		wake_up(&cq->wait);
+}
+
 void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 		    struct mthca_srq *srq)
 {
...
@@ -460,7 +460,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 		  struct mthca_cq *cq);
 void mthca_free_cq(struct mthca_dev *dev,
 		   struct mthca_cq *cq);
-void mthca_cq_event(struct mthca_dev *dev, u32 cqn);
+void mthca_cq_completion(struct mthca_dev *dev, u32 cqn);
+void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
+		    enum ib_event_type event_type);
 void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 		    struct mthca_srq *srq);
...
@@ -292,7 +292,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 		case MTHCA_EVENT_TYPE_COMP:
 			disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
 			disarm_cq(dev, eq->eqn, disarm_cqn);
-			mthca_cq_event(dev, disarm_cqn);
+			mthca_cq_completion(dev, disarm_cqn);
 			break;
 
 		case MTHCA_EVENT_TYPE_PATH_MIG:
@@ -364,6 +364,8 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 				   eqe->event.cq_err.syndrome == 1 ?
 				   "overrun" : "access violation",
 				   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
+			mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+				       IB_EVENT_CQ_ERR);
 			break;
 
 		case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
...
@@ -1057,7 +1057,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,
 		goto err_cmd;
 
 	if (mdev->fw_ver < mthca_hca_table[id->driver_data].latest_fw) {
-		mthca_warn(mdev, "HCA FW version %x.%x.%x is old (%x.%x.%x is current).\n",
+		mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n",
			   (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
			   (int) (mdev->fw_ver & 0xffff),
			   (int) (mthca_hca_table[id->driver_data].latest_fw >> 32),
...
@@ -140,13 +140,11 @@ static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
 	buddy->max_order = max_order;
 	spin_lock_init(&buddy->lock);
 
-	buddy->bits = kmalloc((buddy->max_order + 1) * sizeof (long *),
+	buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
 			      GFP_KERNEL);
 	if (!buddy->bits)
 		goto err_out;
 
-	memset(buddy->bits, 0, (buddy->max_order + 1) * sizeof (long *));
-
 	for (i = 0; i <= buddy->max_order; ++i) {
 		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
 		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
...
@@ -82,12 +82,10 @@ u64 mthca_make_profile(struct mthca_dev *dev,
 	struct mthca_resource tmp;
 	int i, j;
 
-	profile = kmalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
+	profile = kzalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL);
 	if (!profile)
 		return -ENOMEM;
 
-	memset(profile, 0, MTHCA_RES_NUM * sizeof *profile);
-
 	profile[MTHCA_RES_QP].size = dev_lim->qpc_entry_sz;
 	profile[MTHCA_RES_EEC].size = dev_lim->eec_entry_sz;
 	profile[MTHCA_RES_SRQ].size = dev_lim->srq_entry_sz;
...
@@ -1028,7 +1028,7 @@ static ssize_t show_rev(struct class_device *cdev, char *buf)
 static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
 {
 	struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
-	return sprintf(buf, "%x.%x.%x\n", (int) (dev->fw_ver >> 32),
+	return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
		       (int) (dev->fw_ver >> 16) & 0xffff,
		       (int) dev->fw_ver & 0xffff);
 }
...
@@ -584,6 +584,13 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		return -EINVAL;
 	}
 
+	if ((attr_mask & IB_QP_PKEY_INDEX) &&
+	    attr->pkey_index >= dev->limits.pkey_table_len) {
+		mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
+			  attr->pkey_index, dev->limits.pkey_table_len - 1);
+		return -EINVAL;
+	}
+
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
 	if (IS_ERR(mailbox))
 		return PTR_ERR(mailbox);
...
@@ -75,15 +75,16 @@ static void *get_wqe(struct mthca_srq *srq, int n)
 /*
  * Return a pointer to the location within a WQE that we're using as a
- * link when the WQE is in the free list. We use an offset of 4
- * because in the Tavor case, posting a WQE may overwrite the first
- * four bytes of the previous WQE. The offset avoids corrupting our
- * free list if the WQE has already completed and been put on the free
- * list when we post the next WQE.
+ * link when the WQE is in the free list. We use the imm field
+ * because in the Tavor case, posting a WQE may overwrite the next
+ * segment of the previous WQE, but a receive WQE will never touch the
+ * imm field. This avoids corrupting our free list if the previous
+ * WQE has already completed and been put on the free list when we
+ * post the next WQE.
  */
 static inline int *wqe_to_link(void *wqe)
 {
-	return (int *) (wqe + 4);
+	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
 }
 
 static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
...
@@ -235,6 +235,7 @@ static inline void ipoib_put_ah(struct ipoib_ah *ah)
 	kref_put(&ah->ref, ipoib_free_ah);
 }
 
+int ipoib_open(struct net_device *dev);
 int ipoib_add_pkey_attr(struct net_device *dev);
 
 void ipoib_send(struct net_device *dev, struct sk_buff *skb,
@@ -267,6 +268,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush);
 void ipoib_mcast_dev_down(struct net_device *dev);
 void ipoib_mcast_dev_flush(struct net_device *dev);
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
 void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter);
 int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
@@ -276,6 +278,7 @@ void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
 			   unsigned int *queuelen,
 			   unsigned int *complete,
 			   unsigned int *send_only);
+#endif
 
 int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
 		       union ib_gid *mgid);
...
@@ -486,15 +486,16 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_attr qp_attr;
-	int attr_mask;
 	unsigned long begin;
 	struct ipoib_tx_buf *tx_req;
 	int i;
 
-	/* Kill the existing QP and allocate a new one */
+	/*
+	 * Move our QP to the error state and then reinitialize in
+	 * when all work requests have completed or have been flushed.
+	 */
 	qp_attr.qp_state = IB_QPS_ERR;
-	attr_mask = IB_QP_STATE;
-	if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
+	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
 		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");
 
 	/* Wait for all sends and receives to complete */
@@ -541,8 +542,7 @@ int ipoib_ib_dev_stop(struct net_device *dev)
 
 timeout:
 	qp_attr.qp_state = IB_QPS_RESET;
-	attr_mask = IB_QP_STATE;
-	if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
+	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
 		ipoib_warn(priv, "Failed to modify QP to RESET state\n");
 
 	/* Wait for all AHs to be reaped */
@@ -636,7 +636,6 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
  * Bug #2507. This implementation will probably be removed when the P_Key
  * change async notification is available.
  */
-int ipoib_open(struct net_device *dev);
 
 static void ipoib_pkey_dev_check_presence(struct net_device *dev)
 {
...
@@ -356,18 +356,15 @@ static struct ipoib_path *path_rec_create(struct net_device *dev,
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_path *path;
 
-	path = kmalloc(sizeof *path, GFP_ATOMIC);
+	path = kzalloc(sizeof *path, GFP_ATOMIC);
 	if (!path)
 		return NULL;
 
 	path->dev = dev;
-	path->pathrec.dlid = 0;
-	path->ah = NULL;
 
 	skb_queue_head_init(&path->queue);
 
 	INIT_LIST_HEAD(&path->neigh_list);
-	path->query = NULL;
 	init_completion(&path->done);
 
 	memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
@@ -551,11 +548,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ipoib_neigh *neigh;
 	unsigned long flags;
 
-	local_irq_save(flags);
-	if (!spin_trylock(&priv->tx_lock)) {
-		local_irq_restore(flags);
+	if (!spin_trylock_irqsave(&priv->tx_lock, flags))
 		return NETDEV_TX_LOCKED;
-	}
 
 	/*
 	 * Check if our queue is stopped. Since we have the LLTX bit
@@ -732,25 +726,21 @@ int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
 	/* Allocate RX/TX "rings" to hold queued skbs */
 
-	priv->rx_ring = kmalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
+	priv->rx_ring = kzalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
 				GFP_KERNEL);
 	if (!priv->rx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
 		       ca->name, IPOIB_RX_RING_SIZE);
 		goto out;
 	}
-	memset(priv->rx_ring, 0,
-	       IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf));
 
-	priv->tx_ring = kmalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
+	priv->tx_ring = kzalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
 				GFP_KERNEL);
 	if (!priv->tx_ring) {
 		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
 		       ca->name, IPOIB_TX_RING_SIZE);
 		goto out_rx_ring_cleanup;
 	}
-	memset(priv->tx_ring, 0,
-	       IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf));
 
 	/* priv->tx_head & tx_tail are already 0 */
@@ -807,10 +797,6 @@ static void ipoib_setup(struct net_device *dev)
 	dev->watchdog_timeo = HZ;
 
-	dev->rebuild_header = NULL;
-	dev->set_mac_address = NULL;
-	dev->header_cache_update = NULL;
-
 	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
 
 	/*
...
@@ -135,12 +135,10 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
 {
 	struct ipoib_mcast *mcast;
 
-	mcast = kmalloc(sizeof (*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC);
+	mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
 	if (!mcast)
 		return NULL;
 
-	memset(mcast, 0, sizeof (*mcast));
-
 	init_completion(&mcast->done);
 
 	mcast->dev = dev;
@@ -919,6 +917,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 		ipoib_mcast_start_thread(dev);
 }
 
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+
 struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
 {
 	struct ipoib_mcast_iter *iter;
@@ -991,3 +991,5 @@ void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
 	*complete = iter->complete;
 	*send_only = iter->send_only;
 }
+
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
...
@@ -41,7 +41,6 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ib_qp_attr *qp_attr;
-	int attr_mask;
 	int ret;
 	u16 pkey_index;
@@ -59,8 +58,7 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
 	/* set correct QKey for QP */
 	qp_attr->qkey = priv->qkey;
-	attr_mask = IB_QP_QKEY;
-	ret = ib_modify_qp(priv->qp, qp_attr, attr_mask);
+	ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
 	if (ret) {
 		ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
 		goto out;
...
@@ -38,7 +38,7 @@
 
 #include <linux/types.h>
 
-#define IB_USER_CM_ABI_VERSION 3
+#define IB_USER_CM_ABI_VERSION 4
 
 enum {
 	IB_USER_CM_CMD_CREATE_ID,
@@ -84,6 +84,7 @@ struct ib_ucm_create_id_resp {
 struct ib_ucm_destroy_id {
 	__u64 response;
 	__u32 id;
+	__u32 reserved;
 };
 
 struct ib_ucm_destroy_id_resp {
@@ -93,6 +94,7 @@ struct ib_ucm_destroy_id_resp {
 struct ib_ucm_attr_id {
 	__u64 response;
 	__u32 id;
+	__u32 reserved;
 };
 
 struct ib_ucm_attr_id_resp {
@@ -164,6 +166,7 @@ struct ib_ucm_listen {
 	__be64 service_id;
 	__be64 service_mask;
 	__u32 id;
+	__u32 reserved;
 };
 
 struct ib_ucm_establish {
@@ -219,7 +222,7 @@ struct ib_ucm_req {
 	__u8 rnr_retry_count;
 	__u8 max_cm_retries;
 	__u8 srq;
-	__u8 reserved[1];
+	__u8 reserved[5];
 };
 
 struct ib_ucm_rep {
@@ -236,6 +239,7 @@ struct ib_ucm_rep {
 	__u8 flow_control;
 	__u8 rnr_retry_count;
 	__u8 srq;
+	__u8 reserved[4];
 };
 
 struct ib_ucm_info {
@@ -245,7 +249,7 @@ struct ib_ucm_info {
 	__u64 data;
 	__u8 info_len;
 	__u8 data_len;
-	__u8 reserved[2];
+	__u8 reserved[6];
 };
 
 struct ib_ucm_mra {
@@ -273,6 +277,7 @@ struct ib_ucm_sidr_req {
 	__u16 pkey;
 	__u8 len;
 	__u8 max_cm_retries;
+	__u8 reserved[4];
 };
 
 struct ib_ucm_sidr_rep {
@@ -284,7 +289,7 @@ struct ib_ucm_sidr_rep {
 	__u64 data;
 	__u8 info_len;
 	__u8 data_len;
-	__u8 reserved[2];
+	__u8 reserved[6];
 };
 
 /*
  * event notification ABI structures.
@@ -295,7 +300,7 @@ struct ib_ucm_event_get {
 	__u64 info;
 	__u8 data_len;
 	__u8 info_len;
-	__u8 reserved[2];
+	__u8 reserved[6];
 };
 
 struct ib_ucm_req_event_resp {
@@ -315,6 +320,7 @@ struct ib_ucm_req_event_resp {
 	__u8 rnr_retry_count;
 	__u8 srq;
 	__u8 port;
+	__u8 reserved[7];
 };
 
 struct ib_ucm_rep_event_resp {
@@ -329,7 +335,7 @@ struct ib_ucm_rep_event_resp {
 	__u8 flow_control;
 	__u8 rnr_retry_count;
 	__u8 srq;
-	__u8 reserved[1];
+	__u8 reserved[5];
};
 
 struct ib_ucm_rej_event_resp {
@@ -374,6 +380,7 @@ struct ib_ucm_event_resp {
 	__u32 id;
 	__u32 event;
 	__u32 present;
+	__u32 reserved;
 	union {
 		struct ib_ucm_req_event_resp req_resp;
 		struct ib_ucm_rep_event_resp rep_resp;