Commit f3b8436a authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/ehca: Use consistent types for ehca_plpar_hcall9()
  IB/ehca: Fix printk format warnings from u64 type change
  IPoIB: Do not print error messages for multicast join retries
  IB/mlx4: Fix memory ordering problem when posting LSO sends
  mlx4_core: Fix min() warning
  IPoIB: Fix deadlock between ipoib_open() and child interface create
  IPoIB: Fix hang in napi_disable() if P_Key is never found
parents 0bac038a ac8581d4
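
Most of the ehca hunks that follow only touch printk format strings: the "%li"/"%lx" conversions become "%lli"/"%llx" because the u64 type change named in the shortlog leaves u64 defined as unsigned long long, which gcc's format checking no longer accepts for a plain %l conversion. A minimal sketch of the resulting pattern (hypothetical function, not part of this commit):

/*
 * Sketch only: printing a u64 without -Wformat warnings once u64 is
 * unsigned long long.  %llx matches the type; %lx is what the hunks
 * below are converting away from.
 */
#include <linux/types.h>
#include <linux/kernel.h>

static void example_print_handle(u64 handle)
{
	pr_info("handle=%llx\n", handle);
}
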
......@@ -196,7 +196,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
if (h_ret != H_SUCCESS) {
ehca_err(device, "hipz_h_alloc_resource_cq() failed "
"h_ret=%li device=%p", h_ret, device);
"h_ret=%lli device=%p", h_ret, device);
cq = ERR_PTR(ehca2ib_return_code(h_ret));
goto create_cq_exit2;
}
......@@ -232,7 +232,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
if (h_ret < H_SUCCESS) {
ehca_err(device, "hipz_h_register_rpage_cq() failed "
"ehca_cq=%p cq_num=%x h_ret=%li counter=%i "
"ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
"act_pages=%i", my_cq, my_cq->cq_number,
h_ret, counter, param.act_pages);
cq = ERR_PTR(-EINVAL);
......@@ -244,7 +244,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
if ((h_ret != H_SUCCESS) || vpage) {
ehca_err(device, "Registration of pages not "
"complete ehca_cq=%p cq_num=%x "
"h_ret=%li", my_cq, my_cq->cq_number,
"h_ret=%lli", my_cq, my_cq->cq_number,
h_ret);
cq = ERR_PTR(-EAGAIN);
goto create_cq_exit4;
......@@ -252,7 +252,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
} else {
if (h_ret != H_PAGE_REGISTERED) {
ehca_err(device, "Registration of page failed "
"ehca_cq=%p cq_num=%x h_ret=%li "
"ehca_cq=%p cq_num=%x h_ret=%lli "
"counter=%i act_pages=%i",
my_cq, my_cq->cq_number,
h_ret, counter, param.act_pages);
......@@ -266,7 +266,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
gal = my_cq->galpas.kernel;
cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx",
ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
my_cq, my_cq->cq_number, cqx_fec);
my_cq->ib_cq.cqe = my_cq->nr_of_entries =
......@@ -307,7 +307,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
if (h_ret != H_SUCCESS)
ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
"cq_num=%x h_ret=%li", my_cq, my_cq->cq_number, h_ret);
"cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
create_cq_exit2:
write_lock_irqsave(&ehca_cq_idr_lock, flags);
......@@ -355,7 +355,7 @@ int ehca_destroy_cq(struct ib_cq *cq)
h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
if (h_ret == H_R_STATE) {
/* cq in err: read err data and destroy it forcibly */
ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in err "
ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
"state. Try to delete it forcibly.",
my_cq, cq_num, my_cq->ipz_cq_handle.handle);
ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
......@@ -365,7 +365,7 @@ int ehca_destroy_cq(struct ib_cq *cq)
cq_num);
}
if (h_ret != H_SUCCESS) {
ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%li "
ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
"ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
return ehca2ib_return_code(h_ret);
}
......
......@@ -393,7 +393,7 @@ int ehca_modify_port(struct ib_device *ibdev,
hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
cap, props->init_type, port_modify_mask);
if (hret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Modify port failed h_ret=%li",
ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli",
hret);
ret = -EINVAL;
}
......
......@@ -99,7 +99,7 @@ static void print_error_data(struct ehca_shca *shca, void *data,
return;
ehca_err(&shca->ib_device,
"QP 0x%x (resource=%lx) has errors.",
"QP 0x%x (resource=%llx) has errors.",
qp->ib_qp.qp_num, resource);
break;
}
......@@ -108,21 +108,21 @@ static void print_error_data(struct ehca_shca *shca, void *data,
struct ehca_cq *cq = (struct ehca_cq *)data;
ehca_err(&shca->ib_device,
"CQ 0x%x (resource=%lx) has errors.",
"CQ 0x%x (resource=%llx) has errors.",
cq->cq_number, resource);
break;
}
default:
ehca_err(&shca->ib_device,
"Unknown error type: %lx on %s.",
"Unknown error type: %llx on %s.",
type, shca->ib_device.name);
break;
}
ehca_err(&shca->ib_device, "Error data is available: %lx.", resource);
ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
ehca_err(&shca->ib_device, "EHCA ----- error data begin "
"---------------------------------------------------");
ehca_dmp(rblock, length, "resource=%lx", resource);
ehca_dmp(rblock, length, "resource=%llx", resource);
ehca_err(&shca->ib_device, "EHCA ----- error data end "
"----------------------------------------------------");
......@@ -152,7 +152,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
if (ret == H_R_STATE)
ehca_err(&shca->ib_device,
"No error data is available: %lx.", resource);
"No error data is available: %llx.", resource);
else if (ret == H_SUCCESS) {
int length;
......@@ -164,7 +164,7 @@ int ehca_error_data(struct ehca_shca *shca, void *data,
print_error_data(shca, data, rblock, length);
} else
ehca_err(&shca->ib_device,
"Error data could not be fetched: %lx", resource);
"Error data could not be fetched: %llx", resource);
ehca_free_fw_ctrlblock(rblock);
......@@ -514,7 +514,7 @@ static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
struct ehca_cq *cq;
eqe_value = eqe->entry;
ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
ehca_dbg(&shca->ib_device, "Got completion event");
token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
......@@ -603,7 +603,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
ret = hipz_h_eoi(eq->ist);
if (ret != H_SUCCESS)
ehca_err(&shca->ib_device,
"bad return code EOI -rc = %ld\n", ret);
"bad return code EOI -rc = %lld\n", ret);
ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
}
if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
......
......@@ -304,7 +304,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
if (h_ret != H_SUCCESS) {
ehca_gen_err("Cannot query device properties. h_ret=%li",
ehca_gen_err("Cannot query device properties. h_ret=%lli",
h_ret);
ret = -EPERM;
goto sense_attributes1;
......@@ -391,7 +391,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
port = (struct hipz_query_port *)rblock;
h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
if (h_ret != H_SUCCESS) {
ehca_gen_err("Cannot query port properties. h_ret=%li",
ehca_gen_err("Cannot query port properties. h_ret=%lli",
h_ret);
ret = -EPERM;
goto sense_attributes1;
......@@ -682,7 +682,7 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
{
struct ehca_shca *shca = dev->driver_data;
return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle);
return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
}
static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
......
......@@ -88,7 +88,7 @@ int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (h_ret != H_SUCCESS)
ehca_err(ibqp->device,
"ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
"h_ret=%li", my_qp, ibqp->qp_num, h_ret);
"h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}
......@@ -125,7 +125,7 @@ int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (h_ret != H_SUCCESS)
ehca_err(ibqp->device,
"ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
"h_ret=%li", my_qp, ibqp->qp_num, h_ret);
"h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}
......@@ -331,7 +331,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
if (cnt == (nr_q_pages - 1)) { /* last page! */
if (h_ret != expected_hret) {
ehca_err(ib_dev, "hipz_qp_register_rpage() "
"h_ret=%li", h_ret);
"h_ret=%lli", h_ret);
ret = ehca2ib_return_code(h_ret);
goto init_qp_queue1;
}
......@@ -345,7 +345,7 @@ static inline int init_qp_queue(struct ehca_shca *shca,
} else {
if (h_ret != H_PAGE_REGISTERED) {
ehca_err(ib_dev, "hipz_qp_register_rpage() "
"h_ret=%li", h_ret);
"h_ret=%lli", h_ret);
ret = ehca2ib_return_code(h_ret);
goto init_qp_queue1;
}
......@@ -709,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
if (h_ret != H_SUCCESS) {
ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%li",
ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
h_ret);
ret = ehca2ib_return_code(h_ret);
goto create_qp_exit1;
......@@ -1010,7 +1010,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
mqpcb, my_qp->galpas.kernel);
if (hret != H_SUCCESS) {
ehca_err(pd->device, "Could not modify SRQ to INIT "
"ehca_qp=%p qp_num=%x h_ret=%li",
"ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, my_qp->real_qp_num, hret);
goto create_srq2;
}
......@@ -1024,7 +1024,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
mqpcb, my_qp->galpas.kernel);
if (hret != H_SUCCESS) {
ehca_err(pd->device, "Could not enable SRQ "
"ehca_qp=%p qp_num=%x h_ret=%li",
"ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, my_qp->real_qp_num, hret);
goto create_srq2;
}
......@@ -1038,7 +1038,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
mqpcb, my_qp->galpas.kernel);
if (hret != H_SUCCESS) {
ehca_err(pd->device, "Could not modify SRQ to RTR "
"ehca_qp=%p qp_num=%x h_ret=%li",
"ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, my_qp->real_qp_num, hret);
goto create_srq2;
}
......@@ -1078,7 +1078,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
&bad_send_wqe_p, NULL, 2);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
" ehca_qp=%p qp_num=%x h_ret=%li",
" ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}
......@@ -1134,7 +1134,7 @@ static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
ehca_gen_err("Invalid offset for calculating left cqes "
"wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v);
"wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
return -EFAULT;
}
......@@ -1168,7 +1168,7 @@ static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
&send_wqe_p, &recv_wqe_p, 4);
if (h_ret != H_SUCCESS) {
ehca_err(&shca->ib_device, "disable_and_get_wqe() "
"failed ehca_qp=%p qp_num=%x h_ret=%li",
"failed ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, qp_num, h_ret);
return ehca2ib_return_code(h_ret);
}
......@@ -1261,7 +1261,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
mqpcb, my_qp->galpas.kernel);
if (h_ret != H_SUCCESS) {
ehca_err(ibqp->device, "hipz_h_query_qp() failed "
"ehca_qp=%p qp_num=%x h_ret=%li",
"ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, ibqp->qp_num, h_ret);
ret = ehca2ib_return_code(h_ret);
goto modify_qp_exit1;
......@@ -1690,7 +1690,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%li "
ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
"ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
goto modify_qp_exit2;
}
......@@ -1723,7 +1723,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
ret = ehca2ib_return_code(h_ret);
ehca_err(ibqp->device, "ENABLE in context of "
"RESET_2_INIT failed! Maybe you didn't get "
"a LID h_ret=%li ehca_qp=%p qp_num=%x",
"a LID h_ret=%lli ehca_qp=%p qp_num=%x",
h_ret, my_qp, ibqp->qp_num);
goto modify_qp_exit2;
}
......@@ -1909,7 +1909,7 @@ int ehca_query_qp(struct ib_qp *qp,
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(qp->device, "hipz_h_query_qp() failed "
"ehca_qp=%p qp_num=%x h_ret=%li",
"ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, qp->qp_num, h_ret);
goto query_qp_exit1;
}
......@@ -2074,7 +2074,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%li "
ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
"ehca_qp=%p qp_num=%x",
h_ret, my_qp, my_qp->real_qp_num);
}
......@@ -2108,7 +2108,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
if (h_ret != H_SUCCESS) {
ret = ehca2ib_return_code(h_ret);
ehca_err(srq->device, "hipz_h_query_qp() failed "
"ehca_qp=%p qp_num=%x h_ret=%li",
"ehca_qp=%p qp_num=%x h_ret=%lli",
my_qp, my_qp->real_qp_num, h_ret);
goto query_srq_exit1;
}
......@@ -2179,7 +2179,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
if (h_ret != H_SUCCESS) {
ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "
ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
"ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
return ehca2ib_return_code(h_ret);
}
......
......@@ -822,7 +822,7 @@ static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
offset = qmap->next_wqe_idx * ipz_queue->qe_size;
wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
if (!wqe) {
ehca_err(cq->device, "Invalid wqe offset=%#lx on "
ehca_err(cq->device, "Invalid wqe offset=%#llx on "
"qp_num=%#x", offset, my_qp->real_qp_num);
return nr;
}
......
......@@ -85,7 +85,7 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
if (ret != H_SUCCESS) {
ehca_err(&shca->ib_device,
"Can't define AQP1 for port %x. h_ret=%li",
"Can't define AQP1 for port %x. h_ret=%lli",
port, ret);
return ret;
}
......
......@@ -116,7 +116,7 @@ extern int ehca_debug_level;
unsigned char *deb = (unsigned char *)(adr); \
for (x = 0; x < l; x += 16) { \
printk(KERN_INFO "EHCA_DMP:%s " format \
" adr=%p ofs=%04x %016lx %016lx\n", \
" adr=%p ofs=%04x %016llx %016llx\n", \
__func__, ##args, deb, x, \
*((u64 *)&deb[0]), *((u64 *)&deb[8])); \
deb += 16; \
......
......@@ -114,7 +114,7 @@ static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
physical = galpas->user.fw_handle;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical);
ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
/* VM_IO | VM_RESERVED are set by remap_pfn_range() */
ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
vma->vm_page_prot);
......
......@@ -226,7 +226,7 @@ u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
u32 *eq_ist)
{
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
u64 allocate_controls;
/* resource type */
......@@ -249,7 +249,7 @@ u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
*eq_ist = (u32)outs[5];
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resource - ret=%li ", ret);
ehca_gen_err("Not enough resource - ret=%lli ", ret);
return ret;
}
......@@ -270,7 +270,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
struct ehca_alloc_cq_parms *param)
{
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
......@@ -287,7 +287,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%li", ret);
ehca_gen_err("Not enough resources. ret=%lli", ret);
return ret;
}
......@@ -297,7 +297,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
{
u64 ret;
u64 allocate_controls, max_r10_reg, r11, r12;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
allocate_controls =
EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
......@@ -362,7 +362,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%li", ret);
ehca_gen_err("Not enough resources. ret=%lli", ret);
return ret;
}
......@@ -454,7 +454,7 @@ u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
const u64 count)
{
if (count != 1) {
ehca_gen_err("Ppage counter=%lx", count);
ehca_gen_err("Ppage counter=%llx", count);
return H_PARAMETER;
}
return hipz_h_register_rpage(adapter_handle,
......@@ -489,7 +489,7 @@ u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
const struct h_galpa gal)
{
if (count != 1) {
ehca_gen_err("Page counter=%lx", count);
ehca_gen_err("Page counter=%llx", count);
return H_PARAMETER;
}
......@@ -508,7 +508,7 @@ u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
const struct h_galpa galpa)
{
if (count > 1) {
ehca_gen_err("Page counter=%lx", count);
ehca_gen_err("Page counter=%llx", count);
return H_PARAMETER;
}
......@@ -525,7 +525,7 @@ u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
int dis_and_get_function_code)
{
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
adapter_handle.handle, /* r4 */
......@@ -548,7 +548,7 @@ u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
struct h_galpa gal)
{
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
adapter_handle.handle, /* r4 */
qp_handle.handle, /* r5 */
......@@ -557,7 +557,7 @@ u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
0, 0, 0, 0, 0);
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Insufficient resources ret=%li", ret);
ehca_gen_err("Insufficient resources ret=%lli", ret);
return ret;
}
......@@ -579,7 +579,7 @@ u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
struct ehca_qp *qp)
{
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = hcp_galpas_dtor(&qp->galpas);
if (ret) {
......@@ -593,7 +593,7 @@ u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
qp->ipz_qp_handle.handle, /* r6 */
0, 0, 0, 0, 0, 0);
if (ret == H_HARDWARE)
ehca_gen_err("HCA not operational. ret=%li", ret);
ehca_gen_err("HCA not operational. ret=%lli", ret);
ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
adapter_handle.handle, /* r4 */
......@@ -601,7 +601,7 @@ u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
0, 0, 0, 0, 0);
if (ret == H_RESOURCE)
ehca_gen_err("Resource still in use. ret=%li", ret);
ehca_gen_err("Resource still in use. ret=%lli", ret);
return ret;
}
......@@ -625,7 +625,7 @@ u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
u32 * bma_qp_nr)
{
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
adapter_handle.handle, /* r4 */
......@@ -636,7 +636,7 @@ u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
*bma_qp_nr = (u32)outs[1];
if (ret == H_ALIAS_EXIST)
ehca_gen_err("AQP1 already exists. ret=%li", ret);
ehca_gen_err("AQP1 already exists. ret=%lli", ret);
return ret;
}
......@@ -658,7 +658,7 @@ u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
0, 0);
if (ret == H_NOT_ENOUGH_RESOURCES)
ehca_gen_err("Not enough resources. ret=%li", ret);
ehca_gen_err("Not enough resources. ret=%lli", ret);
return ret;
}
......@@ -697,7 +697,7 @@ u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
0, 0, 0, 0);
if (ret == H_RESOURCE)
ehca_gen_err("H_FREE_RESOURCE failed ret=%li ", ret);
ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
return ret;
}
......@@ -719,7 +719,7 @@ u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
0, 0, 0, 0, 0);
if (ret == H_RESOURCE)
ehca_gen_err("Resource in use. ret=%li ", ret);
ehca_gen_err("Resource in use. ret=%lli ", ret);
return ret;
}
......@@ -733,7 +733,7 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
struct ehca_mr_hipzout_parms *outparms)
{
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
......@@ -774,9 +774,9 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
ehca_gen_err("logical_address_of_page not on a 4k boundary "
"adapter_handle=%lx mr=%p mr_handle=%lx "
"adapter_handle=%llx mr=%p mr_handle=%llx "
"pagesize=%x queue_type=%x "
"logical_address_of_page=%lx count=%lx",
"logical_address_of_page=%llx count=%llx",
adapter_handle.handle, mr,
mr->ipz_mr_handle.handle, pagesize, queue_type,
logical_address_of_page, count);
......@@ -794,7 +794,7 @@ u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
struct ehca_mr_hipzout_parms *outparms)
{
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
adapter_handle.handle, /* r4 */
......@@ -828,7 +828,7 @@ u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
struct ehca_mr_hipzout_parms *outparms)
{
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
adapter_handle.handle, /* r4 */
......@@ -855,7 +855,7 @@ u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
struct ehca_mr_hipzout_parms *outparms)
{
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
adapter_handle.handle, /* r4 */
......@@ -877,7 +877,7 @@ u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
struct ehca_mw_hipzout_parms *outparms)
{
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
adapter_handle.handle, /* r4 */
......@@ -895,7 +895,7 @@ u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
struct ehca_mw_hipzout_parms *outparms)
{
u64 ret;
u64 outs[PLPAR_HCALL9_BUFSIZE];
unsigned long outs[PLPAR_HCALL9_BUFSIZE];
ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
adapter_handle.handle, /* r4 */
......
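
The outs[] declarations changed above belong to the "Use consistent types for ehca_plpar_hcall9()" patch: the hypervisor-call return buffer becomes unsigned long so it keeps matching the firmware call's retbuf parameter once u64 is no longer the same type as unsigned long. A hedged sketch of the idea, assuming the plpar_hcall9() declaration from <asm/hvcall.h> (not the driver's own wrapper):

/*
 * Sketch only: plpar_hcall9() fills an unsigned long return buffer, so
 * declaring the buffer as u64 only compiles cleanly while
 * u64 == unsigned long.
 */
#include <asm/hvcall.h>		/* plpar_hcall9(), PLPAR_HCALL9_BUFSIZE */

static long example_hcall(unsigned long opcode)
{
	unsigned long outs[PLPAR_HCALL9_BUFSIZE];

	return plpar_hcall9(opcode, outs,
			    0, 0, 0, 0, 0, 0, 0, 0, 0);
}
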
......@@ -1462,7 +1462,8 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
}
static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
struct mlx4_ib_qp *qp, unsigned *lso_seg_len)
struct mlx4_ib_qp *qp, unsigned *lso_seg_len,
__be32 *lso_hdr_sz)
{
unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
......@@ -1479,12 +1480,8 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
/* make sure LSO header is written before overwriting stamping */
wmb();
wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
wr->wr.ud.hlen);
*lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
wr->wr.ud.hlen);
*lso_seg_len = halign;
return 0;
}
......@@ -1518,6 +1515,9 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
int uninitialized_var(stamp);
int uninitialized_var(size);
unsigned uninitialized_var(seglen);
__be32 dummy;
__be32 *lso_wqe;
__be32 uninitialized_var(lso_hdr_sz);
int i;
spin_lock_irqsave(&qp->sq.lock, flags);
......@@ -1525,6 +1525,8 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
ind = qp->sq_next_wqe;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
lso_wqe = &dummy;
if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
err = -ENOMEM;
*bad_wr = wr;
......@@ -1606,11 +1608,12 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
if (wr->opcode == IB_WR_LSO) {
err = build_lso_seg(wqe, wr, qp, &seglen);
err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz);
if (unlikely(err)) {
*bad_wr = wr;
goto out;
}
lso_wqe = (__be32 *) wqe;
wqe += seglen;
size += seglen / 16;
}
......@@ -1652,6 +1655,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
for (i = wr->num_sge - 1; i >= 0; --i, --dseg)
set_data_seg(dseg, wr->sg_list + i);
/*
* Possibly overwrite stamping in cacheline with LSO
* segment only after making sure all data segments
* are written.
*/
wmb();
*lso_wqe = lso_hdr_sz;
ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
MLX4_WQE_CTRL_FENCE : 0) | size;
......@@ -1686,7 +1697,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
stamp_send_wqe(qp, stamp, size * 16);
ind = pad_wraparound(qp, ind);
}
}
out:
......
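
The mlx4 hunks above implement the "Fix memory ordering problem when posting LSO sends" item: build_lso_seg() no longer writes the mss/header-size word itself; instead the caller stores it through *lso_wqe only after all data segments are written, with wmb() enforcing that order so the stamping in that cacheline is not overwritten too early. A generic sketch of this publish-after-barrier pattern (hypothetical descriptor layout, not mlx4's WQE):

/*
 * Sketch only: write every payload word first, issue a write barrier,
 * then store the single word that makes the descriptor valid.
 */
#include <linux/types.h>
#include <asm/barrier.h>	/* wmb(); header location varies by kernel version */

struct demo_wqe {
	__be32 payload[15];
	__be32 publish_word;	/* stand-in for the LSO mss/header-size word */
};

static void demo_post(struct demo_wqe *wqe, const __be32 *data, __be32 hdr_sz)
{
	int i;

	for (i = 0; i < 15; i++)
		wqe->payload[i] = data[i];

	wmb();			/* order payload writes before the publish */
	wqe->publish_word = hdr_sz;
}
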
......@@ -106,23 +106,17 @@ int ipoib_open(struct net_device *dev)
ipoib_dbg(priv, "bringing up interface\n");
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
napi_enable(&priv->napi);
if (ipoib_pkey_dev_delay_open(dev))
return 0;
napi_enable(&priv->napi);
if (ipoib_ib_dev_open(dev))
goto err_disable;
if (ipoib_ib_dev_open(dev)) {
napi_disable(&priv->napi);
return -EINVAL;
}
if (ipoib_ib_dev_up(dev)) {
ipoib_ib_dev_stop(dev, 1);
napi_disable(&priv->napi);
return -EINVAL;
}
if (ipoib_ib_dev_up(dev))
goto err_stop;
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
struct ipoib_dev_priv *cpriv;
......@@ -144,6 +138,15 @@ int ipoib_open(struct net_device *dev)
netif_start_queue(dev);
return 0;
err_stop:
ipoib_ib_dev_stop(dev, 1);
err_disable:
napi_disable(&priv->napi);
clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
return -EINVAL;
}
static int ipoib_stop(struct net_device *dev)
......
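
The rewritten ipoib_open() above also switches to a single goto-based unwind: each failing step jumps to a label that undoes only what already succeeded (err_stop stops the IB device, err_disable disables NAPI and clears IPOIB_FLAG_ADMIN_UP), so no error path leaves NAPI enabled or the ADMIN_UP flag set. A generic sketch of that unwind idiom, with hypothetical helpers:

#include <linux/errno.h>

/* Hypothetical setup/teardown steps standing in for the driver calls. */
static int  step_a(void)  { return 0; }
static int  step_b(void)  { return 0; }
static int  step_c(void)  { return -1; }	/* pretend the last step fails */
static void undo_a(void)  { }
static void undo_b(void)  { }

static int example_open(void)
{
	if (step_a())
		goto err;
	if (step_b())
		goto err_undo_a;
	if (step_c())
		goto err_undo_b;
	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
err:
	return -EINVAL;
}
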
......@@ -409,7 +409,7 @@ static int ipoib_mcast_join_complete(int status,
}
if (mcast->logcount++ < 20) {
if (status == -ETIMEDOUT) {
if (status == -ETIMEDOUT || status == -EAGAIN) {
ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
mcast->mcmember.mgid.raw, status);
} else {
......
......@@ -61,6 +61,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
rtnl_lock();
mutex_lock(&ppriv->vlan_mutex);
/*
......@@ -111,7 +112,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
goto device_init_failed;
}
result = register_netdev(priv->dev);
result = register_netdevice(priv->dev);
if (result) {
ipoib_warn(priv, "failed to initialize; error %i", result);
goto register_failed;
......@@ -134,12 +135,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
list_add_tail(&priv->list, &ppriv->child_intfs);
mutex_unlock(&ppriv->vlan_mutex);
rtnl_unlock();
return 0;
sysfs_failed:
ipoib_delete_debug_files(priv->dev);
unregister_netdev(priv->dev);
unregister_netdevice(priv->dev);
register_failed:
ipoib_dev_cleanup(priv->dev);
......@@ -149,6 +151,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
err:
mutex_unlock(&ppriv->vlan_mutex);
rtnl_unlock();
return result;
}
......@@ -162,10 +165,11 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
ppriv = netdev_priv(pdev);
rtnl_lock();
mutex_lock(&ppriv->vlan_mutex);
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey) {
unregister_netdev(priv->dev);
unregister_netdevice(priv->dev);
ipoib_dev_cleanup(priv->dev);
list_del(&priv->list);
free_netdev(priv->dev);
......@@ -175,6 +179,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
}
}
mutex_unlock(&ppriv->vlan_mutex);
rtnl_unlock();
return ret;
}
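
The ipoib_vlan_add()/ipoib_vlan_delete() hunks above fix the deadlock named in the shortlog by taking rtnl_lock() around child-interface creation and removal; since the RTNL is then already held, the calls switch from register_netdev()/unregister_netdev(), which take the RTNL internally, to register_netdevice()/unregister_netdevice(). A minimal sketch of that locking rule (hypothetical wrapper, not the driver code):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/*
 * register_netdev() acquires the RTNL itself, so calling it while
 * already holding rtnl_lock() would deadlock; under the lock, the
 * _netdevice() variants must be used instead.
 */
static int demo_register_under_rtnl(struct net_device *dev)
{
	int ret;

	rtnl_lock();
	ret = register_netdevice(dev);	/* RTNL held by this function */
	rtnl_unlock();

	return ret;
}
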
......@@ -107,9 +107,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_AUXC].num = request->num_qp;
profile[MLX4_RES_SRQ].num = request->num_srq;
profile[MLX4_RES_CQ].num = request->num_cq;
profile[MLX4_RES_EQ].num = min(dev_cap->max_eqs,
dev_cap->reserved_eqs +
num_possible_cpus() + 1);
profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs,
dev_cap->reserved_eqs +
num_possible_cpus() + 1);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
profile[MLX4_RES_MTT].num = request->num_mtt;
......
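
The final hunk covers the "mlx4_core: Fix min() warning" item: the kernel's min() macro type-checks its arguments and warns when the EQ limit and the computed value have different types, while min_t(unsigned, ...) casts both operands to unsigned first. Illustrative sketch (hypothetical values):

#include <linux/kernel.h>	/* min(), min_t() */

/*
 * Sketch only: min() rejects mixed-type comparisons at compile time;
 * min_t() forces both operands to the named type.
 */
static unsigned int demo_pick_eqs(int hw_limit, unsigned int wanted)
{
	/* return min(hw_limit, wanted); -- would trigger the distinct-types warning */
	return min_t(unsigned, hw_limit, wanted);
}
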