Commit a46a2802 authored by Mike Marciniszyn, committed by Roland Dreier

IB/qib: Fix checkpatch warnings

Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 041af0bb
@@ -1460,11 +1460,14 @@ const char *qib_get_unit_name(int unit);
  * Flush write combining store buffers (if present) and perform a write
  * barrier.
  */
+static inline void qib_flush_wc(void)
+{
 #if defined(CONFIG_X86_64)
-#define qib_flush_wc() asm volatile("sfence" : : : "memory")
+	asm volatile("sfence" : : : "memory");
 #else
-#define qib_flush_wc() wmb() /* no reorder around wc flush */
+	wmb(); /* no reorder around wc flush */
 #endif
+}
 
 /* global module parameter variables */
 extern unsigned qib_ibmtu;
......
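Note: the hunk above replaces the two qib_flush_wc() macro definitions with a single static inline, the form checkpatch and the kernel coding style prefer for non-trivial or arch-conditional helpers, since an inline function is type-checked and reads like a normal call. A rough userspace sketch of the same conversion (the name and the fallback barrier below are illustrative, not taken from the driver):

#include <stdio.h>

/* Hypothetical userspace analogue of the qib_flush_wc() conversion:
 * the arch-conditional body lives in one static inline function
 * instead of two #define variants. */
static inline void flush_wc_example(void)
{
#if defined(__x86_64__)
	__asm__ __volatile__("sfence" : : : "memory");
#else
	__sync_synchronize();	/* generic barrier standing in for wmb() */
#endif
}

int main(void)
{
	flush_wc_example();
	printf("store buffers flushed\n");
	return 0;
}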
@@ -257,7 +257,7 @@ struct qib_base_info {
 	/* shared memory page for send buffer disarm status */
 	__u64 spi_sendbuf_status;
-} __attribute__ ((aligned(8)));
+} __aligned(8);
 
 /*
  * This version number is given to the driver by the user code during
@@ -361,7 +361,7 @@ struct qib_user_info {
 	 */
 	__u64 spu_base_info;
-} __attribute__ ((aligned(8)));
+} __aligned(8);
 
 /* User commands. */
......
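Note: __aligned(8) is the kernel's shorthand for __attribute__ ((aligned(8))), and checkpatch asks for the short form. A self-contained sketch of the effect, with the macro definition and struct name assumed purely for illustration:

#include <stdio.h>

/* Assumed stand-in for the kernel's __aligned() helper. */
#define __aligned(x) __attribute__((aligned(x)))

/* Illustrative struct, not the real qib_base_info. */
struct base_info_example {
	unsigned long long sendbuf_status;
} __aligned(8);

int main(void)
{
	printf("alignment = %zu bytes\n", _Alignof(struct base_info_example));
	return 0;
}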
@@ -255,7 +255,6 @@ void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
 	DEBUGFS_FILE_CREATE(opcode_stats);
 	DEBUGFS_FILE_CREATE(ctx_stats);
 	DEBUGFS_FILE_CREATE(qp_stats);
-	return;
 }
 
 void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
......
@@ -259,9 +259,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
 			if (len > sizeof(ifp->if_serial))
 				len = sizeof(ifp->if_serial);
 			memcpy(snp, ifp->if_serial, len);
-		} else
-			memcpy(dd->serial, ifp->if_serial,
-			       sizeof(ifp->if_serial));
+		} else {
+			memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
+		}
 
 		if (!strstr(ifp->if_comment, "Tested successfully"))
 			qib_dev_err(dd,
 				"Board SN %s did not pass functional test: %s\n",
......
@@ -351,9 +351,10 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
 			 * unless perhaps the user has mpin'ed the pages
 			 * themselves.
 			 */
-			qib_devinfo(dd->pcidev,
-				    "Failed to lock addr %p, %u pages: "
-				    "errno %d\n", (void *) vaddr, cnt, -ret);
+			qib_devinfo(
+				dd->pcidev,
+				"Failed to lock addr %p, %u pages: errno %d\n",
+				(void *) vaddr, cnt, -ret);
 			goto done;
 		}
 		for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
@@ -951,8 +952,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
 		/* rcvegrbufs are read-only on the slave */
 		if (vma->vm_flags & VM_WRITE) {
 			qib_devinfo(dd->pcidev,
-				    "Can't map eager buffers as "
-				    "writable (flags=%lx)\n", vma->vm_flags);
+				    "Can't map eager buffers as writable (flags=%lx)\n",
+				    vma->vm_flags);
 			ret = -EPERM;
 			goto bail;
 		}
@@ -1247,10 +1248,7 @@ static int init_subctxts(struct qib_devdata *dd,
 	if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
 		uinfo->spu_userversion & 0xffff)) {
 		qib_devinfo(dd->pcidev,
-			"Mismatched user version (%d.%d) and driver "
-			"version (%d.%d) while context sharing. Ensure "
-			"that driver and library are from the same "
-			"release.\n",
+			"Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
 			(int) (uinfo->spu_userversion >> 16),
 			(int) (uinfo->spu_userversion & 0xffff),
 			QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
......
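Note: the string changes above (and in the hunks that follow) join user-visible messages that had been split with adjacent string-literal concatenation. checkpatch flags split strings because a message copied from the kernel log can no longer be found with a single grep of the source; keeping the whole format string on one line is preferred even when it exceeds 80 columns. A minimal illustration of the two forms, reusing one of the driver's messages purely as example text:

#include <stdio.h>

int main(void)
{
	/* Split form: harder to grep for the full message. */
	printf("Failed to lock addr %p, %u pages: "
	       "errno %d\n", (void *)0, 4u, 22);

	/* Joined form: the whole message is searchable on one line. */
	printf("Failed to lock addr %p, %u pages: errno %d\n",
	       (void *)0, 4u, 22);
	return 0;
}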
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(chase, "Enable state chase handling");
 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
-MODULE_PARM_DESC(long_attenuation, \
+MODULE_PARM_DESC(long_attenuation,
 		 "attenuation cutoff (dB) for long copper cable setup");
 
 static ushort qib_singleport;
@@ -153,7 +153,7 @@ static struct kparam_string kp_txselect = {
 static int setup_txselect(const char *, struct kernel_param *);
 module_param_call(txselect, setup_txselect, param_get_string,
 		  &kp_txselect, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(txselect, \
+MODULE_PARM_DESC(txselect,
 		 "Tx serdes indices (for no QSFP or invalid QSFP data)");
 
 #define BOARD_QME7342 5
@@ -6584,8 +6584,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 			ppd->vls_supported = IB_VL_VL0_7;
 		else {
 			qib_devinfo(dd->pcidev,
-				    "Invalid num_vls %u for MTU %d "
-				    ", using 4 VLs\n",
+				    "Invalid num_vls %u for MTU %d , using 4 VLs\n",
 				    qib_num_cfg_vls, mtu);
 			ppd->vls_supported = IB_VL_VL0_3;
 			qib_num_cfg_vls = 4;
......
@@ -140,7 +140,7 @@ int qib_create_ctxts(struct qib_devdata *dd)
 	 * Allocate full ctxtcnt array, rather than just cfgctxts, because
 	 * cleanup iterates across all possible ctxts.
 	 */
-	dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
+	dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
 	if (!dd->rcd) {
 		qib_dev_err(dd,
 			"Unable to allocate ctxtdata array, failing\n");
@@ -1025,8 +1025,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
 	addr = vmalloc(cnt);
 	if (!addr) {
 		qib_devinfo(dd->pcidev,
-			 "Couldn't get memory for checking PIO perf,"
-			 " skipping\n");
+			 "Couldn't get memory for checking PIO perf, skipping\n");
 		goto done;
 	}
@@ -1178,7 +1177,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
 		if (!list_empty(&dd->list))
 			list_del_init(&dd->list);
 		ib_dealloc_device(&dd->verbs_dev.ibdev);
-		return ERR_PTR(ret);;
+		return ERR_PTR(ret);
 	}
 
 	/*
......
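Note: the kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, ...) to kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), ...) change (and the kcalloc/kmalloc_array conversions later in this commit) removes an open-coded multiplication that could overflow; the two-argument allocators check the product and fail instead of returning an undersized buffer. A userspace analogue with malloc()/calloc() shows the difference (illustrative only):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = SIZE_MAX / 2 + 1;	/* pathological element count */
	size_t sz = 4;

	/* malloc(n * sz): the product wraps around, so this may "succeed"
	 * with a buffer far smaller than intended. */
	void *bad = malloc(n * sz);

	/* calloc(n, sz): spots the overflow and returns NULL instead. */
	void *good = calloc(n, sz);

	printf("malloc: %p, calloc: %p\n", bad, good);
	free(bad);
	free(good);
	return 0;
}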
@@ -168,7 +168,6 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
 	ppd->lastibcstat = ibcs;
 	if (ev)
 		signal_ib_event(ppd, ev);
-	return;
 }
 
 void qib_clear_symerror_on_linkup(unsigned long opaque)
......
@@ -210,7 +210,7 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
 	/* We can't pass qib_msix_entry array to qib_msix_setup
 	 * so use a dummy msix_entry array and copy the allocated
 	 * irq back to the qib_msix_entry array. */
-	msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL);
+	msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
 	if (!msix_entry)
 		goto do_intx;
@@ -234,8 +234,10 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
 	kfree(msix_entry);
 
 do_intx:
-	qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, "
-			"falling back to INTx\n", nvec, ret);
+	qib_dev_err(
+		dd,
+		"pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
+		nvec, ret);
 	*msixcnt = 0;
 	qib_enable_intx(dd->pcidev);
 }
......
@@ -81,7 +81,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
 	 * Module could take up to 2 Msec to respond to MOD_SEL, and there
 	 * is no way to tell if it is ready, so we must wait.
 	 */
-	msleep(2);
+	msleep(20);
 
 	/* Make sure TWSI bus is in sane state. */
 	ret = qib_twsi_reset(dd);
@@ -139,7 +139,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
 	else if (pass)
 		qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
 
-	msleep(2);
+	msleep(20);
 bail:
 	mutex_unlock(&dd->eep_lock);
@@ -189,7 +189,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
 	 * Module could take up to 2 Msec to respond to MOD_SEL,
 	 * and there is no way to tell if it is ready, so we must wait.
 	 */
-	msleep(2);
+	msleep(20);
 
 	/* Make sure TWSI bus is in sane state. */
 	ret = qib_twsi_reset(dd);
@@ -234,7 +234,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
 	 * going away, and there is no way to tell if it is ready.
 	 * so we must wait.
 	 */
-	msleep(2);
+	msleep(20);
 bail:
 	mutex_unlock(&dd->eep_lock);
@@ -480,7 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
 	udelay(20); /* Generous RST dwell */
 	dd->f_gpio_mod(dd, mask, mask, mask);
-	return;
 }
 
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
......
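Note: the msleep(2) to msleep(20) changes in the QSFP code address the checkpatch warning that msleep() of less than about 20 ms can sleep far longer than requested, because it rounds up to jiffies (10 ms granularity at HZ=100); rounding the settle delay up to 20 ms makes the behaviour predictable. For genuinely short waits the kernel timer documentation suggests usleep_range() instead, but this commit simply lengthens the sleep.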
@@ -247,7 +247,7 @@ static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
 		struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 
 		return ppd->guid;
-	} else
+	}
 	return ibp->guids[index - 1];
 }
......
@@ -922,7 +922,7 @@ qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
  * IRQ not set up at this point in init, so we poll.
  */
 #define IB_SERDES_TRIM_DONE (1ULL << 11)
-#define TRIM_TMO (30)
+#define TRIM_TMO (15)
 
 static int qib_sd_trimdone_poll(struct qib_devdata *dd)
 {
@@ -940,7 +940,7 @@ static int qib_sd_trimdone_poll(struct qib_devdata *dd)
 			ret = 1;
 			break;
 		}
-		msleep(10);
+		msleep(20);
 	}
 	if (trim_tmo >= TRIM_TMO) {
 		qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
......
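Note: the TRIM_TMO change pairs with the msleep() change inside qib_sd_trimdone_poll(), so the overall poll budget stays roughly the same: 30 iterations x 10 ms is about 300 ms before, and 15 iterations x 20 ms is about 300 ms after (assuming one sleep per loop iteration, as the hunk suggests); only the reported retry count is halved.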
@@ -50,7 +50,7 @@
 /* expected size of headers (for dma_pool) */
 #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
 /* attempt to drain the queue for 5secs */
-#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
+#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
 
 /*
  * track how many times a process open this driver.
@@ -1142,7 +1142,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
 		qib_user_sdma_hwqueue_clean(ppd);
 		qib_user_sdma_queue_clean(ppd, pq);
 		mutex_unlock(&pq->lock);
-		msleep(10);
+		msleep(20);
 	}
 	if (pq->num_pending || pq->num_sending) {
@@ -1316,8 +1316,6 @@ void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
 	if (nfree && !list_empty(pktlist))
 		goto retry;
-	return;
 }
 
 /* pq->lock must be held, get packets on the wire... */
......
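Note: the same pattern applies here: QIB_USER_SDMA_DRAIN_TIMEOUT drops from 500 to 250 while the per-iteration msleep() doubles from 10 ms to 20 ms, so the documented drain budget is unchanged (500 x 10 ms = 250 x 20 ms = 5 s), which is why the "attempt to drain the queue for 5secs" comment is left as is.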
@@ -2054,7 +2054,9 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	dev->qp_table_size = ib_qib_qp_table_size;
 	get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
-	dev->qp_table = kmalloc(dev->qp_table_size * sizeof(*dev->qp_table),
+	dev->qp_table = kmalloc_array(
+				dev->qp_table_size,
+				sizeof(*dev->qp_table),
 				GFP_KERNEL);
 	if (!dev->qp_table) {
 		ret = -ENOMEM;
......
@@ -91,7 +91,7 @@ int qib_enable_wc(struct qib_devdata *dd)
 	}
 
 	for (bits = 0; !(piolen & (1ULL << bits)); bits++)
-		/* do nothing */ ;
+		; /* do nothing */
 	if (piolen != (1ULL << bits)) {
 		piolen >>= bits;
......
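Note: the final hunk only moves the empty-statement semicolon ahead of the comment; the loop itself scans piolen for its lowest set bit, and the following comparison checks whether piolen is a power of two. A standalone illustration of the same idiom (the example value is arbitrary, not from the driver):

#include <stdio.h>

int main(void)
{
	unsigned long long piolen = 0x30000;	/* must be nonzero, or the loop never ends */
	unsigned int bits;

	/* Find the index of the lowest set bit. */
	for (bits = 0; !(piolen & (1ULL << bits)); bits++)
		; /* do nothing */

	printf("lowest set bit of 0x%llx is %u; power of two? %s\n",
	       piolen, bits, (piolen == (1ULL << bits)) ? "yes" : "no");
	return 0;
}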