Commit 7c28da8b authored by David S. Miller

Merge branch 'ibmvnic-coding-style'

Lijun Pan says:

====================
ibmvnic: a set of coding style fixes

This series addresses several coding style problems.

v2: rebased on top of the tree. Added the Reviewed-by tags from the v1 reviews.
    Patch 8/8 is new.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 260b6971 8a96c80e
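Most of the hunks below are mechanical cleanups: block comments are converted to the networking style, redundant braces and parentheses are dropped, "unsigned long int" becomes "unsigned long", wrapped arguments are re-aligned, and strlcpy() calls move to strscpy(). As a small illustration of the preferred net/ comment form (hypothetical code, not taken from the patches):

/* Networking code starts the comment text on the opening line
 * and keeps the closing marker on a line of its own.
 */
static int example_helper(void)
{
        return 0;
}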
drivers/net/ethernet/ibm/ibmvnic.c

@@ -115,7 +115,7 @@ struct ibmvnic_stat {
 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
                         offsetof(struct ibmvnic_statistics, stat))
-#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
+#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
 static const struct ibmvnic_stat ibmvnic_stats[] = {
         {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
@@ -1221,8 +1221,7 @@ static int ibmvnic_open(struct net_device *netdev)
         rc = __ibmvnic_open(netdev);
 out:
-        /*
-         * If open fails due to a pending failover, set device state and
+        /* If open fails due to a pending failover, set device state and
          * return. Device operation will be handled by reset routine.
          */
         if (rc && adapter->failover_pending) {
@@ -1946,8 +1945,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
         if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
                 rtnl_lock();
-        /*
-         * Now that we have the rtnl lock, clear any pending failover.
+        /* Now that we have the rtnl lock, clear any pending failover.
          * This will ensure ibmvnic_open() has either completed or will
          * block until failover is complete.
          */
@@ -2042,9 +2040,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                 }
                 rc = ibmvnic_login(netdev);
-                if (rc) {
+                if (rc)
                         goto out;
-                }
                 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
                         rc = init_resources(adapter);
@@ -2072,14 +2069,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                 rc = reset_tx_pools(adapter);
                 if (rc) {
                         netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
-                                        rc);
+                                   rc);
                         goto out;
                 }
                 rc = reset_rx_pools(adapter);
                 if (rc) {
                         netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
-                                        rc);
+                                   rc);
                         goto out;
                 }
         }
@@ -2249,8 +2246,7 @@ static void __ibmvnic_reset(struct work_struct *work)
         spin_unlock_irqrestore(&adapter->state_lock, flags);
         if (adapter->force_reset_recovery) {
-                /*
-                 * Since we are doing a hard reset now, clear the
+                /* Since we are doing a hard reset now, clear the
                  * failover_pending flag so we don't ignore any
                  * future MOBILITY or other resets.
                  */
@@ -2322,8 +2318,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
         unsigned long flags;
         int ret;
-        /*
-         * If failover is pending don't schedule any other reset.
+        /* If failover is pending don't schedule any other reset.
          * Instead let the failover complete. If there is already a
          * a failover reset scheduled, we will detect and drop the
          * duplicate reset when walking the ->rwi_list below.
@@ -2338,7 +2333,8 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
         if (adapter->state == VNIC_PROBING) {
                 netdev_warn(netdev, "Adapter reset during probe\n");
-                ret = adapter->init_done_rc = EAGAIN;
+                adapter->init_done_rc = EAGAIN;
+                ret = EAGAIN;
                 goto err;
         }
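The hunk above splits a chained assignment ("ret = adapter->init_done_rc = EAGAIN;") into two statements, the form checkpatch prefers over multiple assignments in one expression. A tiny made-up illustration of the pattern:

/* Instead of the chained form:  ret = ctx->last_rc = -EBUSY; */
ctx->last_rc = -EBUSY;
ret = -EBUSY;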
@@ -2445,9 +2441,8 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget)
                 if (!pending_scrq(adapter, rx_scrq))
                         break;
                 next = ibmvnic_next_scrq(adapter, rx_scrq);
-                rx_buff =
-                        (struct ibmvnic_rx_buff *)be64_to_cpu(next->
-                                                              rx_comp.correlator);
+                rx_buff = (struct ibmvnic_rx_buff *)
+                          be64_to_cpu(next->rx_comp.correlator);
                 /* do error checking */
                 if (next->rx_comp.rc) {
                         netdev_dbg(netdev, "rx buffer returned with rc %x\n",
@@ -2638,9 +2633,9 @@ static void ibmvnic_get_drvinfo(struct net_device *netdev,
 {
         struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-        strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
-        strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
-        strlcpy(info->fw_version, adapter->fw_version,
+        strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
+        strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
+        strscpy(info->fw_version, adapter->fw_version,
                 sizeof(info->fw_version));
 }
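The ibmvnic_get_drvinfo() hunk replaces strlcpy() with strscpy(), the kernel's preferred bounded string copy: it always NUL-terminates the destination and returns -E2BIG on truncation, whereas strlcpy() returns the length of the source and can read past the end of a source buffer that is not NUL-terminated. A minimal sketch of the calling pattern (the helper and message are made up):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void example_copy_fw_version(char *dst, size_t dst_len, const char *src)
{
        ssize_t n = strscpy(dst, src, dst_len);

        /* strscpy() reports truncation explicitly instead of returning
         * strlen(src) the way strlcpy() did.
         */
        if (n == -E2BIG)
                pr_debug("firmware version string truncated\n");
}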
@@ -2752,7 +2747,6 @@ static int ibmvnic_set_channels(struct net_device *netdev,
                    channels->rx_count, channels->tx_count,
                    adapter->req_rx_queues, adapter->req_tx_queues);
         return ret;
-
 }
 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2841,8 +2835,8 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
                 return;
         for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
-                data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
-                                                       ibmvnic_stats[i].offset));
+                data[i] = be64_to_cpu(IBMVNIC_GET_STAT
+                                      (adapter, ibmvnic_stats[i].offset));
         for (j = 0; j < adapter->req_tx_queues; j++) {
                 data[i] = adapter->tx_stats_buffers[j].packets;
@@ -2882,6 +2876,7 @@ static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
         return 0;
 }
+
 static const struct ethtool_ops ibmvnic_ethtool_ops = {
         .get_drvinfo            = ibmvnic_get_drvinfo,
         .get_msglevel           = ibmvnic_get_msglevel,
@@ -3151,7 +3146,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
                 /* H_EOI would fail with rc = H_FUNCTION when running
                  * in XIVE mode which is expected, but not an error.
                  */
-                if (rc && (rc != H_FUNCTION))
+                if (rc && rc != H_FUNCTION)
                         dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
                                 val, rc);
         }
@@ -3654,8 +3649,8 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
         int rc;
         netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
-                   (unsigned long int)cpu_to_be64(u64_crq[0]),
-                   (unsigned long int)cpu_to_be64(u64_crq[1]));
+                   (unsigned long)cpu_to_be64(u64_crq[0]),
+                   (unsigned long)cpu_to_be64(u64_crq[1]));
         if (!adapter->crq.active &&
             crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
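This hunk and several that follow drop the redundant "int" from "unsigned long int" casts; both spellings name exactly the same type, checkpatch simply prefers the shorter form. For illustration (hypothetical declarations):

unsigned long a;        /* preferred spelling */
unsigned long int b;    /* same type, but flagged by checkpatch */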
@@ -3860,15 +3855,15 @@ static int send_login(struct ibmvnic_adapter *adapter)
         for (i = 0; i < adapter->req_tx_queues; i++) {
                 if (adapter->tx_scrq[i]) {
-                        tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
-                                                   crq_num);
+                        tx_list_p[i] =
+                                cpu_to_be64(adapter->tx_scrq[i]->crq_num);
                 }
         }
         for (i = 0; i < adapter->req_rx_queues; i++) {
                 if (adapter->rx_scrq[i]) {
-                        rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
-                                                   crq_num);
+                        rx_list_p[i] =
+                                cpu_to_be64(adapter->rx_scrq[i]->crq_num);
                 }
         }
@@ -3884,7 +3879,7 @@ static int send_login(struct ibmvnic_adapter *adapter)
         netdev_dbg(adapter->netdev, "Login Buffer:\n");
         for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
                 netdev_dbg(adapter->netdev, "%016lx\n",
-                           ((unsigned long int *)(adapter->login_buf))[i]);
+                           ((unsigned long *)(adapter->login_buf))[i]);
         }
         memset(&crq, 0, sizeof(crq));
@@ -4252,7 +4247,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
         netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
         for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
                 netdev_dbg(adapter->netdev, "%016lx\n",
-                           ((unsigned long int *)(buf))[i]);
+                           ((unsigned long *)(buf))[i]);
         netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
         netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
@@ -4411,8 +4406,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
         case PARTIALSUCCESS:
                 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
                          *req_value,
-                         (long int)be64_to_cpu(crq->request_capability_rsp.
-                                               number), name);
+                         (long)be64_to_cpu(crq->request_capability_rsp.number),
+                         name);
                 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
                     REQ_MTU) {
@@ -4482,7 +4477,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
         netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
         for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
                 netdev_dbg(adapter->netdev, "%016lx\n",
-                           ((unsigned long int *)(adapter->login_rsp_buf))[i]);
+                           ((unsigned long *)(adapter->login_rsp_buf))[i]);
         }
         /* Sanity checks */
@@ -4825,8 +4820,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
         long rc;
         netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
-                   (unsigned long int)cpu_to_be64(u64_crq[0]),
-                   (unsigned long int)cpu_to_be64(u64_crq[1]));
+                   (unsigned long)cpu_to_be64(u64_crq[0]),
+                   (unsigned long)cpu_to_be64(u64_crq[1]));
         switch (gen_crq->first) {
         case IBMVNIC_CRQ_INIT_RSP:
                 switch (gen_crq->cmd) {
@@ -5300,8 +5295,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
         netdev->ethtool_ops = &ibmvnic_ethtool_ops;
         SET_NETDEV_DEV(netdev, &dev->dev);
-        spin_lock_init(&adapter->stats_lock);
-
         INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
         INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
                           __ibmvnic_delayed_reset);
drivers/net/ethernet/ibm/ibmvnic.h

@@ -845,6 +845,7 @@ struct ibmvnic_crq_queue {
         union ibmvnic_crq *msgs;
         int size, cur;
         dma_addr_t msg_token;
+        /* Used for serialization of msgs, cur */
         spinlock_t lock;
         bool active;
         char name[32];
@@ -876,6 +877,7 @@ struct ibmvnic_sub_crq_queue {
         unsigned int irq;
         unsigned int pool_index;
         int scrq_num;
+        /* Used for serialization of msgs, cur */
         spinlock_t lock;
         struct sk_buff *rx_skb_top;
         struct ibmvnic_adapter *adapter;
@@ -985,7 +987,6 @@ struct ibmvnic_adapter {
         struct ibmvnic_statistics stats;
         dma_addr_t stats_token;
         struct completion stats_done;
-        spinlock_t stats_lock;
         int replenish_no_mem;
         int replenish_add_buff_success;
         int replenish_add_buff_failure;
@@ -1080,9 +1081,12 @@ struct ibmvnic_adapter {
         struct tasklet_struct tasklet;
         enum vnic_state state;
+        /* Used for serializatin of state field */
+        spinlock_t state_lock;
         enum ibmvnic_reset_reason reset_reason;
-        spinlock_t rwi_lock;
         struct list_head rwi_list;
+        /* Used for serialization of rwi_list */
+        spinlock_t rwi_lock;
         struct work_struct ibmvnic_reset;
         struct delayed_work ibmvnic_delayed_reset;
         unsigned long resetting;
@@ -1096,7 +1100,4 @@ struct ibmvnic_adapter {
         struct ibmvnic_tunables desired;
         struct ibmvnic_tunables fallback;
-
-        /* Used for serializatin of state field */
-        spinlock_t state_lock;
 };
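The header changes annotate each spinlock_t with the data it serializes, as checkpatch asks for, move rwi_lock next to the rwi_list it protects, and drop stats_lock together with its initialization in ibmvnic_probe(). A minimal illustration of the locking-comment pattern (the struct is hypothetical):

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_queue {
        struct list_head entries;
        /* Protects entries */
        spinlock_t lock;
};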