Commit ede285b1 authored by David S. Miller

Merge branch 'ibmvnic-fixes'

Sukadev Bhattiprolu says:

====================
ibmvnic: Assorted bug fixes

Assorted bug fixes that we tested over the last several weeks.

Thanks to Brian King, Cris Forno, Dany Madden and Rick Lindsley for
reviews and help with testing.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c2f5c57d 154b3b2a
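
The core of the series is the rework of the long term buffer (LTB) mapping error paths: alloc_long_term_buff() now funnels every failure through a single "out:" label that frees the DMA buffer, clears ltb->buff, and releases fw_lock. As a standalone illustration of that single-exit cleanup pattern, here is a minimal userspace sketch; the names (alloc_ltb(), map_with_firmware()) are illustrative stand-ins, not the driver code itself:

#include <stdio.h>
#include <stdlib.h>

struct ltb {			/* stand-in for ibmvnic_long_term_buff */
	void *buff;
	size_t size;
};

static int map_with_firmware(struct ltb *ltb)
{
	(void)ltb;
	return -1;		/* simulate the firmware map request failing */
}

static int alloc_ltb(struct ltb *ltb, size_t size)
{
	int rc;

	ltb->buff = calloc(1, size);
	if (!ltb->buff)
		return -1;
	ltb->size = size;

	rc = map_with_firmware(ltb);
	if (rc) {
		fprintf(stderr, "map request failed, rc = %d\n", rc);
		goto out;
	}
	rc = 0;
out:
	if (rc) {
		/* Single cleanup point: free the buffer and NULL the
		 * pointer so a later free cannot hit a stale address.
		 */
		free(ltb->buff);
		ltb->buff = NULL;
	}
	return rc;
}

int main(void)
{
	struct ltb ltb = { 0 };

	printf("alloc_ltb: rc=%d buff=%p\n", alloc_ltb(&ltb, 4096), ltb.buff);
	return 0;
}

The value of the pattern shows up in the hunks below: the old code repeated the dma_free_coherent()/mutex_unlock() pair on three separate error paths, and none of them cleared ltb->buff, leaving a stale pointer behind for a later free_long_term_buff().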
@@ -106,6 +106,8 @@ static void release_crq_queue(struct ibmvnic_adapter *);
 static int __ibmvnic_set_mac(struct net_device *, u8 *);
 static int init_crq_queue(struct ibmvnic_adapter *adapter);
 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
+static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
+					 struct ibmvnic_sub_crq_queue *tx_scrq);
 
 struct ibmvnic_stat {
 	char name[ETH_GSTRING_LEN];
@@ -209,12 +211,11 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 	mutex_lock(&adapter->fw_lock);
 	adapter->fw_done_rc = 0;
 	reinit_completion(&adapter->fw_done);
-	rc = send_request_map(adapter, ltb->addr,
-			      ltb->size, ltb->map_id);
+	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
 	if (rc) {
-		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
-		mutex_unlock(&adapter->fw_lock);
-		return rc;
+		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
+		goto out;
 	}
 
 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
@@ -222,20 +223,23 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 		dev_err(dev,
 			"Long term map request aborted or timed out,rc = %d\n",
 			rc);
-		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
-		mutex_unlock(&adapter->fw_lock);
-		return rc;
+		goto out;
 	}
 
 	if (adapter->fw_done_rc) {
 		dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
 			adapter->fw_done_rc);
+		rc = -1;
+		goto out;
+	}
+	rc = 0;
+out:
+	if (rc) {
 		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
-		mutex_unlock(&adapter->fw_lock);
-		return -1;
+		ltb->buff = NULL;
 	}
 	mutex_unlock(&adapter->fw_lock);
-	return 0;
+	return rc;
 }
 
 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
@@ -255,14 +259,44 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
 		send_request_unmap(adapter, ltb->map_id);
 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+	ltb->buff = NULL;
+	ltb->map_id = 0;
 }
 
-static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb)
+static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
+				struct ibmvnic_long_term_buff *ltb)
 {
-	if (!ltb->buff)
-		return -EINVAL;
+	struct device *dev = &adapter->vdev->dev;
+	int rc;
 
 	memset(ltb->buff, 0, ltb->size);
+
+	mutex_lock(&adapter->fw_lock);
+	adapter->fw_done_rc = 0;
+
+	reinit_completion(&adapter->fw_done);
+	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+	if (rc) {
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
+
+	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+	if (rc) {
+		dev_info(dev,
+			 "Reset failed, long term map request timed out or aborted\n");
+		mutex_unlock(&adapter->fw_lock);
+		return rc;
+	}
+
+	if (adapter->fw_done_rc) {
+		dev_info(dev,
+			 "Reset failed, attempting to free and reallocate buffer\n");
+		free_long_term_buff(adapter, ltb);
+		mutex_unlock(&adapter->fw_lock);
+		return alloc_long_term_buff(adapter, ltb, ltb->size);
+	}
+	mutex_unlock(&adapter->fw_lock);
 	return 0;
 }
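
Note the fallback in the rewritten reset_long_term_buff() above: instead of only zeroing the buffer, it re-sends the map request to firmware and, if firmware rejects the mapping, frees and reallocates the LTB rather than failing the reset outright. A standalone sketch of that remap-or-rebuild idea, with stand-in names and a simulated firmware response (not the driver code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	void *mem;
	size_t size;
};

static int firmware_accepts_remap;	/* simulate the firmware's answer */

static int remap_with_firmware(struct buf *b)
{
	(void)b;
	return firmware_accepts_remap ? 0 : -1;
}

static int alloc_buf(struct buf *b, size_t size)
{
	b->mem = calloc(1, size);
	b->size = size;
	return b->mem ? 0 : -1;
}

static void free_buf(struct buf *b)
{
	free(b->mem);
	b->mem = NULL;
}

/* Reset for reuse: zero the buffer and ask firmware to map it again.
 * If firmware refuses, fall back to a full free-and-reallocate instead
 * of failing the whole reset.
 */
static int reset_buf(struct buf *b)
{
	memset(b->mem, 0, b->size);
	if (remap_with_firmware(b)) {
		size_t size = b->size;

		fprintf(stderr, "remap rejected, reallocating\n");
		free_buf(b);
		return alloc_buf(b, size);
	}
	return 0;
}

int main(void)
{
	struct buf b;

	if (alloc_buf(&b, 4096))
		return 1;
	firmware_accepts_remap = 0;
	printf("reset_buf: %d\n", reset_buf(&b));	/* takes the fallback path */
	free_buf(&b);
	return 0;
}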
@@ -298,7 +332,14 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 
 	rx_scrq = adapter->rx_scrq[pool->index];
 	ind_bufp = &rx_scrq->ind_buf;
-	for (i = 0; i < count; ++i) {
+
+	/* netdev_alloc_skb() could have failed after we saved a few skbs
+	 * in the indir_buf and we would not have sent them to VIOS yet.
+	 * To account for them, start the loop at ind_bufp->index rather
+	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
+	 * be 0.
+	 */
+	for (i = ind_bufp->index; i < count; ++i) {
 		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
 		if (!skb) {
 			dev_err(dev, "Couldn't replenish rx buff\n");
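
The new comment explains why the loop now starts at ind_bufp->index: a failed netdev_alloc_skb() can abort replenish_rx_pool() after some skbs were staged in the indirect buffer but before they were pushed to VIOS, and the next pass must not stage those slots again. A standalone sketch of the resume-from-saved-index pattern, with simplified stand-in names (not the driver code):

#include <stdio.h>

#define POOL_COUNT 8

struct ind_buf {
	int index;		/* entries staged but not yet flushed */
};

static int alloc_budget;	/* lets us simulate allocation failure */

static int alloc_entry(void)
{
	if (alloc_budget == 0)
		return -1;
	alloc_budget--;
	return 0;
}

static void replenish(struct ind_buf *ind_bufp)
{
	/* Start at ind_bufp->index, not 0: entries [0, index) were staged
	 * by an earlier pass that aborted before flushing and must not be
	 * allocated again. If everything was flushed, index is 0 and this
	 * is an ordinary 0..POOL_COUNT loop.
	 */
	for (int i = ind_bufp->index; i < POOL_COUNT; ++i) {
		if (alloc_entry()) {
			printf("alloc failed at %d, %d entries left staged\n",
			       i, ind_bufp->index);
			return;
		}
		ind_bufp->index = i + 1;	/* entry i is now staged */
	}
	printf("flushed %d entries\n", ind_bufp->index);
	ind_bufp->index = 0;	/* everything pushed out */
}

int main(void)
{
	struct ind_buf ib = { .index = 0 };

	alloc_budget = 5;
	replenish(&ib);			/* aborts at i == 5, leaves 5 staged */
	alloc_budget = POOL_COUNT;
	replenish(&ib);			/* resumes at i == 5, no duplicates */
	return 0;
}

On the retry pass the loop picks up exactly where the failed pass left off, so the already-staged entries are neither leaked nor allocated twice.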
@@ -484,7 +525,8 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
 					  rx_pool->size *
 					  rx_pool->buff_size);
 		} else {
-			rc = reset_long_term_buff(&rx_pool->long_term_buff);
+			rc = reset_long_term_buff(adapter,
+						  &rx_pool->long_term_buff);
 		}
 
 		if (rc)
@@ -607,11 +649,12 @@ static int init_rx_pools(struct net_device *netdev)
 	return 0;
 }
 
-static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool)
+static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
+			     struct ibmvnic_tx_pool *tx_pool)
 {
 	int rc, i;
 
-	rc = reset_long_term_buff(&tx_pool->long_term_buff);
+	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
 	if (rc)
 		return rc;
@@ -638,10 +681,11 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
 
 	tx_scrqs = adapter->num_active_tx_pools;
 	for (i = 0; i < tx_scrqs; i++) {
-		rc = reset_one_tx_pool(&adapter->tso_pool[i]);
+		ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
+		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
 		if (rc)
 			return rc;
-		rc = reset_one_tx_pool(&adapter->tx_pool[i]);
+		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
 		if (rc)
 			return rc;
 	}
@@ -734,8 +778,11 @@ static int init_tx_pools(struct net_device *netdev)
 
 	adapter->tso_pool = kcalloc(tx_subcrqs,
 				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
-	if (!adapter->tso_pool)
+	if (!adapter->tso_pool) {
+		kfree(adapter->tx_pool);
+		adapter->tx_pool = NULL;
 		return -1;
+	}
 
 	adapter->num_active_tx_pools = tx_subcrqs;
@@ -1180,6 +1227,11 @@ static int __ibmvnic_open(struct net_device *netdev)
 	netif_tx_start_all_queues(netdev);
 
+	if (prev_state == VNIC_CLOSED) {
+		for (i = 0; i < adapter->req_rx_queues; i++)
+			napi_schedule(&adapter->napi[i]);
+	}
+
 	adapter->state = VNIC_OPEN;
 	return rc;
 }
@@ -1583,7 +1635,8 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
 	ind_bufp->index = 0;
 	if (atomic_sub_return(entries, &tx_scrq->used) <=
 	    (adapter->req_tx_entries_per_subcrq / 2) &&
-	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+	    __netif_subqueue_stopped(adapter->netdev, queue_num) &&
+	    !test_bit(0, &adapter->resetting)) {
 		netif_wake_subqueue(adapter->netdev, queue_num);
 		netdev_dbg(adapter->netdev, "Started queue %d\n",
 			   queue_num);
@@ -1676,7 +1729,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 		tx_send_failed++;
 		tx_dropped++;
 		ret = NETDEV_TX_OK;
-		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
 		goto out;
 	}
@@ -3140,6 +3192,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
 
 			netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
 				   i);
+			ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
 			if (adapter->tx_scrq[i]->irq) {
 				free_irq(adapter->tx_scrq[i]->irq,
 					 adapter->tx_scrq[i]);
@@ -3213,7 +3266,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 		/* H_EOI would fail with rc = H_FUNCTION when running
 		 * in XIVE mode which is expected, but not an error.
 		 */
-		if (rc && rc != H_FUNCTION)
+		if (rc && (rc != H_FUNCTION))
 			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
 				val, rc);
 	}