Commit 9995bac5 authored by David S. Miller

Merge branch 'ibmvnic-Correct-long-term-mapped-buffer-error-handling'

Thomas Falcon says:

====================
ibmvnic: Correct long-term-mapped buffer error handling

This patch set fixes the error-handling of long-term-mapped buffers
during adapter initialization and reset. The first patch fixes a bug
in an incorrectly defined descriptor that was keeping the return
codes from the VIO server from being properly checked. The second patch
fixes and cleans up the error-handling implementation.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8fac365f f3be0cbc
......@@ -163,16 +163,6 @@ static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
return rc;
}
/*
 * reset_long_term_buff - zero a long-term-mapped buffer and re-register
 * it with the VIO server.
 *
 * Clears the buffer contents, then sends a REQUEST_MAP CRQ and blocks on
 * adapter->fw_done until the response handler signals completion.
 *
 * NOTE(review): this version never inspects the firmware's response code
 * after the completion fires, so a failed remap is silently ignored --
 * this is the defect the replacement int-returning version fixes.
 */
static void reset_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb)
{
/* Discard stale data before handing the buffer back to firmware. */
memset(ltb->buff, 0, ltb->size);
init_completion(&adapter->fw_done);
send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
/* Released by the REQUEST_MAP_RSP path in the CRQ handler. */
wait_for_completion(&adapter->fw_done);
}
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
......@@ -193,6 +183,12 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
send_request_map(adapter, ltb->addr,
ltb->size, ltb->map_id);
wait_for_completion(&adapter->fw_done);
if (adapter->fw_done_rc) {
dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
adapter->fw_done_rc);
return -1;
}
return 0;
}
......@@ -210,6 +206,24 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
/*
 * reset_long_term_buff - zero a long-term-mapped buffer and re-register
 * it with the VIO server.
 * @adapter: owning adapter; its fw_done completion/rc pair carries the
 *           firmware response.
 * @ltb:     buffer to clear and remap.
 *
 * Blocks until the REQUEST_MAP response arrives.  On a firmware-reported
 * failure the buffer is released and a fresh one is allocated in its
 * place.
 *
 * Return: 0 on success, or the result of alloc_long_term_buff() when the
 * remap was rejected and reallocation was attempted.
 */
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	/* fw_done_rc is filled in by the REQUEST_MAP_RSP CRQ handler. */
	if (!adapter->fw_done_rc)
		return 0;

	/* Firmware refused the remap; rebuild the buffer from scratch. */
	dev_info(&adapter->vdev->dev,
		 "Reset failed, attempting to free and reallocate buffer\n");
	free_long_term_buff(adapter, ltb);
	return alloc_long_term_buff(adapter, ltb, ltb->size);
}
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
......@@ -366,13 +380,15 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
int rx_scrqs;
int i, j;
int i, j, rc;
rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
for (i = 0; i < rx_scrqs; i++) {
rx_pool = &adapter->rx_pool[i];
reset_long_term_buff(adapter, &rx_pool->long_term_buff);
rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff);
if (rc)
return rc;
for (j = 0; j < rx_pool->size; j++)
rx_pool->free_map[j] = j;
......@@ -494,13 +510,15 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
int tx_scrqs;
int i, j;
int i, j, rc;
tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
for (i = 0; i < tx_scrqs; i++) {
tx_pool = &adapter->tx_pool[i];
reset_long_term_buff(adapter, &tx_pool->long_term_buff);
rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
if (rc)
return rc;
memset(tx_pool->tx_buff, 0,
adapter->req_tx_entries_per_subcrq *
......@@ -3075,36 +3093,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
return 0;
}
/*
 * handle_request_map_rsp - process the VIO server's response to a
 * REQUEST_MAP CRQ and wake the waiter blocked on adapter->fw_done.
 *
 * On an error response, the previously consumed map id is rolled back
 * and the tx/rx pool whose long term buffer carried that map id has its
 * map_id cleared so the failed mapping is not referenced later.
 *
 * NOTE(review): the error path reads adapter->login_rsp_buf; presumably
 * this response can only arrive after a successful login -- verify
 * against the CRQ state machine.
 */
static void handle_request_map_rsp(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
u8 map_id = crq->request_map_rsp.map_id;
int tx_subcrqs;
int rx_subcrqs;
long rc;
int i;
tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
rc = crq->request_map_rsp.rc.code;
if (rc) {
dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
/* Undo the map-id increment done when the request was sent. */
adapter->map_id--;
/* need to find and zero tx/rx_pool map_id */
for (i = 0; i < tx_subcrqs; i++) {
if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
adapter->tx_pool[i].long_term_buff.map_id = 0;
}
for (i = 0; i < rx_subcrqs; i++) {
if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
adapter->rx_pool[i].long_term_buff.map_id = 0;
}
}
/* Unblock the sender waiting in alloc/reset_long_term_buff(). */
complete(&adapter->fw_done);
}
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
......@@ -3385,7 +3373,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
handle_query_map_rsp(crq, adapter);
break;
case REQUEST_MAP_RSP:
handle_request_map_rsp(crq, adapter);
adapter->fw_done_rc = crq->request_map_rsp.rc.code;
complete(&adapter->fw_done);
break;
case REQUEST_UNMAP_RSP:
handle_request_unmap_rsp(crq, adapter);
......
......@@ -595,7 +595,7 @@ struct ibmvnic_request_map_rsp {
u8 cmd;
u8 reserved1;
u8 map_id;
u8 reserved2[4];
u8 reserved2[8];
struct ibmvnic_rc rc;
} __packed __aligned(8);
......@@ -988,6 +988,7 @@ struct ibmvnic_adapter {
spinlock_t error_list_lock;
struct completion fw_done;
int fw_done_rc;
/* partner capabilities */
u64 min_tx_queues;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment