Commit 545a112b authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-06-20

This series contains updates to i40e and i40evf.

Anjali provides an update to the register definitions to handle the latest
changes from the hardware.  She also provides a fix so that we do not try to
access the rings through the q_vectors at the time the q_vectors are freed
(sketched below).
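
That ring/q_vector diff is not shown in this excerpt.  A minimal sketch of
the idea, assuming the driver's usual structures (i40e_ring, i40e_q_vector,
the i40e_for_each_ring() iterator) rather than the literal patch:

/* Sketch only: clear the ring->q_vector back-pointers before the vector
 * is freed, so nothing can reach a stale vector through a ring later.
 */
static void example_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *ring;

	/* disassociate the rings from this vector */
	i40e_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;
	i40e_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	vsi->q_vectors[v_idx] = NULL;
	kfree_rcu(q_vector, rcu);
}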

Jesse provides a workaround for some older NVM versions where the NVM was
not filling in the GLQF_HKEY register, making sure that this critical
register is initialized.

Michal provides a fix to reset the head and tail registers on admin queue
initialization, since head and tail are not reset by the hardware.

Neerav adds a helper routine that waits for a Rx/Tx queue to reach the
requested enable or disable state.  He also fixes the debugfs command
"lldp get remote", which was dumping the local LLDPDU instead of the peer's,
and fixes a bug where, after all the Tx hang recovery mechanisms had failed,
the driver tried to bring down the interface in interrupt context; the fix
defers the down to the service task, as condensed in the sketch below.
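
Condensed from the i40e_main.c hunks below: the timeout handler only records
the request, and the service task, which runs in process context and may
sleep, performs the actual down.

/* interrupt/timeout context: record the request and kick the task */
set_bit(__I40E_DOWN_REQUESTED, &pf->state);
set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
i40e_service_event_schedule(pf);

/* later, in the service task (process context): */
if (vsi && test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
	set_bit(__I40E_DOWN, &vsi->state);
	i40e_down(vsi);		/* safe to sleep here */
	clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
}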

Shannon provides a patch to clear the Virtual Ethernet Bridge (VEB) stats
when the PF stats are cleared.  He also changes the service task so that it
does not run while a reset is in progress.

Mitch fixes an issue in i40evf_get_rxfh() where only fifteen registers
were being read instead of all sixteen.
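
The bug is an inclusive-bound slip: I40E_VFQF_HLUT_MAX_INDEX names the last
valid register index (15 in the register definitions), not a count, so the
loop must use <= to visit all sixteen registers.  A minimal illustration of
the corrected loop shape:

/* MAX_INDEX is the last valid index, so <= reads all 16 HLUT registers */
for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
	hlut_val = rd32(hw, I40E_VFQF_HLUT(i));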

Carolyn provides a change to the RSS configuration to set the hash lookup
table size explicitly and write it to the hardware, so that the RSS table
size actually in use is confirmed.
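
The body of the LUT fill loop is elided in the i40e_config_rss() hunk below;
a sketch of a round-robin fill of this kind, simplified from the driver's
conventions (the entry width is assumed to fit in a byte):

/* Illustrative only: one queue index per byte, four entries per 32-bit
 * HLUT register, cycling through the VSI's queues round-robin.
 */
for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
	if (j == vsi->rss_size)
		j = 0;			/* wrap back to the first queue */
	lut = (lut << 8) | (j & 0xff);	/* pack one entry per byte */
	if ((i & 3) == 3)		/* flush every fourth entry */
		wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
}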

Kamil makes a change to the admin queue debug prints so that they will not
cause segmentation faults in some of our tool applications.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 88729dd1 7974d5e5
@@ -84,6 +84,7 @@
#define I40E_AQ_WORK_LIMIT 16
#define I40E_MAX_USER_PRIORITY 8
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -134,6 +135,7 @@ enum i40e_state_t {
__I40E_FILTER_OVERFLOW_PROMISC,
__I40E_SUSPENDED,
__I40E_BAD_EEPROM,
__I40E_DOWN_REQUESTED,
};
enum i40e_interrupt_policy {
@@ -348,6 +350,7 @@ struct i40e_pf {
u32 rx_hwtstamp_cleared;
bool ptp_tx;
bool ptp_rx;
u16 rss_table_size;
};
struct i40e_mac_filter {
......
@@ -296,6 +296,10 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
i40e_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
wr32(hw, hw->aq.asq.head, 0);
wr32(hw, hw->aq.asq.tail, 0);
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
wr32(hw, I40E_VF_ATQBAH1,
@@ -334,6 +338,10 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
i40e_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
wr32(hw, hw->aq.arq.head, 0);
wr32(hw, hw->aq.arq.tail, 0);
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
wr32(hw, I40E_VF_ARQBAH1,
@@ -677,6 +685,10 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"%s: ntc %d head %d.\n", __func__, ntc,
rd32(hw, hw->aq.asq.head));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
(I40E_ADMINQ_CALLBACK)details->callback;
@@ -736,6 +748,15 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
u32 val = 0;
val = rd32(hw, hw->aq.asq.head);
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
status = I40E_ERR_QUEUE_EMPTY;
goto asq_send_command_exit;
}
if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
@@ -829,6 +850,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
}
/* bump the tail */
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
(hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
@@ -866,6 +888,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Command completed with error 0x%X.\n",
retval);
/* strip off FW internal code */
retval &= 0xff;
}
@@ -880,6 +903,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
if (i40e_is_nvm_update_op(desc))
hw->aq.nvm_busy = true;
if (le16_to_cpu(desc->datalen) == buff_size) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
}
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -951,10 +980,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
/* now clean the next descriptor */
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
-i40e_debug_aq(hw,
-I40E_DEBUG_AQ_COMMAND,
-(void *)desc,
-hw->aq.arq.r.arq_bi[desc_idx].va);
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
@@ -977,6 +1002,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
if (i40e_is_nvm_update_op(&e->desc))
hw->aq.nvm_busy = false;
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
......
@@ -1839,7 +1839,6 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
struct i40e_aqc_list_capabilities_element_resp *cap;
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
-u32 reg_val;
u32 i = 0;
u16 id;
@@ -1910,11 +1909,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
break;
case I40E_DEV_FUNC_CAP_RSS:
p->rss = true;
-reg_val = rd32(hw, I40E_PFQF_CTL_0);
-if (reg_val & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK)
-p->rss_table_size = number;
-else
-p->rss_table_size = 128;
+p->rss_table_size = number;
p->rss_table_entry_width = logical_id;
break;
case I40E_DEV_FUNC_CAP_RX_QUEUES:
......
@@ -1830,7 +1830,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ret = i40e_aq_get_lldp_mib(&pf->hw,
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
-I40E_AQ_LLDP_MIB_LOCAL,
+I40E_AQ_LLDP_MIB_REMOTE,
buff, I40E_LLDPDU_SIZE,
&llen, &rlen, NULL);
if (ret) {
......
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 10
+#define DRV_VERSION_BUILD 13
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -304,8 +304,8 @@ static void i40e_tx_timeout(struct net_device *netdev)
break;
default:
netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
-set_bit(__I40E_DOWN, &vsi->state);
-i40e_down(vsi);
+set_bit(__I40E_DOWN_REQUESTED, &pf->state);
+set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
break;
}
i40e_service_event_schedule(pf);
@@ -444,9 +444,21 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
**/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
int i;
memset(&pf->stats, 0, sizeof(pf->stats));
memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
pf->stat_offsets_loaded = false;
for (i = 0; i < I40E_MAX_VEB; i++) {
if (pf->veb[i]) {
memset(&pf->veb[i]->stats, 0,
sizeof(pf->veb[i]->stats));
memset(&pf->veb[i]->stats_offsets, 0,
sizeof(pf->veb[i]->stats_offsets));
pf->veb[i]->stat_offsets_loaded = false;
}
}
}
/**
@@ -3151,8 +3163,12 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
* group them so there are multiple queues per vector.
* It is also important to go through all the vectors available to be
* sure that if we don't use all the vectors, that the remaining vectors
* are cleared. This is especially important when decreasing the
* number of queues in use.
*/
-for (; v_start < q_vectors && qp_remaining; v_start++) {
+for (; v_start < q_vectors; v_start++) {
struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
@@ -3227,6 +3243,35 @@ static void i40e_netpoll(struct net_device *netdev)
}
#endif
/**
* i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
* @pf: the PF being configured
* @pf_q: the PF queue
* @enable: enable or disable state of the queue
*
* This routine will wait for the given Tx queue of the PF to reach the
* enabled or disabled state.
* Returns -ETIMEDOUT in case of failing to reach the requested state after
* multiple retries; else will return 0 in case of success.
**/
static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
int i;
u32 tx_reg;
for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
break;
udelay(10);
}
if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
return -ETIMEDOUT;
return 0;
}
/**
* i40e_vsi_control_tx - Start or stop a VSI's rings
* @vsi: the VSI being configured
@@ -3236,7 +3281,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
-int i, j, pf_q;
+int i, j, pf_q, ret = 0;
u32 tx_reg;
pf_q = vsi->base_queue;
@@ -3269,22 +3314,46 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
/* wait for the change to finish */
-for (j = 0; j < 10; j++) {
-tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
-if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
-break;
-udelay(10);
-}
-if (j >= 10) {
-dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
-pf_q, (enable ? "en" : "dis"));
-return -ETIMEDOUT;
+ret = i40e_pf_txq_wait(pf, pf_q, enable);
+if (ret) {
+dev_info(&pf->pdev->dev,
+"%s: VSI seid %d Tx ring %d %sable timeout\n",
+__func__, vsi->seid, pf_q,
+(enable ? "en" : "dis"));
+break;
}
}
if (hw->revision_id == 0)
mdelay(50);
return ret;
}
/**
* i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
* @pf: the PF being configured
* @pf_q: the PF queue
* @enable: enable or disable state of the queue
*
* This routine will wait for the given Rx queue of the PF to reach the
* enabled or disabled state.
* Returns -ETIMEDOUT in case of failing to reach the requested state after
* multiple retries; else will return 0 in case of success.
**/
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
int i;
u32 rx_reg;
for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
break;
udelay(10);
}
if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
return -ETIMEDOUT;
return 0;
}
@@ -3298,7 +3367,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
-int i, j, pf_q;
+int i, j, pf_q, ret = 0;
u32 rx_reg;
pf_q = vsi->base_queue;
@@ -3323,22 +3392,17 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
/* wait for the change to finish */
-for (j = 0; j < 10; j++) {
-rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
-if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-break;
-udelay(10);
-}
-if (j >= 10) {
-dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
-pf_q, (enable ? "en" : "dis"));
-return -ETIMEDOUT;
+ret = i40e_pf_rxq_wait(pf, pf_q, enable);
+if (ret) {
+dev_info(&pf->pdev->dev,
+"%s: VSI seid %d Rx ring %d %sable timeout\n",
+__func__, vsi->seid, pf_q,
+(enable ? "en" : "dis"));
+break;
}
}
-return 0;
+return ret;
}
/**
@@ -4638,6 +4702,23 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
}
}
/* no further action needed, so return now */
return;
} else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
int v;
/* Find the VSI(s) that needs to be brought down */
dev_info(&pf->pdev->dev, "VSI down requested\n");
for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
set_bit(__I40E_DOWN, &vsi->state);
i40e_down(vsi);
clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
}
}
/* no further action needed, so return now */
return;
} else {
@@ -5110,6 +5191,10 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
}
if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
reset_flags |= (1 << __I40E_DOWN_REQUESTED);
clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
}
/* If there's a recovery already waiting, it takes
* precedence before starting a new reset sequence.
@@ -5463,6 +5548,20 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
struct i40e_vsi *vsi;
int i;
/* quick workaround for an NVM issue that leaves a critical register
* uninitialized
*/
if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
static const u32 hkey[] = {
0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
0x95b3a76d};
for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
}
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
@@ -5744,26 +5843,28 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
/* find what triggered the MDD event */
reg = rd32(hw, I40E_GL_MDET_TX);
if (reg & I40E_GL_MDET_TX_VALID_MASK) {
-u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
->> I40E_GL_MDET_TX_FUNCTION_SHIFT;
-u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT)
->> I40E_GL_MDET_TX_EVENT_SHIFT;
-u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
->> I40E_GL_MDET_TX_QUEUE_SHIFT;
+u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+I40E_GL_MDET_TX_PF_NUM_SHIFT;
+u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+I40E_GL_MDET_TX_VF_NUM_SHIFT;
+u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >>
+I40E_GL_MDET_TX_EVENT_SHIFT;
+u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+I40E_GL_MDET_TX_QUEUE_SHIFT;
dev_info(&pf->pdev->dev,
-"Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
-event, queue, func);
+"Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
+event, queue, pf_num, vf_num);
wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
mdd_detected = true;
}
reg = rd32(hw, I40E_GL_MDET_RX);
if (reg & I40E_GL_MDET_RX_VALID_MASK) {
-u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
->> I40E_GL_MDET_RX_FUNCTION_SHIFT;
-u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT)
->> I40E_GL_MDET_RX_EVENT_SHIFT;
-u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
->> I40E_GL_MDET_RX_QUEUE_SHIFT;
+u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+I40E_GL_MDET_RX_FUNCTION_SHIFT;
+u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >>
+I40E_GL_MDET_RX_EVENT_SHIFT;
+u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+I40E_GL_MDET_RX_QUEUE_SHIFT;
dev_info(&pf->pdev->dev,
"Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
event, queue, func);
@@ -5860,6 +5961,12 @@ static void i40e_service_task(struct work_struct *work)
service_task);
unsigned long start_time = jiffies;
/* don't bother with service tasks if a reset is in progress */
if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
i40e_service_event_complete(pf);
return;
}
i40e_reset_subtask(pf);
i40e_handle_mdd_event(pf);
i40e_vc_process_vflr_event(pf);
@@ -6492,6 +6599,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
u32 lut = 0;
int i, j;
u64 hena;
u32 reg_val;
/* Fill out hash function seed */
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
@@ -6504,8 +6612,19 @@ static int i40e_config_rss(struct i40e_pf *pf)
wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
/* Check capability and Set table size and register per hw expectation*/
reg_val = rd32(hw, I40E_PFQF_CTL_0);
if (hw->func_caps.rss_table_size == 512) {
reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
pf->rss_table_size = 512;
} else {
pf->rss_table_size = 128;
reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
}
wr32(hw, I40E_PFQF_CTL_0, reg_val);
/* Populate the LUT with max no. of queues in round robin fashion */
-for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
+for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
/* The assumption is that lan qp count will be the highest
* qp count for any PF VSI that needs RSS.
......
(source diff not shown here: too large to display)
@@ -50,6 +50,9 @@
(d) == I40E_DEV_ID_QSFP_B || \
(d) == I40E_DEV_ID_QSFP_C)
/* I40E_MASK is a macro used on 32 bit registers */
#define I40E_MASK(mask, shift) (mask << shift)
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
#define I40E_MAX_CHAINED_RX_BUFFERS 5
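
As an aside, a hedged example of how a mask macro like this is applied to a
32-bit register field; the EXAMPLE_* names are hypothetical, not from this
patch:

/* Hypothetical 3-bit field at bit offset 4: I40E_MASK(0x7, 4) == 0x70 */
#define EXAMPLE_FIELD_SHIFT 4
#define EXAMPLE_FIELD_MASK  I40E_MASK(0x7, EXAMPLE_FIELD_SHIFT)

u8 field = (reg & EXAMPLE_FIELD_MASK) >> EXAMPLE_FIELD_SHIFT;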
@@ -1162,4 +1165,7 @@ enum i40e_reset_type {
I40E_RESET_GLOBR = 2,
I40E_RESET_EMPR = 3,
};
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
#endif /* _I40E_TYPE_H_ */
@@ -294,6 +294,10 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
i40e_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
wr32(hw, hw->aq.asq.head, 0);
wr32(hw, hw->aq.asq.tail, 0);
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
wr32(hw, I40E_VF_ATQBAH1,
@@ -332,6 +336,10 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
i40e_status ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
wr32(hw, hw->aq.arq.head, 0);
wr32(hw, hw->aq.arq.tail, 0);
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
wr32(hw, I40E_VF_ARQBAH1,
@@ -630,6 +638,10 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"%s: ntc %d head %d.\n", __func__, ntc,
rd32(hw, hw->aq.asq.head));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
(I40E_ADMINQ_CALLBACK)details->callback;
@@ -690,6 +702,15 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
struct i40e_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
u32 val = 0;
val = rd32(hw, hw->aq.asq.head);
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
status = I40E_ERR_QUEUE_EMPTY;
goto asq_send_command_exit;
}
if (hw->aq.asq.count == 0) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
@@ -783,6 +804,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
}
/* bump the tail */
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
(hw->aq.asq.next_to_use)++;
if (hw->aq.asq.next_to_use == hw->aq.asq.count)
@@ -820,6 +842,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
I40E_DEBUG_AQ_MESSAGE,
"AQTX: Command completed with error 0x%X.\n",
retval);
/* strip off FW internal code */
retval &= 0xff;
}
@@ -834,6 +857,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
if (i40e_is_nvm_update_op(desc))
hw->aq.nvm_busy = true;
if (le16_to_cpu(desc->datalen) == buff_size) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: desc and buffer writeback:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
}
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -905,10 +934,6 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
/* now clean the next descriptor */
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
-i40evf_debug_aq(hw,
-I40E_DEBUG_AQ_COMMAND,
-(void *)desc,
-hw->aq.arq.r.arq_bi[desc_idx].va);
flags = le16_to_cpu(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
@@ -931,6 +956,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
if (i40e_is_nvm_update_op(&e->desc))
hw->aq.nvm_busy = false;
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
......
(source diff not shown here: too large to display)
@@ -50,6 +50,9 @@
(d) == I40E_DEV_ID_QSFP_B || \
(d) == I40E_DEV_ID_QSFP_C)
/* I40E_MASK is a macro used on 32 bit registers */
#define I40E_MASK(mask, shift) (mask << shift)
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
#define I40E_MAX_CHAINED_RX_BUFFERS 5
@@ -1162,4 +1165,7 @@ enum i40e_reset_type {
I40E_RESET_GLOBR = 2,
I40E_RESET_EMPR = 3,
};
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
#endif /* _I40E_TYPE_H_ */
@@ -632,7 +632,7 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
u32 hlut_val;
int i, j;
-for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++) {
+for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
indir[j++] = hlut_val & 0xff;
indir[j++] = (hlut_val >> 8) & 0xff;
@@ -659,7 +659,7 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
u32 hlut_val;
int i, j;
-for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
hlut_val = indir[j++];
hlut_val |= indir[j++] << 8;
hlut_val |= indir[j++] << 16;
......
@@ -36,7 +36,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710 X710 Virtual Function Network Driver";
#define DRV_VERSION "0.9.34"
#define DRV_VERSION "0.9.35"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
......