Commit 54399a78 authored by David S. Miller

Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next

Ben Hutchings says:

====================
1. Fix potential badness when running a self-test with SR-IOV enabled.
2. Fix calculation of some interface statistics that could run backward.
3. Miscellaneous cleanup.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d3818c92 c2dbab39
@@ -1103,8 +1103,8 @@ static int efx_init_io(struct efx_nic *efx)
 	 * masks event though they reject 46 bit masks.
 	 */
 	while (dma_mask > 0x7fffffffUL) {
-		if (pci_dma_supported(pci_dev, dma_mask)) {
-			rc = pci_set_dma_mask(pci_dev, dma_mask);
+		if (dma_supported(&pci_dev->dev, dma_mask)) {
+			rc = dma_set_mask(&pci_dev->dev, dma_mask);
 			if (rc == 0)
 				break;
 		}
@@ -1117,10 +1117,10 @@ static int efx_init_io(struct efx_nic *efx)
 	}
 	netif_dbg(efx, probe, efx->net_dev,
 		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
-	rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
+	rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
 	if (rc) {
-		/* pci_set_consistent_dma_mask() is not *allowed* to
-		 * fail with a mask that pci_set_dma_mask() accepted,
+		/* dma_set_coherent_mask() is not *allowed* to
+		 * fail with a mask that dma_set_mask() accepted,
 		 * but just in case...
 		 */
		netif_err(efx, probe, efx->net_dev,
...
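For context on the pci_*-to-dma_* conversion above: later kernels add dma_set_mask_and_coherent(), which sets the streaming and coherent masks in one call and so makes the "coherent mask may not fail where the streaming mask succeeded" invariant impossible to trip. A minimal probe-time sketch; this helper postdates this commit, and the function name and fallback-to-32-bit policy are illustrative, not taken from the patch:

	static int example_set_dma_masks(struct pci_dev *pci_dev)
	{
		/* Try a wide mask first, as the loop above does, then
		 * fall back to 32 bits. */
		int rc = dma_set_mask_and_coherent(&pci_dev->dev,
						   DMA_BIT_MASK(46));

		if (rc)
			rc = dma_set_mask_and_coherent(&pci_dev->dev,
						       DMA_BIT_MASK(32));
		return rc;
	}
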
@@ -136,10 +136,10 @@ enum efx_loopback_mode {
  *
  * Reset methods are numbered in order of increasing scope.
  *
- * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
- * @RESET_TYPE_ALL: reset everything but PCI core blocks
- * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
- * @RESET_TYPE_DISABLE: disable NIC
+ * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
+ * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
+ * @RESET_TYPE_WORLD: Reset as much as possible
+ * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
  * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
  * @RESET_TYPE_INT_ERROR: reset due to internal error
  * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
...
@@ -453,7 +453,7 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
 	switch (string_set) {
 	case ETH_SS_STATS:
 		for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
-			strncpy(ethtool_strings[i].name,
+			strlcpy(ethtool_strings[i].name,
 				efx_ethtool_stats[i].name,
 				sizeof(ethtool_strings[i].name));
 		break;
...
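The strncpy()-to-strlcpy() change above matters because strncpy() does not NUL-terminate when the source is at least as long as the destination, while strlcpy() always terminates, truncating if necessary. A standalone illustration; glibc lacks strlcpy(), so the stand-in my_strlcpy() below mirrors the kernel's semantics:

	#include <stdio.h>
	#include <string.h>

	/* Same contract as the kernel's strlcpy(): always NUL-terminate,
	 * return the length of the source string. */
	static size_t my_strlcpy(char *dst, const char *src, size_t size)
	{
		size_t len = strlen(src);

		if (size) {
			size_t copy = len < size - 1 ? len : size - 1;

			memcpy(dst, src, copy);
			dst[copy] = '\0';
		}
		return len;
	}

	int main(void)
	{
		char name[4];

		strncpy(name, "stats", sizeof(name));    /* all 4 bytes used, no '\0' */
		my_strlcpy(name, "stats", sizeof(name)); /* copies "sta" + '\0' */
		printf("%s\n", name);                    /* prints "sta" */
		return 0;
	}
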
@@ -25,9 +25,12 @@
 #include "io.h"
 #include "phy.h"
 #include "workarounds.h"
+#include "selftest.h"
 
 /* Hardware control for SFC4000 (aka Falcon). */
 
+static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
+
 static const unsigned int
 /* "Large" EEPROM device: Atmel AT25640 or similar
  * 8 KB, 16-bit address, 32 B write block */
@@ -1034,10 +1037,34 @@ static const struct efx_nic_register_test falcon_b0_register_tests[] = {
 	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
 };
 
-static int falcon_b0_test_registers(struct efx_nic *efx)
+static int
+falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
 {
-	return efx_nic_test_registers(efx, falcon_b0_register_tests,
-				      ARRAY_SIZE(falcon_b0_register_tests));
+	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
+	int rc, rc2;
+
+	mutex_lock(&efx->mac_lock);
+	if (efx->loopback_modes) {
+		/* We need the 312 clock from the PHY to test the XMAC
+		 * registers, so move into XGMII loopback if available */
+		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
+			efx->loopback_mode = LOOPBACK_XGMII;
+		else
+			efx->loopback_mode = __ffs(efx->loopback_modes);
+	}
+	__efx_reconfigure_port(efx);
+	mutex_unlock(&efx->mac_lock);
+
+	efx_reset_down(efx, reset_method);
+
+	tests->registers =
+		efx_nic_test_registers(efx, falcon_b0_register_tests,
+				       ARRAY_SIZE(falcon_b0_register_tests))
+		? -1 : 1;
+
+	rc = falcon_reset_hw(efx, reset_method);
+	rc2 = efx_reset_up(efx, reset_method, rc == 0);
+	return rc ? rc : rc2;
 }
 
 /**************************************************************************
@@ -1818,7 +1845,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.get_wol = falcon_get_wol,
 	.set_wol = falcon_set_wol,
 	.resume_wol = efx_port_dummy_op_void,
-	.test_registers = falcon_b0_test_registers,
+	.test_chip = falcon_b0_test_chip,
 	.test_nvram = falcon_test_nvram,
 
 	.revision = EFX_REV_FALCON_B0,
...
@@ -341,12 +341,12 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
 	FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
 
 	/* Update derived statistics */
-	mac_stats->tx_good_bytes =
-		(mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
-		 mac_stats->tx_control * 64);
-	mac_stats->rx_bad_bytes =
-		(mac_stats->rx_bytes - mac_stats->rx_good_bytes -
-		 mac_stats->rx_control * 64);
+	efx_update_diff_stat(&mac_stats->tx_good_bytes,
+			     mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
+			     mac_stats->tx_control * 64);
+	efx_update_diff_stat(&mac_stats->rx_bad_bytes,
+			     mac_stats->rx_bytes - mac_stats->rx_good_bytes -
+			     mac_stats->rx_control * 64);
 }
 
 void falcon_poll_xmac(struct efx_nic *efx)
...
@@ -662,7 +662,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
 	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
 	struct efx_filter_spec *saved_spec;
 	efx_oword_t filter;
-	unsigned int filter_idx, depth;
+	unsigned int filter_idx, depth = 0;
 	u32 key;
 	int rc;
...
@@ -1001,12 +1001,17 @@ static void efx_mcdi_exit_assertion(struct efx_nic *efx)
 {
 	u8 inbuf[MC_CMD_REBOOT_IN_LEN];
 
-	/* Atomically reboot the mcfw out of the assertion handler */
+	/* If the MC is running debug firmware, it might now be
+	 * waiting for a debugger to attach, but we just want it to
+	 * reboot.  We set a flag that makes the command a no-op if it
+	 * has already done so.  We don't know what return code to
+	 * expect (0 or -EIO), so ignore it.
+	 */
 	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
 	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
 		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
-	efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
-		     NULL, 0, NULL);
+	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+			    NULL, 0, NULL);
 }
 
 int efx_mcdi_handle_assertion(struct efx_nic *efx)
...
@@ -68,6 +68,8 @@
 #define EFX_TXQ_TYPES		4
 #define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
+struct efx_self_tests;
+
 /**
  * struct efx_special_buffer - An Efx special buffer
  * @addr: CPU base address of the buffer
@@ -100,7 +102,7 @@ struct efx_special_buffer {
  * @len: Length of this fragment.
  *	This field is zero when the queue slot is empty.
 * @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if pci_unmap_single should be used.
+ * @unmap_single: True if dma_unmap_single should be used.
 * @unmap_len: Length of this fragment to unmap
 */
 struct efx_tx_buffer {
@@ -901,7 +903,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
 * @get_wol: Get WoL configuration from driver state
 * @set_wol: Push WoL configuration to the NIC
 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
- * @test_registers: Test read/write functionality of control registers
+ * @test_chip: Test registers.  Should use efx_nic_test_registers(), and is
+ *	expected to reset the NIC.
 * @test_nvram: Test validity of NVRAM contents
 * @revision: Hardware architecture revision
 * @mem_map_size: Memory BAR mapped size
@@ -946,7 +949,7 @@ struct efx_nic_type {
 	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
 	int (*set_wol)(struct efx_nic *efx, u32 type);
 	void (*resume_wol)(struct efx_nic *efx);
-	int (*test_registers)(struct efx_nic *efx);
+	int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
 	int (*test_nvram)(struct efx_nic *efx);
 
 	int revision;
...
@@ -124,9 +124,6 @@ int efx_nic_test_registers(struct efx_nic *efx,
 	unsigned address = 0, i, j;
 	efx_oword_t mask, imask, original, reg, buf;
 
-	/* Falcon should be in loopback to isolate the XMAC from the PHY */
-	WARN_ON(!LOOPBACK_INTERNAL(efx));
-
 	for (i = 0; i < n_regs; ++i) {
 		address = regs[i].address;
 		mask = imask = regs[i].mask;
@@ -308,8 +305,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 			 unsigned int len)
 {
-	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
-					    &buffer->dma_addr);
+	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+					  &buffer->dma_addr, GFP_ATOMIC);
 	if (!buffer->addr)
 		return -ENOMEM;
 	buffer->len = len;
@@ -320,8 +317,8 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
 {
 	if (buffer->addr) {
-		pci_free_consistent(efx->pci_dev, buffer->len,
-				    buffer->addr, buffer->dma_addr);
+		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+				  buffer->addr, buffer->dma_addr);
 		buffer->addr = NULL;
 	}
 }
...
@@ -294,6 +294,24 @@ extern bool falcon_xmac_check_fault(struct efx_nic *efx);
 extern int falcon_reconfigure_xmac(struct efx_nic *efx);
 extern void falcon_update_stats_xmac(struct efx_nic *efx);
 
+/* Some statistics are computed as A - B where A and B each increase
+ * linearly with some hardware counter(s) and the counters are read
+ * asynchronously.  If the counters contributing to B are always read
+ * after those contributing to A, the computed value may be lower than
+ * the true value by some variable amount, and may decrease between
+ * subsequent computations.
+ *
+ * We should never allow statistics to decrease or to exceed the true
+ * value.  Since the computed value will never be greater than the
+ * true value, we can achieve this by only storing the computed value
+ * when it increases.
+ */
+static inline void efx_update_diff_stat(u64 *stat, u64 diff)
+{
+	if ((s64)(diff - *stat) > 0)
+		*stat = diff;
+}
+
 /* Interrupts and test events */
 extern int efx_nic_init_interrupt(struct efx_nic *efx);
 extern void efx_nic_enable_interrupts(struct efx_nic *efx);
...
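The helper added above relies on unsigned wraparound plus a signed comparison: diff - *stat is interpreted as a signed delta, so the stored value only ever moves forward. A userspace demonstration of the monotonic behaviour described in the comment, with u64/s64 spelled as uint64_t/int64_t:

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace copy of efx_update_diff_stat() from the hunk above. */
	static void update_diff_stat(uint64_t *stat, uint64_t diff)
	{
		if ((int64_t)(diff - *stat) > 0)
			*stat = diff;
	}

	int main(void)
	{
		uint64_t tx_good_bytes = 0;

		update_diff_stat(&tx_good_bytes, 1000); /* first reading: stored */
		update_diff_stat(&tx_good_bytes, 940);  /* racy low reading: ignored */
		update_diff_stat(&tx_good_bytes, 1200); /* counters caught up: stored */
		printf("%llu\n", (unsigned long long)tx_good_bytes); /* 1200 */
		return 0;
	}
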
@@ -155,11 +155,11 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 		rx_buf->len = skb_len - NET_IP_ALIGN;
 		rx_buf->flags = 0;
 
-		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+		rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
 						  skb->data, rx_buf->len,
-						  PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
+						  DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
 					       rx_buf->dma_addr))) {
 			dev_kfree_skb_any(skb);
 			rx_buf->u.skb = NULL;
 			return -EIO;
@@ -200,10 +200,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 				   efx->rx_buffer_order);
 		if (unlikely(page == NULL))
 			return -ENOMEM;
-		dma_addr = pci_map_page(efx->pci_dev, page, 0,
+		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
 					efx_rx_buf_size(efx),
-					PCI_DMA_FROMDEVICE);
-		if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
+					DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
 			__free_pages(page, efx->rx_buffer_order);
 			return -EIO;
 		}
@@ -247,14 +247,14 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 		state = page_address(rx_buf->u.page);
 		if (--state->refcnt == 0) {
-			pci_unmap_page(efx->pci_dev,
+			dma_unmap_page(&efx->pci_dev->dev,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
-				       PCI_DMA_FROMDEVICE);
+				       DMA_FROM_DEVICE);
 		}
 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
-		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
-				 rx_buf->len, PCI_DMA_FROMDEVICE);
+		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
+				 rx_buf->len, DMA_FROM_DEVICE);
 	}
 }
...
@@ -120,19 +120,6 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
 	return rc;
 }
 
-static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
-{
-	int rc = 0;
-
-	/* Test register access */
-	if (efx->type->test_registers) {
-		rc = efx->type->test_registers(efx);
-		tests->registers = rc ? -1 : 1;
-	}
-
-	return rc;
-}
-
 /**************************************************************************
  *
  * Interrupt and event queue testing
@@ -488,7 +475,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
 		skb = state->skbs[i];
 		if (skb && !skb_shared(skb))
 			++tx_done;
-		dev_kfree_skb_any(skb);
+		dev_kfree_skb(skb);
 	}
 
 	netif_tx_unlock_bh(efx->net_dev);
@@ -699,8 +686,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 {
 	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
 	int phy_mode = efx->phy_mode;
-	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
-	int rc_test = 0, rc_reset = 0, rc;
+	int rc_test = 0, rc_reset, rc;
 
 	efx_selftest_async_cancel(efx);
@@ -737,44 +723,26 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	 */
 	netif_device_detach(efx->net_dev);
 
-	mutex_lock(&efx->mac_lock);
-	if (efx->loopback_modes) {
-		/* We need the 312 clock from the PHY to test the XMAC
-		 * registers, so move into XGMII loopback if available */
-		if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
-			efx->loopback_mode = LOOPBACK_XGMII;
-		else
-			efx->loopback_mode = __ffs(efx->loopback_modes);
-	}
-	__efx_reconfigure_port(efx);
-	mutex_unlock(&efx->mac_lock);
-
-	/* free up all consumers of SRAM (including all the queues) */
-	efx_reset_down(efx, reset_method);
-
-	rc = efx_test_chip(efx, tests);
-	if (rc && !rc_test)
-		rc_test = rc;
+	if (efx->type->test_chip) {
+		rc_reset = efx->type->test_chip(efx, tests);
+		if (rc_reset) {
+			netif_err(efx, hw, efx->net_dev,
+				  "Unable to recover from chip test\n");
+			efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+			return rc_reset;
+		}
 
-	/* reset the chip to recover from the register test */
-	rc_reset = efx->type->reset(efx, reset_method);
+		if ((tests->registers < 0) && !rc_test)
+			rc_test = -EIO;
+	}
 
 	/* Ensure that the phy is powered and out of loopback
 	 * for the bist and loopback tests */
+	mutex_lock(&efx->mac_lock);
 	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
 	efx->loopback_mode = LOOPBACK_NONE;
+	__efx_reconfigure_port(efx);
+	mutex_unlock(&efx->mac_lock);
 
-	rc = efx_reset_up(efx, reset_method, rc_reset == 0);
-	if (rc && !rc_reset)
-		rc_reset = rc;
-
-	if (rc_reset) {
-		netif_err(efx, drv, efx->net_dev,
-			  "Unable to recover from chip test\n");
-		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
-		return rc_reset;
-	}
 	rc = efx_test_phy(efx, tests, flags);
 	if (rc && !rc_test)
...
@@ -25,10 +25,12 @@
 #include "workarounds.h"
 #include "mcdi.h"
 #include "mcdi_pcol.h"
+#include "selftest.h"
 
 /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
 
 static void siena_init_wol(struct efx_nic *efx);
+static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
 
 static void siena_push_irq_moderation(struct efx_channel *channel)
@@ -154,10 +156,29 @@ static const struct efx_nic_register_test siena_register_tests[] = {
 	  EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
 };
 
-static int siena_test_registers(struct efx_nic *efx)
+static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
 {
-	return efx_nic_test_registers(efx, siena_register_tests,
-				      ARRAY_SIZE(siena_register_tests));
+	enum reset_type reset_method = RESET_TYPE_ALL;
+	int rc, rc2;
+
+	efx_reset_down(efx, reset_method);
+
+	/* Reset the chip immediately so that it is completely
+	 * quiescent regardless of what any VF driver does.
+	 */
+	rc = siena_reset_hw(efx, reset_method);
+	if (rc)
+		goto out;
+
+	tests->registers =
+		efx_nic_test_registers(efx, siena_register_tests,
+				       ARRAY_SIZE(siena_register_tests))
+		? -1 : 1;
+
+	rc = siena_reset_hw(efx, reset_method);
+out:
+	rc2 = efx_reset_up(efx, reset_method, rc == 0);
+	return rc ? rc : rc2;
 }
 
 /**************************************************************************
@@ -437,8 +458,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
 	MAC_STAT(tx_bytes, TX_BYTES);
 	MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
-	mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
-				    mac_stats->tx_bad_bytes);
+	efx_update_diff_stat(&mac_stats->tx_good_bytes,
+			     mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
 	MAC_STAT(tx_packets, TX_PKTS);
 	MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
 	MAC_STAT(tx_pause, TX_PAUSE_PKTS);
@@ -471,8 +492,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
 	MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
 	MAC_STAT(rx_bytes, RX_BYTES);
 	MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
-	mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
-				    mac_stats->rx_bad_bytes);
+	efx_update_diff_stat(&mac_stats->rx_good_bytes,
+			     mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
 	MAC_STAT(rx_packets, RX_PKTS);
 	MAC_STAT(rx_good, RX_GOOD_PKTS);
 	MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
@@ -649,7 +670,7 @@ const struct efx_nic_type siena_a0_nic_type = {
 	.get_wol = siena_get_wol,
 	.set_wol = siena_set_wol,
 	.resume_wol = siena_init_wol,
-	.test_registers = siena_test_registers,
+	.test_chip = siena_test_chip,
 	.test_nvram = efx_mcdi_nvram_test_all,
 
 	.revision = EFX_REV_SIENA_A0,
...
@@ -36,15 +36,15 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
			       unsigned int *bytes_compl)
 {
 	if (buffer->unmap_len) {
-		struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
 		dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
 					 buffer->unmap_len);
 		if (buffer->unmap_single)
-			pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+					 DMA_TO_DEVICE);
 		else
-			pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+				       DMA_TO_DEVICE);
 		buffer->unmap_len = 0;
 		buffer->unmap_single = false;
 	}
@@ -138,7 +138,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 {
 	struct efx_nic *efx = tx_queue->efx;
-	struct pci_dev *pci_dev = efx->pci_dev;
+	struct device *dma_dev = &efx->pci_dev->dev;
 	struct efx_tx_buffer *buffer;
 	skb_frag_t *fragment;
 	unsigned int len, unmap_len = 0, fill_level, insert_ptr;
@@ -167,17 +167,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
 	q_space = efx->txq_entries - 1 - fill_level;
 
-	/* Map for DMA.  Use pci_map_single rather than pci_map_page
+	/* Map for DMA.  Use dma_map_single rather than dma_map_page
 	 * since this is more efficient on machines with sparse
 	 * memory.
 	 */
 	unmap_single = true;
-	dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+	dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
 
 	/* Process all fragments */
 	while (1) {
-		if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
-			goto pci_err;
+		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+			goto dma_err;
 
 		/* Store fields for marking in the per-fragment final
 		 * descriptor */
@@ -246,7 +246,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 		i++;
 		/* Map for DMA */
 		unmap_single = false;
-		dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
+		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
 					    DMA_TO_DEVICE);
 	}
@@ -261,7 +261,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	return NETDEV_TX_OK;
 
- pci_err:
+ dma_err:
 	netif_err(efx, tx_err, efx->net_dev,
 		  " TX queue %d could not map skb with %d bytes %d "
 		  "fragments for DMA\n", tx_queue->queue, skb->len,
@@ -284,11 +284,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 	/* Free the fragment we were mid-way through pushing */
 	if (unmap_len) {
 		if (unmap_single)
-			pci_unmap_single(pci_dev, unmap_addr, unmap_len,
-					 PCI_DMA_TODEVICE);
+			dma_unmap_single(dma_dev, unmap_addr, unmap_len,
+					 DMA_TO_DEVICE);
 		else
-			pci_unmap_page(pci_dev, unmap_addr, unmap_len,
-				       PCI_DMA_TODEVICE);
+			dma_unmap_page(dma_dev, unmap_addr, unmap_len,
+				       DMA_TO_DEVICE);
 	}
 
 	return rc;
@@ -651,17 +651,8 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
 	EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
 			    protocol);
 	if (protocol == htons(ETH_P_8021Q)) {
-		/* Find the encapsulated protocol; reset network header
-		 * and transport header based on that. */
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
-		skb_set_network_header(skb, sizeof(*veh));
-		if (protocol == htons(ETH_P_IP))
-			skb_set_transport_header(skb, sizeof(*veh) +
-						 4 * ip_hdr(skb)->ihl);
-		else if (protocol == htons(ETH_P_IPV6))
-			skb_set_transport_header(skb, sizeof(*veh) +
-						 sizeof(struct ipv6hdr));
 	}
 
 	if (protocol == htons(ETH_P_IP)) {
@@ -684,20 +675,19 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
 */
 static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
 {
-	struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+	struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
 	struct efx_tso_header *tsoh;
 	dma_addr_t dma_addr;
 	u8 *base_kva, *kva;
 
-	base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
+	base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
 	if (base_kva == NULL) {
 		netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
 			  "Unable to allocate page for TSO headers\n");
 		return -ENOMEM;
 	}
 
-	/* pci_alloc_consistent() allocates pages. */
+	/* dma_alloc_coherent() allocates pages. */
 	EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
 
 	for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
@@ -714,7 +704,7 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
 /* Free up a TSO header, and all others in the same page. */
 static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
 				struct efx_tso_header *tsoh,
-				struct pci_dev *pci_dev)
+				struct device *dma_dev)
 {
 	struct efx_tso_header **p;
 	unsigned long base_kva;
@@ -731,7 +721,7 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
 		p = &(*p)->next;
 	}
 
-	pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
+	dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
 }
 
 static struct efx_tso_header *
@@ -743,11 +733,11 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
 	if (unlikely(!tsoh))
 		return NULL;
 
-	tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
+	tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
 					TSOH_BUFFER(tsoh), header_len,
-					PCI_DMA_TODEVICE);
-	if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
+					DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
 				       tsoh->dma_addr))) {
 		kfree(tsoh);
 		return NULL;
 	}
@@ -759,9 +749,9 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
 static void
 efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
 {
-	pci_unmap_single(tx_queue->efx->pci_dev,
+	dma_unmap_single(&tx_queue->efx->pci_dev->dev,
 			 tsoh->dma_addr, tsoh->unmap_len,
-			 PCI_DMA_TODEVICE);
+			 DMA_TO_DEVICE);
 	kfree(tsoh);
 }
@@ -892,13 +882,13 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
 			unmap_addr = (buffer->dma_addr + buffer->len -
 				      buffer->unmap_len);
 			if (buffer->unmap_single)
-				pci_unmap_single(tx_queue->efx->pci_dev,
+				dma_unmap_single(&tx_queue->efx->pci_dev->dev,
 						 unmap_addr, buffer->unmap_len,
-						 PCI_DMA_TODEVICE);
+						 DMA_TO_DEVICE);
 			else
-				pci_unmap_page(tx_queue->efx->pci_dev,
+				dma_unmap_page(&tx_queue->efx->pci_dev->dev,
 					       unmap_addr, buffer->unmap_len,
-					       PCI_DMA_TODEVICE);
+					       DMA_TO_DEVICE);
 			buffer->unmap_len = 0;
 		}
 		buffer->len = 0;
@@ -927,7 +917,6 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
 	EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
 
-	st->packet_space = st->full_packet_size;
 	st->out_len = skb->len - st->header_len;
 	st->unmap_len = 0;
 	st->unmap_single = false;
@@ -954,9 +943,9 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
 	int hl = st->header_len;
 	int len = skb_headlen(skb) - hl;
 
-	st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
-					len, PCI_DMA_TODEVICE);
-	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+	st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
+					len, DMA_TO_DEVICE);
+	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
 		st->unmap_single = true;
 		st->unmap_len = len;
 		st->in_len = len;
@@ -1008,7 +997,7 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
 	buffer->continuation = !end_of_packet;
 
 	if (st->in_len == 0) {
-		/* Transfer ownership of the pci mapping */
+		/* Transfer ownership of the DMA mapping */
 		buffer->unmap_len = st->unmap_len;
 		buffer->unmap_single = st->unmap_single;
 		st->unmap_len = 0;
@@ -1181,18 +1170,18 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
 
  mem_err:
 	netif_err(efx, tx_err, efx->net_dev,
-		  "Out of memory for TSO headers, or PCI mapping error\n");
+		  "Out of memory for TSO headers, or DMA mapping error\n");
 	dev_kfree_skb_any(skb);
 
  unwind:
 	/* Free the DMA mapping we were in the process of writing out */
 	if (state.unmap_len) {
 		if (state.unmap_single)
-			pci_unmap_single(efx->pci_dev, state.unmap_addr,
-					 state.unmap_len, PCI_DMA_TODEVICE);
+			dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
+					 state.unmap_len, DMA_TO_DEVICE);
 		else
-			pci_unmap_page(efx->pci_dev, state.unmap_addr,
-				       state.unmap_len, PCI_DMA_TODEVICE);
+			dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
+				       state.unmap_len, DMA_TO_DEVICE);
 	}
 
 	efx_enqueue_unwind(tx_queue);
@@ -1216,5 +1205,5 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
 	while (tx_queue->tso_headers_free != NULL)
 		efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
-				    tx_queue->efx->pci_dev);
+				    &tx_queue->efx->pci_dev->dev);
 }