Commit 514bedbc authored by Ben Hutchings

sfc: Remove efx_process_channel_now()

efx_process_channel_now() is unneeded since self-tests can rely on
normal NAPI polling.  Remove it and all calls to it.

efx_channel::work_pending and efx_channel_processed() are also
unneeded (the latter being the same as efx_nic_eventq_read_ack()).
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>

parent 8b8a95a1
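For orientation, here is what the removed helper boils down to once work_pending is gone. This is a restatement of the pre-patch code shown in the first hunk below, not new code in the commit:

	/* Pre-patch (see first hunk): clearing the flag was the only work
	 * besides the ack itself. */
	static inline void efx_channel_processed(struct efx_channel *channel)
	{
		channel->work_pending = false;	/* field removed by this commit */
		smp_wmb();			/* ordered the clear before the ack */
		efx_nic_eventq_read_ack(channel);
	}

	/* Post-patch, every caller uses efx_nic_eventq_read_ack() directly. */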
@@ -255,23 +255,6 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
 	return spent;
 }
 
-/* Mark channel as finished processing
- *
- * Note that since we will not receive further interrupts for this
- * channel before we finish processing and call the eventq_read_ack()
- * method, there is no need to use the interrupt hold-off timers.
- */
-static inline void efx_channel_processed(struct efx_channel *channel)
-{
-	/* The interrupt handler for this channel may set work_pending
-	 * as soon as we acknowledge the events we've seen.  Make sure
-	 * it's cleared before then. */
-	channel->work_pending = false;
-	smp_wmb();
-
-	efx_nic_eventq_read_ack(channel);
-}
-
 /* NAPI poll handler
  *
  * NAPI guarantees serialisation of polls of the same device, which
@@ -316,58 +299,16 @@ static int efx_poll(struct napi_struct *napi, int budget)
 
 		/* There is no race here; although napi_disable() will
 		 * only wait for napi_complete(), this isn't a problem
-		 * since efx_channel_processed() will have no effect if
+		 * since efx_nic_eventq_read_ack() will have no effect if
 		 * interrupts have already been disabled.
 		 */
 		napi_complete(napi);
-		efx_channel_processed(channel);
+		efx_nic_eventq_read_ack(channel);
 	}
 
 	return spent;
 }
 
-/* Process the eventq of the specified channel immediately on this CPU
- *
- * Disable hardware generated interrupts, wait for any existing
- * processing to finish, then directly poll (and ack ) the eventq.
- * Finally reenable NAPI and interrupts.
- *
- * This is for use only during a loopback self-test.  It must not
- * deliver any packets up the stack as this can result in deadlock.
- */
-void efx_process_channel_now(struct efx_channel *channel)
-{
-	struct efx_nic *efx = channel->efx;
-
-	BUG_ON(channel->channel >= efx->n_channels);
-	BUG_ON(!channel->enabled);
-	BUG_ON(!efx->loopback_selftest);
-
-	/* Disable interrupts and wait for ISRs to complete */
-	efx_nic_disable_interrupts(efx);
-	if (efx->legacy_irq) {
-		synchronize_irq(efx->legacy_irq);
-		efx->legacy_irq_enabled = false;
-	}
-	if (channel->irq)
-		synchronize_irq(channel->irq);
-
-	/* Wait for any NAPI processing to complete */
-	napi_disable(&channel->napi_str);
-
-	/* Poll the channel */
-	efx_process_channel(channel, channel->eventq_mask + 1);
-
-	/* Ack the eventq.  This may cause an interrupt to be generated
-	 * when they are reenabled */
-	efx_channel_processed(channel);
-
-	napi_enable(&channel->napi_str);
-	if (efx->legacy_irq)
-		efx->legacy_irq_enabled = true;
-	efx_nic_enable_interrupts(efx);
-}
-
 /* Create event queue
  * Event queue memory allocations are done only once. If the channel
  * is reset, the memory buffer will be reused; this guards against
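The function removed above did a one-shot synchronous poll: disable interrupts, drain NAPI, poll the event queue, ack it, then re-enable everything. After this commit a self-test instead waits while ordinary interrupt-driven NAPI polling drains the queues. A minimal sketch of such a wait loop; the helper name and timeout handling are illustrative, not taken from this commit:

	#include <linux/delay.h>
	#include <linux/errno.h>

	/* Illustrative only: sleep in short intervals until the loopback
	 * state shows every test packet received, letting normal NAPI
	 * polling do the event-queue processing in the background. */
	static int efx_wait_for_loopback_rx(struct efx_nic *efx,
					    unsigned int timeout_ms)
	{
		while (timeout_ms--) {
			if (efx_poll_loopback(efx))
				return 0;	/* all packets came back */
			msleep(1);
		}
		return -ETIMEDOUT;
	}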
@@ -407,11 +348,7 @@ static void efx_start_eventq(struct efx_channel *channel)
 	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
 		  "chan %d start event queue\n", channel->channel);
 
-	/* The interrupt handler for this channel may set work_pending
-	 * as soon as we enable it.  Make sure it's cleared before
-	 * then.  Similarly, make sure it sees the enabled flag set.
-	 */
-	channel->work_pending = false;
+	/* Make sure the NAPI handler sees the enabled flag set */
 	channel->enabled = true;
 	smp_wmb();
 
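The barrier kept here still pairs with the reader of the enabled flag: the writer publishes enabled = true before events can be scheduled, and the event-processing path must observe it. A sketch of the pairing, with the reader side paraphrased rather than quoted from this hunk:

	/* Writer side, as in the hunk above: */
	channel->enabled = true;
	smp_wmb();	/* publish the flag before event processing can run */

	/* Reader side, paraphrased from the poll handler's entry check: */
	if (unlikely(!channel->enabled))
		return 0;	/* channel not started; process no events */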
@@ -109,7 +109,6 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
 
 /* Channels */
 extern int efx_channel_dummy_op_int(struct efx_channel *channel);
 extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern void efx_process_channel_now(struct efx_channel *channel);
 extern int
 efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
@@ -155,7 +154,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
 	netif_vdbg(channel->efx, intr, channel->efx->net_dev,
 		   "channel %d scheduling NAPI poll on CPU%d\n",
 		   channel->channel, raw_smp_processor_id());
-	channel->work_pending = true;
 
 	napi_schedule(&channel->napi_str);
 }
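With the flag gone, scheduling is a straight handoff to NAPI: the interrupt handler calls efx_schedule_channel(), which calls napi_schedule(), and the kernel later invokes the poll callback registered with netif_napi_add(). A sketch of the registration side using the standard NAPI API of this era; the weight value shown is illustrative:

	/* During channel setup: register efx_poll() as the NAPI handler. */
	netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, 64);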
@@ -361,7 +361,6 @@ enum efx_rx_alloc_method {
  * @irq_moderation: IRQ moderation value (in hardware ticks)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
- * @work_pending: Is work pending via NAPI?
  * @eventq: Event queue buffer
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
@@ -393,7 +392,6 @@ struct efx_channel {
 	unsigned int irq_moderation;
 	struct net_device *napi_dev;
 	struct napi_struct napi_str;
-	bool work_pending;
 	struct efx_special_buffer eventq;
 	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
@@ -447,14 +447,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
 
 static int efx_poll_loopback(struct efx_nic *efx)
 {
 	struct efx_loopback_state *state = efx->loopback_selftest;
-	struct efx_channel *channel;
 
-	/* NAPI polling is not enabled, so process channels
-	 * synchronously */
-	efx_for_each_channel(channel, efx) {
-		if (channel->work_pending)
-			efx_process_channel_now(channel);
-	}
 	return atomic_read(&state->rx_good) == state->packet_count;
 }
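After the hunk above, efx_poll_loopback() is a pure completion check. Reconstructed from the kept lines, it reads:

	static int efx_poll_loopback(struct efx_nic *efx)
	{
		struct efx_loopback_state *state = efx->loopback_selftest;

		/* NAPI has already processed the event queues; just check
		 * whether every test packet has come back. */
		return atomic_read(&state->rx_good) == state->packet_count;
	}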
@@ -586,10 +579,6 @@ static int efx_wait_for_link(struct efx_nic *efx)
 		mutex_lock(&efx->mac_lock);
 		efx->type->monitor(efx);
 		mutex_unlock(&efx->mac_lock);
-	} else {
-		struct efx_channel *channel = efx_get_channel(efx, 0);
-		if (channel->work_pending)
-			efx_process_channel_now(channel);
 	}
 
 	mutex_lock(&efx->mac_lock);