Commit 7f9e4b2a authored by Martin Habets, committed by Jakub Kicinski

sfc/siena: Rename RX/TX functions to avoid conflicts with sfc

For Siena, use efx_siena_ as the function prefix.
Several functions are not used in Siena, so they are removed.

Use a Siena-specific variable name for the module parameter
efx_separate_tx_channels.
Move efx_fini_tx_queue() to avoid a forward declaration of
efx_dequeue_buffer().
Signed-off-by: Martin Habets <habetsm.xilinx@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 71ad88f6
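As the first hunk below shows, module_param() is replaced by module_param_named() so the renamed C variable keeps its original user-visible parameter name. A minimal stand-alone sketch of that pattern follows; the demo_* identifiers are hypothetical and not part of this commit:

/* Sketch of the module_param_named() pattern: the backing variable is
 * renamed, but the parameter name seen by users stays the same.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>

static bool demo_siena_separate_tx_channels;	/* hypothetical renamed variable */
module_param_named(efx_separate_tx_channels,	/* user-visible parameter name */
		   demo_siena_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
		 "Use separate channels for TX and RX");

static int __init demo_init(void)
{
	pr_info("separate TX channels: %d\n", demo_siena_separate_tx_channels);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");

With this pattern, existing invocations such as "modprobe <module> efx_separate_tx_channels=1" and the /sys/module/<module>/parameters/efx_separate_tx_channels entry keep working unchanged, which is why the commit renames only the variable and not the parameter.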
...@@ -58,8 +58,9 @@ MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); ...@@ -58,8 +58,9 @@ MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
* *
* This is only used in MSI-X interrupt mode * This is only used in MSI-X interrupt mode
*/ */
bool efx_separate_tx_channels; bool efx_siena_separate_tx_channels;
module_param(efx_separate_tx_channels, bool, 0444); module_param_named(efx_separate_tx_channels, efx_siena_separate_tx_channels,
bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels, MODULE_PARM_DESC(efx_separate_tx_channels,
"Use separate channels for TX and RX"); "Use separate channels for TX and RX");
...@@ -306,7 +307,7 @@ static int efx_probe_nic(struct efx_nic *efx) ...@@ -306,7 +307,7 @@ static int efx_probe_nic(struct efx_nic *efx)
if (efx->n_channels > 1) if (efx->n_channels > 1)
netdev_rss_key_fill(efx->rss_context.rx_hash_key, netdev_rss_key_fill(efx->rss_context.rx_hash_key,
sizeof(efx->rss_context.rx_hash_key)); sizeof(efx->rss_context.rx_hash_key));
efx_set_default_rx_indir_table(efx, &efx->rss_context); efx_siena_set_default_rx_indir_table(efx, &efx->rss_context);
/* Initialise the interrupt moderation settings */ /* Initialise the interrupt moderation settings */
efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
...@@ -366,7 +367,7 @@ static int efx_probe_all(struct efx_nic *efx) ...@@ -366,7 +367,7 @@ static int efx_probe_all(struct efx_nic *efx)
" VFs may not function\n", rc); " VFs may not function\n", rc);
#endif #endif
rc = efx_probe_filters(efx); rc = efx_siena_probe_filters(efx);
if (rc) { if (rc) {
netif_err(efx, probe, efx->net_dev, netif_err(efx, probe, efx->net_dev,
"failed to create filter tables\n"); "failed to create filter tables\n");
...@@ -380,7 +381,7 @@ static int efx_probe_all(struct efx_nic *efx) ...@@ -380,7 +381,7 @@ static int efx_probe_all(struct efx_nic *efx)
return 0; return 0;
fail5: fail5:
efx_remove_filters(efx); efx_siena_remove_filters(efx);
fail4: fail4:
#ifdef CONFIG_SFC_SRIOV #ifdef CONFIG_SFC_SRIOV
efx->type->vswitching_remove(efx); efx->type->vswitching_remove(efx);
...@@ -400,7 +401,7 @@ static void efx_remove_all(struct efx_nic *efx) ...@@ -400,7 +401,7 @@ static void efx_remove_all(struct efx_nic *efx)
rtnl_unlock(); rtnl_unlock();
efx_siena_remove_channels(efx); efx_siena_remove_channels(efx);
efx_remove_filters(efx); efx_siena_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV #ifdef CONFIG_SFC_SRIOV
efx->type->vswitching_remove(efx); efx->type->vswitching_remove(efx);
#endif #endif
...@@ -602,7 +603,7 @@ static const struct net_device_ops efx_netdev_ops = { ...@@ -602,7 +603,7 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_get_phys_port_name = efx_siena_get_phys_port_name, .ndo_get_phys_port_name = efx_siena_get_phys_port_name,
.ndo_setup_tc = efx_siena_setup_tc, .ndo_setup_tc = efx_siena_setup_tc,
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs, .ndo_rx_flow_steer = efx_siena_filter_rfs,
#endif #endif
.ndo_xdp_xmit = efx_xdp_xmit, .ndo_xdp_xmit = efx_xdp_xmit,
.ndo_bpf = efx_xdp .ndo_bpf = efx_xdp
......
...@@ -44,7 +44,7 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel) ...@@ -44,7 +44,7 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
* TSO skbs. * TSO skbs.
*/ */
#define EFX_RXQ_MIN_ENT 128U #define EFX_RXQ_MIN_ENT 128U
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) #define EFX_TXQ_MIN_ENT(efx) (2 * efx_siena_tx_max_skb_descs(efx))
/* All EF10 architecture NICs steal one bit of the DMAQ size for various /* All EF10 architecture NICs steal one bit of the DMAQ size for various
* other purposes when counting TxQ entries, so we halve the queue size. * other purposes when counting TxQ entries, so we halve the queue size.
...@@ -78,7 +78,7 @@ static inline bool efx_rss_enabled(struct efx_nic *efx) ...@@ -78,7 +78,7 @@ static inline bool efx_rss_enabled(struct efx_nic *efx)
* *
* 2. If the existing filters have higher priority, return -%EPERM. * 2. If the existing filters have higher priority, return -%EPERM.
* *
* 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not * 3. If !efx_siena_filter_is_mc_recipient(@spec), or the NIC does not
* support delivery to multiple recipients, return -%EEXIST. * support delivery to multiple recipients, return -%EEXIST.
* *
* This implies that filters for multiple multicast recipients must * This implies that filters for multiple multicast recipients must
......
...@@ -138,7 +138,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, ...@@ -138,7 +138,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
int n_xdp_tx; int n_xdp_tx;
int n_xdp_ev; int n_xdp_ev;
if (efx_separate_tx_channels) if (efx_siena_separate_tx_channels)
n_channels *= 2; n_channels *= 2;
n_channels += extra_channels; n_channels += extra_channels;
...@@ -220,7 +220,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, ...@@ -220,7 +220,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
/* Ignore XDP tx channels when creating rx channels. */ /* Ignore XDP tx channels when creating rx channels. */
n_channels -= efx->n_xdp_channels; n_channels -= efx->n_xdp_channels;
if (efx_separate_tx_channels) { if (efx_siena_separate_tx_channels) {
efx->n_tx_channels = efx->n_tx_channels =
min(max(n_channels / 2, 1U), min(max(n_channels / 2, 1U),
efx->max_tx_channels); efx->max_tx_channels);
...@@ -321,7 +321,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx) ...@@ -321,7 +321,7 @@ int efx_siena_probe_interrupts(struct efx_nic *efx)
/* Assume legacy interrupts */ /* Assume legacy interrupts */
if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0); efx->n_channels = 1 + (efx_siena_separate_tx_channels ? 1 : 0);
efx->n_rx_channels = 1; efx->n_rx_channels = 1;
efx->n_tx_channels = 1; efx->n_tx_channels = 1;
efx->n_xdp_channels = 0; efx->n_xdp_channels = 0;
...@@ -521,7 +521,8 @@ static void efx_filter_rfs_expire(struct work_struct *data) ...@@ -521,7 +521,8 @@ static void efx_filter_rfs_expire(struct work_struct *data)
channel = container_of(dwork, struct efx_channel, filter_work); channel = container_of(dwork, struct efx_channel, filter_work);
time = jiffies - channel->rfs_last_expiry; time = jiffies - channel->rfs_last_expiry;
quota = channel->rfs_filter_count * time / (30 * HZ); quota = channel->rfs_filter_count * time / (30 * HZ);
if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota))) if (quota >= 20 && __efx_siena_filter_rfs_expire(channel,
min(channel->rfs_filter_count, quota)))
channel->rfs_last_expiry += time; channel->rfs_last_expiry += time;
/* Ensure we do more work eventually even if NAPI poll is not happening */ /* Ensure we do more work eventually even if NAPI poll is not happening */
schedule_delayed_work(dwork, 30 * HZ); schedule_delayed_work(dwork, 30 * HZ);
...@@ -558,7 +559,7 @@ static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i) ...@@ -558,7 +559,7 @@ static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
rx_queue = &channel->rx_queue; rx_queue = &channel->rx_queue;
rx_queue->efx = efx; rx_queue->efx = efx;
timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
return channel; return channel;
} }
...@@ -631,7 +632,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel) ...@@ -631,7 +632,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
rx_queue = &channel->rx_queue; rx_queue = &channel->rx_queue;
rx_queue->buffer = NULL; rx_queue->buffer = NULL;
memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); timer_setup(&rx_queue->slow_fill, efx_siena_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire); INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif #endif
...@@ -657,13 +658,13 @@ static int efx_probe_channel(struct efx_channel *channel) ...@@ -657,13 +658,13 @@ static int efx_probe_channel(struct efx_channel *channel)
goto fail; goto fail;
efx_for_each_channel_tx_queue(tx_queue, channel) { efx_for_each_channel_tx_queue(tx_queue, channel) {
rc = efx_probe_tx_queue(tx_queue); rc = efx_siena_probe_tx_queue(tx_queue);
if (rc) if (rc)
goto fail; goto fail;
} }
efx_for_each_channel_rx_queue(rx_queue, channel) { efx_for_each_channel_rx_queue(rx_queue, channel) {
rc = efx_probe_rx_queue(rx_queue); rc = efx_siena_probe_rx_queue(rx_queue);
if (rc) if (rc)
goto fail; goto fail;
} }
...@@ -751,9 +752,9 @@ void efx_siena_remove_channel(struct efx_channel *channel) ...@@ -751,9 +752,9 @@ void efx_siena_remove_channel(struct efx_channel *channel)
"destroy chan %d\n", channel->channel); "destroy chan %d\n", channel->channel);
efx_for_each_channel_rx_queue(rx_queue, channel) efx_for_each_channel_rx_queue(rx_queue, channel)
efx_remove_rx_queue(rx_queue); efx_siena_remove_rx_queue(rx_queue);
efx_for_each_channel_tx_queue(tx_queue, channel) efx_for_each_channel_tx_queue(tx_queue, channel)
efx_remove_tx_queue(tx_queue); efx_siena_remove_tx_queue(tx_queue);
efx_remove_eventq(channel); efx_remove_eventq(channel);
channel->type->post_remove(channel); channel->type->post_remove(channel);
} }
...@@ -963,7 +964,7 @@ int efx_siena_set_channels(struct efx_nic *efx) ...@@ -963,7 +964,7 @@ int efx_siena_set_channels(struct efx_nic *efx)
int rc; int rc;
efx->tx_channel_offset = efx->tx_channel_offset =
efx_separate_tx_channels ? efx_siena_separate_tx_channels ?
efx->n_channels - efx->n_tx_channels : 0; efx->n_channels - efx->n_tx_channels : 0;
if (efx->xdp_tx_queue_count) { if (efx->xdp_tx_queue_count) {
...@@ -1130,15 +1131,15 @@ void efx_siena_start_channels(struct efx_nic *efx) ...@@ -1130,15 +1131,15 @@ void efx_siena_start_channels(struct efx_nic *efx)
efx_for_each_channel_rev(channel, efx) { efx_for_each_channel_rev(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) { efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_init_tx_queue(tx_queue); efx_siena_init_tx_queue(tx_queue);
atomic_inc(&efx->active_queues); atomic_inc(&efx->active_queues);
} }
efx_for_each_channel_rx_queue(rx_queue, channel) { efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue); efx_siena_init_rx_queue(rx_queue);
atomic_inc(&efx->active_queues); atomic_inc(&efx->active_queues);
efx_siena_stop_eventq(channel); efx_siena_stop_eventq(channel);
efx_fast_push_rx_descriptors(rx_queue, false); efx_siena_fast_push_rx_descriptors(rx_queue, false);
efx_siena_start_eventq(channel); efx_siena_start_eventq(channel);
} }
...@@ -1184,9 +1185,9 @@ void efx_siena_stop_channels(struct efx_nic *efx) ...@@ -1184,9 +1185,9 @@ void efx_siena_stop_channels(struct efx_nic *efx)
efx_for_each_channel(channel, efx) { efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel) efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue); efx_siena_fini_rx_queue(rx_queue);
efx_for_each_channel_tx_queue(tx_queue, channel) efx_for_each_channel_tx_queue(tx_queue, channel)
efx_fini_tx_queue(tx_queue); efx_siena_fini_tx_queue(tx_queue);
} }
} }
...@@ -1228,7 +1229,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget) ...@@ -1228,7 +1229,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel); efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel); efx_rx_flush_packet(channel);
efx_fast_push_rx_descriptors(rx_queue, true); efx_siena_fast_push_rx_descriptors(rx_queue, true);
} }
/* Update BQL */ /* Update BQL */
......
...@@ -395,7 +395,7 @@ static void efx_start_datapath(struct efx_nic *efx) ...@@ -395,7 +395,7 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_buffer_order = get_order(rx_buf_len); efx->rx_buffer_order = get_order(rx_buf_len);
} }
efx_rx_config_page_split(efx); efx_siena_rx_config_page_split(efx);
if (efx->rx_buffer_order) if (efx->rx_buffer_order)
netif_dbg(efx, drv, efx->net_dev, netif_dbg(efx, drv, efx->net_dev,
"RX buf len=%u; page order=%u batch=%u\n", "RX buf len=%u; page order=%u batch=%u\n",
...@@ -428,7 +428,7 @@ static void efx_start_datapath(struct efx_nic *efx) ...@@ -428,7 +428,7 @@ static void efx_start_datapath(struct efx_nic *efx)
* the ring completely. We wake it when half way back to * the ring completely. We wake it when half way back to
* empty. * empty.
*/ */
efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx); efx->txq_stop_thresh = efx->txq_entries - efx_siena_tx_max_skb_descs(efx);
efx->txq_wake_thresh = efx->txq_stop_thresh / 2; efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
/* Initialise the channels */ /* Initialise the channels */
......
...@@ -824,7 +824,8 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev, ...@@ -824,7 +824,8 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev,
mutex_lock(&efx->rss_lock); mutex_lock(&efx->rss_lock);
if (info->flow_type & FLOW_RSS && info->rss_context) { if (info->flow_type & FLOW_RSS && info->rss_context) {
ctx = efx_find_rss_context_entry(efx, info->rss_context); ctx = efx_siena_find_rss_context_entry(efx,
info->rss_context);
if (!ctx) { if (!ctx) {
rc = -ENOENT; rc = -ENOENT;
goto out_unlock; goto out_unlock;
...@@ -1213,7 +1214,7 @@ int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir, ...@@ -1213,7 +1214,7 @@ int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
return -EOPNOTSUPP; return -EOPNOTSUPP;
mutex_lock(&efx->rss_lock); mutex_lock(&efx->rss_lock);
ctx = efx_find_rss_context_entry(efx, rss_context); ctx = efx_siena_find_rss_context_entry(efx, rss_context);
if (!ctx) { if (!ctx) {
rc = -ENOENT; rc = -ENOENT;
goto out_unlock; goto out_unlock;
...@@ -1257,18 +1258,18 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev, ...@@ -1257,18 +1258,18 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
rc = -EINVAL; rc = -EINVAL;
goto out_unlock; goto out_unlock;
} }
ctx = efx_alloc_rss_context_entry(efx); ctx = efx_siena_alloc_rss_context_entry(efx);
if (!ctx) { if (!ctx) {
rc = -ENOMEM; rc = -ENOMEM;
goto out_unlock; goto out_unlock;
} }
ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Initialise indir table and key to defaults */ /* Initialise indir table and key to defaults */
efx_set_default_rx_indir_table(efx, ctx); efx_siena_set_default_rx_indir_table(efx, ctx);
netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key)); netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
allocated = true; allocated = true;
} else { } else {
ctx = efx_find_rss_context_entry(efx, *rss_context); ctx = efx_siena_find_rss_context_entry(efx, *rss_context);
if (!ctx) { if (!ctx) {
rc = -ENOENT; rc = -ENOENT;
goto out_unlock; goto out_unlock;
...@@ -1279,7 +1280,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev, ...@@ -1279,7 +1280,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
/* delete this context */ /* delete this context */
rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
if (!rc) if (!rc)
efx_free_rss_context_entry(ctx); efx_siena_free_rss_context_entry(ctx);
goto out_unlock; goto out_unlock;
} }
...@@ -1290,7 +1291,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev, ...@@ -1290,7 +1291,7 @@ int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
if (rc && allocated) if (rc && allocated)
efx_free_rss_context_entry(ctx); efx_siena_free_rss_context_entry(ctx);
else else
*rss_context = ctx->user_id; *rss_context = ctx->user_id;
out_unlock: out_unlock:
......
...@@ -1160,7 +1160,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel, ...@@ -1160,7 +1160,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel,
/* The queue must be empty, so we won't receive any rx /* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the * events, so efx_process_channel() won't refill the
* queue. Refill it here */ * queue. Refill it here */
efx_fast_push_rx_descriptors(rx_queue, true); efx_siena_fast_push_rx_descriptors(rx_queue, true);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
efx_farch_handle_drain_event(channel); efx_farch_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
...@@ -2925,13 +2925,14 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, ...@@ -2925,13 +2925,14 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
*/ */
arfs_id = 0; arfs_id = 0;
} else { } else {
rule = efx_rps_hash_find(efx, &spec); rule = efx_siena_rps_hash_find(efx, &spec);
if (!rule) { if (!rule) {
/* ARFS table doesn't know of this filter, remove it */ /* ARFS table doesn't know of this filter, remove it */
force = true; force = true;
} else { } else {
arfs_id = rule->arfs_id; arfs_id = rule->arfs_id;
if (!efx_rps_check_rule(rule, index, &force)) if (!efx_siena_rps_check_rule(rule, index,
&force))
goto out_unlock; goto out_unlock;
} }
} }
...@@ -2939,7 +2940,7 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, ...@@ -2939,7 +2940,7 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
flow_id, arfs_id)) { flow_id, arfs_id)) {
if (rule) if (rule)
rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
efx_rps_hash_del(efx, &spec); efx_siena_rps_hash_del(efx, &spec);
efx_farch_filter_table_clear_entry(efx, table, index); efx_farch_filter_table_clear_entry(efx, table, index);
ret = true; ret = true;
} }
......
...@@ -157,7 +157,7 @@ void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, ...@@ -157,7 +157,7 @@ void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
*/ */
if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
efx_rx_flush_packet(channel); efx_rx_flush_packet(channel);
efx_discard_rx_packet(channel, rx_buf, n_frags); efx_siena_discard_rx_packet(channel, rx_buf, n_frags);
return; return;
} }
...@@ -195,7 +195,7 @@ void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, ...@@ -195,7 +195,7 @@ void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
/* All fragments have been DMA-synced, so recycle pages. */ /* All fragments have been DMA-synced, so recycle pages. */
rx_buf = efx_rx_buffer(rx_queue, index); rx_buf = efx_rx_buffer(rx_queue, index);
efx_recycle_rx_pages(channel, rx_buf, n_frags); efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);
/* Pipeline receives so that we give time for packet headers to be /* Pipeline receives so that we give time for packet headers to be
* prefetched into cache. * prefetched into cache.
...@@ -217,7 +217,7 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh, ...@@ -217,7 +217,7 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
struct efx_rx_queue *rx_queue; struct efx_rx_queue *rx_queue;
rx_queue = efx_channel_get_rx_queue(channel); rx_queue = efx_channel_get_rx_queue(channel);
efx_free_rx_buffers(rx_queue, rx_buf, n_frags); efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
return; return;
} }
skb_record_rx_queue(skb, channel->rx_queue.core_index); skb_record_rx_queue(skb, channel->rx_queue.core_index);
...@@ -268,8 +268,8 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, ...@@ -268,8 +268,8 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
if (unlikely(channel->rx_pkt_n_frags > 1)) { if (unlikely(channel->rx_pkt_n_frags > 1)) {
/* We can't do XDP on fragmented packets - drop. */ /* We can't do XDP on fragmented packets - drop. */
efx_free_rx_buffers(rx_queue, rx_buf, efx_siena_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags); channel->rx_pkt_n_frags);
if (net_ratelimit()) if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev, netif_err(efx, rx_err, efx->net_dev,
"XDP is not possible with multiple receive fragments (%d)\n", "XDP is not possible with multiple receive fragments (%d)\n",
...@@ -312,7 +312,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, ...@@ -312,7 +312,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
xdpf = xdp_convert_buff_to_frame(&xdp); xdpf = xdp_convert_buff_to_frame(&xdp);
err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true); err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true);
if (unlikely(err != 1)) { if (unlikely(err != 1)) {
efx_free_rx_buffers(rx_queue, rx_buf, 1); efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
if (net_ratelimit()) if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev, netif_err(efx, rx_err, efx->net_dev,
"XDP TX failed (%d)\n", err); "XDP TX failed (%d)\n", err);
...@@ -326,7 +326,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, ...@@ -326,7 +326,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
case XDP_REDIRECT: case XDP_REDIRECT:
err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog); err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
if (unlikely(err)) { if (unlikely(err)) {
efx_free_rx_buffers(rx_queue, rx_buf, 1); efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
if (net_ratelimit()) if (net_ratelimit())
netif_err(efx, rx_err, efx->net_dev, netif_err(efx, rx_err, efx->net_dev,
"XDP redirect failed (%d)\n", err); "XDP redirect failed (%d)\n", err);
...@@ -339,7 +339,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, ...@@ -339,7 +339,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
default: default:
bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act); bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
efx_free_rx_buffers(rx_queue, rx_buf, 1); efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
channel->n_rx_xdp_bad_drops++; channel->n_rx_xdp_bad_drops++;
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
break; break;
...@@ -348,7 +348,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel, ...@@ -348,7 +348,7 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act); trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
fallthrough; fallthrough;
case XDP_DROP: case XDP_DROP:
efx_free_rx_buffers(rx_queue, rx_buf, 1); efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
channel->n_rx_xdp_drops++; channel->n_rx_xdp_drops++;
break; break;
} }
...@@ -379,8 +379,8 @@ void __efx_siena_rx_packet(struct efx_channel *channel) ...@@ -379,8 +379,8 @@ void __efx_siena_rx_packet(struct efx_channel *channel)
efx_loopback_rx_packet(efx, eh, rx_buf->len); efx_loopback_rx_packet(efx, eh, rx_buf->len);
rx_queue = efx_channel_get_rx_queue(channel); rx_queue = efx_channel_get_rx_queue(channel);
efx_free_rx_buffers(rx_queue, rx_buf, efx_siena_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags); channel->rx_pkt_n_frags);
goto out; goto out;
} }
......
...@@ -43,26 +43,19 @@ static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh) ...@@ -43,26 +43,19 @@ static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
#endif #endif
} }
void efx_rx_slow_fill(struct timer_list *t); void efx_siena_rx_slow_fill(struct timer_list *t);
void efx_recycle_rx_pages(struct efx_channel *channel, void efx_siena_recycle_rx_pages(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf, struct efx_rx_buffer *rx_buf,
unsigned int n_frags); unsigned int n_frags);
void efx_discard_rx_packet(struct efx_channel *channel, void efx_siena_discard_rx_packet(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf, struct efx_rx_buffer *rx_buf,
unsigned int n_frags); unsigned int n_frags);
int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_queue(struct efx_rx_queue *rx_queue); void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue);
void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue);
void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue);
void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
struct page *page,
unsigned int page_offset,
u16 flags);
void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf);
static inline void efx_sync_rx_buffer(struct efx_nic *efx, static inline void efx_sync_rx_buffer(struct efx_nic *efx,
struct efx_rx_buffer *rx_buf, struct efx_rx_buffer *rx_buf,
...@@ -72,46 +65,46 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx, ...@@ -72,46 +65,46 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
} }
void efx_free_rx_buffers(struct efx_rx_queue *rx_queue, void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
struct efx_rx_buffer *rx_buf, struct efx_rx_buffer *rx_buf,
unsigned int num_bufs); unsigned int num_bufs);
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); void efx_siena_rx_config_page_split(struct efx_nic *efx);
void efx_rx_config_page_split(struct efx_nic *efx); void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic); bool atomic);
void void
efx_siena_rx_packet_gro(struct efx_channel *channel, efx_siena_rx_packet_gro(struct efx_channel *channel,
struct efx_rx_buffer *rx_buf, struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh, __wsum csum); unsigned int n_frags, u8 *eh, __wsum csum);
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
void efx_free_rss_context_entry(struct efx_rss_context *ctx); u32 id);
void efx_set_default_rx_indir_table(struct efx_nic *efx, void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx);
struct efx_rss_context *ctx); void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
struct efx_rss_context *ctx);
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec);
bool efx_filter_spec_equal(const struct efx_filter_spec *left, bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
const struct efx_filter_spec *right); const struct efx_filter_spec *right);
u32 efx_filter_spec_hash(const struct efx_filter_spec *spec); u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec);
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
bool *force); unsigned int filter_idx, bool *force);
struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
const struct efx_filter_spec *spec); const struct efx_filter_spec *spec);
struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, void efx_siena_rps_hash_del(struct efx_nic *efx,
const struct efx_filter_spec *spec, const struct efx_filter_spec *spec);
bool *new);
void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec); int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
u16 rxq_index, u32 flow_id); unsigned int quota);
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
#endif #endif
int efx_probe_filters(struct efx_nic *efx); int efx_siena_probe_filters(struct efx_nic *efx);
void efx_remove_filters(struct efx_nic *efx); void efx_siena_remove_filters(struct efx_nic *efx);
#endif #endif
...@@ -41,14 +41,6 @@ static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue, ...@@ -41,14 +41,6 @@ static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
return (u8 *)page_buf->addr + offset; return (u8 *)page_buf->addr + offset;
} }
u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer, size_t len)
{
if (len > EFX_TX_CB_SIZE)
return NULL;
return efx_tx_get_copy_buffer(tx_queue, buffer);
}
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{ {
/* We need to consider all queues that the net core sees as one */ /* We need to consider all queues that the net core sees as one */
...@@ -164,7 +156,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue, ...@@ -164,7 +156,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
* size limit. * size limit.
*/ */
if (segments) { if (segments) {
rc = efx_tx_tso_fallback(tx_queue, skb); rc = efx_siena_tx_tso_fallback(tx_queue, skb);
tx_queue->tso_fallbacks++; tx_queue->tso_fallbacks++;
if (rc == 0) if (rc == 0)
return 0; return 0;
...@@ -178,7 +170,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue, ...@@ -178,7 +170,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
} }
/* Map for DMA and create descriptors if we haven't done so already. */ /* Map for DMA and create descriptors if we haven't done so already. */
if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments))) if (!data_mapped && (efx_siena_tx_map_data(tx_queue, skb, segments)))
goto err; goto err;
efx_tx_maybe_stop_queue(tx_queue); efx_tx_maybe_stop_queue(tx_queue);
...@@ -201,7 +193,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue, ...@@ -201,7 +193,7 @@ netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
err: err:
efx_enqueue_unwind(tx_queue, old_insert_count); efx_siena_enqueue_unwind(tx_queue, old_insert_count);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
/* If we're not expecting another transmit and we had something to push /* If we're not expecting another transmit and we had something to push
...@@ -285,7 +277,7 @@ int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpf ...@@ -285,7 +277,7 @@ int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpf
break; break;
/* Create descriptor and set up for unmapping DMA. */ /* Create descriptor and set up for unmapping DMA. */
tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len); tx_buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
tx_buffer->xdpf = xdpf; tx_buffer->xdpf = xdpf;
tx_buffer->flags = EFX_TX_BUF_XDP | tx_buffer->flags = EFX_TX_BUF_XDP |
EFX_TX_BUF_MAP_SINGLE; EFX_TX_BUF_MAP_SINGLE;
......
...@@ -11,13 +11,6 @@ ...@@ -11,13 +11,6 @@
#include <linux/types.h> #include <linux/types.h>
/* Driver internal tx-path related declarations. */ /* Driver internal tx-path related declarations. */
unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer, size_t len);
/* What TXQ type will satisfy the checksum offloads required for this skb? */ /* What TXQ type will satisfy the checksum offloads required for this skb? */
static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb) static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
{ {
......
...@@ -19,7 +19,7 @@ static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue) ...@@ -19,7 +19,7 @@ static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
PAGE_SIZE >> EFX_TX_CB_ORDER); PAGE_SIZE >> EFX_TX_CB_ORDER);
} }
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue)
{ {
struct efx_nic *efx = tx_queue->efx; struct efx_nic *efx = tx_queue->efx;
unsigned int entries; unsigned int entries;
...@@ -64,7 +64,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) ...@@ -64,7 +64,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
return rc; return rc;
} }
void efx_init_tx_queue(struct efx_tx_queue *tx_queue) void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue)
{ {
struct efx_nic *efx = tx_queue->efx; struct efx_nic *efx = tx_queue->efx;
...@@ -94,32 +94,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) ...@@ -94,32 +94,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->initialised = true; tx_queue->initialised = true;
} }
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"shutting down TX queue %d\n", tx_queue->queue);
tx_queue->initialised = false;
if (!tx_queue->buffer)
return;
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
++tx_queue->read_count;
}
tx_queue->xmit_pending = false;
netdev_tx_reset_queue(tx_queue->core_txq);
}
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{ {
int i; int i;
...@@ -143,10 +118,10 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) ...@@ -143,10 +118,10 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL; tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
} }
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer, struct efx_tx_buffer *buffer,
unsigned int *pkts_compl, unsigned int *pkts_compl,
unsigned int *bytes_compl) unsigned int *bytes_compl)
{ {
if (buffer->unmap_len) { if (buffer->unmap_len) {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev; struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
...@@ -191,6 +166,29 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, ...@@ -191,6 +166,29 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
buffer->flags = 0; buffer->flags = 0;
} }
void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"shutting down TX queue %d\n", tx_queue->queue);
if (!tx_queue->buffer)
return;
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
++tx_queue->read_count;
}
tx_queue->xmit_pending = false;
netdev_tx_reset_queue(tx_queue->core_txq);
}
/* Remove packets from the TX queue /* Remove packets from the TX queue
* *
* This removes packets from the TX queue, up to and including the * This removes packets from the TX queue, up to and including the
...@@ -271,8 +269,8 @@ void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) ...@@ -271,8 +269,8 @@ void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
/* Remove buffers put into a tx_queue for the current packet. /* Remove buffers put into a tx_queue for the current packet.
* None of the buffers must have an skb attached. * None of the buffers must have an skb attached.
*/ */
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count) unsigned int insert_count)
{ {
struct efx_tx_buffer *buffer; struct efx_tx_buffer *buffer;
unsigned int bytes_compl = 0; unsigned int bytes_compl = 0;
...@@ -286,8 +284,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, ...@@ -286,8 +284,8 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
} }
} }
struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, size_t len) dma_addr_t dma_addr, size_t len)
{ {
const struct efx_nic_type *nic_type = tx_queue->efx->type; const struct efx_nic_type *nic_type = tx_queue->efx->type;
struct efx_tx_buffer *buffer; struct efx_tx_buffer *buffer;
...@@ -313,7 +311,7 @@ struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, ...@@ -313,7 +311,7 @@ struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
return buffer; return buffer;
} }
int efx_tx_tso_header_length(struct sk_buff *skb) static int efx_tx_tso_header_length(struct sk_buff *skb)
{ {
size_t header_len; size_t header_len;
...@@ -328,8 +326,8 @@ int efx_tx_tso_header_length(struct sk_buff *skb) ...@@ -328,8 +326,8 @@ int efx_tx_tso_header_length(struct sk_buff *skb)
} }
/* Map all data from an SKB for DMA and create descriptors on the queue. */ /* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
unsigned int segment_count) unsigned int segment_count)
{ {
struct efx_nic *efx = tx_queue->efx; struct efx_nic *efx = tx_queue->efx;
struct device *dma_dev = &efx->pci_dev->dev; struct device *dma_dev = &efx->pci_dev->dev;
...@@ -359,7 +357,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, ...@@ -359,7 +357,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
if (header_len != len) { if (header_len != len) {
tx_queue->tso_long_headers++; tx_queue->tso_long_headers++;
efx_tx_map_chunk(tx_queue, dma_addr, header_len); efx_siena_tx_map_chunk(tx_queue, dma_addr, header_len);
len -= header_len; len -= header_len;
dma_addr += header_len; dma_addr += header_len;
} }
...@@ -370,7 +368,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, ...@@ -370,7 +368,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
struct efx_tx_buffer *buffer; struct efx_tx_buffer *buffer;
skb_frag_t *fragment; skb_frag_t *fragment;
buffer = efx_tx_map_chunk(tx_queue, dma_addr, len); buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
/* The final descriptor for a fragment is responsible for /* The final descriptor for a fragment is responsible for
* unmapping the whole fragment. * unmapping the whole fragment.
...@@ -402,7 +400,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, ...@@ -402,7 +400,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
} while (1); } while (1);
} }
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx)
{ {
/* Header and payload descriptor for each output segment, plus /* Header and payload descriptor for each output segment, plus
* one for every input fragment boundary within a segment * one for every input fragment boundary within a segment
...@@ -430,7 +428,8 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) ...@@ -430,7 +428,8 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
* *
* Returns 0 on success, error code otherwise. * Returns 0 on success, error code otherwise.
*/ */
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb) int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue,
struct sk_buff *skb)
{ {
struct sk_buff *segments, *next; struct sk_buff *segments, *next;
......
...@@ -11,15 +11,10 @@ ...@@ -11,15 +11,10 @@
#ifndef EFX_TX_COMMON_H #ifndef EFX_TX_COMMON_H
#define EFX_TX_COMMON_H #define EFX_TX_COMMON_H
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue);
void efx_init_tx_queue(struct efx_tx_queue *tx_queue); void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue);
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue);
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue);
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer,
unsigned int *pkts_compl,
unsigned int *bytes_compl);
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
{ {
...@@ -29,17 +24,16 @@ static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer) ...@@ -29,17 +24,16 @@ static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue); void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
unsigned int insert_count); unsigned int insert_count);
struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, size_t len); dma_addr_t dma_addr, size_t len);
int efx_tx_tso_header_length(struct sk_buff *skb); int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, unsigned int segment_count);
unsigned int segment_count);
unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx);
int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb); int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
extern bool efx_separate_tx_channels; extern bool efx_siena_separate_tx_channels;
#endif #endif