Commit 69bb5fa7 authored by Edward Cree, committed by Jakub Kicinski

sfc: ef100 representor RX NAPI poll

This patch adds the 'bottom half' napi->poll routine for representor RX.
See the next patch (with the top half) for an explanation of the 'fake
interrupt' scheme used to drive this NAPI context.
Signed-off-by: Edward Cree <ecree.xilinx@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent a95115c4
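
The 'fake interrupt' scheme is easiest to follow from the producer side, so a rough sketch of what that 'top half' might look like is given here. It is illustrative only: the function name efx_ef100_rep_rx_packet_sketch, the queue-full drop policy, and the exact locking are assumptions, since the real producer only arrives in the next patch. The sketch relies on the struct efx_rep fields added by the diff below (rx_list, rx_lock, write_index, read_index, rx_pring_size, napi).

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#include "ef100_rep.h"	/* struct efx_rep, as extended by this patch */

/* Hypothetical 'top half' (the real one lands in the next patch): hand a
 * representee packet's SKB to the representor by appending it to
 * efv->rx_list.  write_index counts enqueued packets and read_index counts
 * packets consumed by efx_ef100_rep_poll(); napi_schedule() is only called
 * when the two were equal, i.e. the "interrupt" was armed.  Storing
 * read_index back in the poll routine re-arms it.
 */
static void efx_ef100_rep_rx_packet_sketch(struct efx_rep *efv,
					   struct sk_buff *skb)
{
	bool primed;

	spin_lock_bh(&efv->rx_lock);
	if (efv->write_index - efv->read_index >= efv->rx_pring_size) {
		/* List is full; drop rather than queue without bound. */
		spin_unlock_bh(&efv->rx_lock);
		dev_kfree_skb_any(skb);
		return;
	}
	/* NAPI has consumed everything previously queued iff the counters
	 * match; in that case it needs a fresh "interrupt".
	 */
	primed = efv->read_index == efv->write_index;
	list_add_tail(&skb->list, &efv->rx_list);
	efv->write_index++;
	spin_unlock_bh(&efv->rx_lock);

	if (primed)
		napi_schedule(&efv->napi);
}

With free-running counters, equality is the only comparison the scheme needs, and the need_resched check in the poll routine below closes the race where a packet is queued between the SKB harvest and napi_complete_done().
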
drivers/net/ethernet/sfc/ef100_rep.c

@@ -16,12 +16,16 @@
 #define EFX_EF100_REP_DRIVER	"efx_ef100_rep"
 
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);
+
 static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
 				     unsigned int i)
 {
 	efv->parent = efx;
 	efv->idx = i;
 	INIT_LIST_HEAD(&efv->list);
+	INIT_LIST_HEAD(&efv->rx_list);
+	spin_lock_init(&efv->rx_lock);
 	efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
 			  NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
 			  NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
@@ -29,6 +33,25 @@ static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
 	return 0;
 }
 
+static int efx_ef100_rep_open(struct net_device *net_dev)
+{
+	struct efx_rep *efv = netdev_priv(net_dev);
+
+	netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
+		       NAPI_POLL_WEIGHT);
+	napi_enable(&efv->napi);
+	return 0;
+}
+
+static int efx_ef100_rep_close(struct net_device *net_dev)
+{
+	struct efx_rep *efv = netdev_priv(net_dev);
+
+	napi_disable(&efv->napi);
+	netif_napi_del(&efv->napi);
+	return 0;
+}
+
 static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
 				      struct net_device *dev)
 {
@@ -93,6 +116,8 @@ static void efx_ef100_rep_get_stats64(struct net_device *dev,
 }
 
 static const struct net_device_ops efx_ef100_rep_netdev_ops = {
+	.ndo_open		= efx_ef100_rep_open,
+	.ndo_stop		= efx_ef100_rep_close,
 	.ndo_start_xmit		= efx_ef100_rep_xmit,
 	.ndo_get_port_parent_id	= efx_ef100_rep_get_port_parent_id,
 	.ndo_get_phys_port_name	= efx_ef100_rep_get_phys_port_name,
@@ -256,3 +281,42 @@ void efx_ef100_fini_vfreps(struct efx_nic *efx)
 	list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
 		efx_ef100_vfrep_destroy(efx, efv);
 }
+
+static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
+{
+	struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
+	unsigned int read_index;
+	struct list_head head;
+	struct sk_buff *skb;
+	bool need_resched;
+	int spent = 0;
+
+	INIT_LIST_HEAD(&head);
+	/* Grab up to 'weight' pending SKBs */
+	spin_lock_bh(&efv->rx_lock);
+	read_index = efv->write_index;
+	while (spent < weight && !list_empty(&efv->rx_list)) {
+		skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
+		list_del(&skb->list);
+		list_add_tail(&skb->list, &head);
+		spent++;
+	}
+	spin_unlock_bh(&efv->rx_lock);
+	/* Receive them */
+	netif_receive_skb_list(&head);
+	if (spent < weight)
+		if (napi_complete_done(napi, spent)) {
+			spin_lock_bh(&efv->rx_lock);
+			efv->read_index = read_index;
+			/* If write_index advanced while we were doing the
+			 * RX, then storing our read_index won't re-prime the
+			 * fake-interrupt. In that case, we need to schedule
+			 * NAPI again to consume the additional packet(s).
+			 */
+			need_resched = efv->write_index != read_index;
+			spin_unlock_bh(&efv->rx_lock);
+			if (need_resched)
+				napi_schedule(&efv->napi);
+		}
+	return spent;
+}
drivers/net/ethernet/sfc/ef100_rep.h

@@ -29,7 +29,13 @@ struct efx_rep_sw_stats {
  * @msg_enable: log message enable flags
  * @mport: m-port ID of corresponding VF
  * @idx: VF index
+ * @write_index: number of packets enqueued to @rx_list
+ * @read_index: number of packets consumed from @rx_list
+ * @rx_pring_size: max length of RX list
  * @list: entry on efx->vf_reps
+ * @rx_list: list of SKBs queued for receive in NAPI poll
+ * @rx_lock: protects @rx_list
+ * @napi: NAPI control structure
  * @stats: software traffic counters for netdev stats
  */
 struct efx_rep {
@@ -38,7 +44,12 @@ struct efx_rep {
 	u32 msg_enable;
 	u32 mport;
 	unsigned int idx;
+	unsigned int write_index, read_index;
+	unsigned int rx_pring_size;
 	struct list_head list;
+	struct list_head rx_list;
+	spinlock_t rx_lock;
+	struct napi_struct napi;
 	struct efx_rep_sw_stats stats;
 };