Commit bc9ad166 authored by Eric Dumazet, committed by David S. Miller

net: introduce napi_schedule_irqoff()

napi_schedule() can be called from any context and therefore has to mask hard
irqs.

Add a variant that may only be called from hard interrupt handlers or from
contexts where hard irqs are already masked.

Many NIC drivers can use it from their hard IRQ handlers instead of the
generic variant.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c6be2a10
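
As a hedged illustration of the intended use (not part of this commit), a
driver's hard IRQ handler could call the new variant directly; the names
foo_interrupt and struct foo_priv below are hypothetical:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct foo_priv {			/* hypothetical driver private state */
	struct napi_struct napi;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;

	/* A hard IRQ handler runs with hard irqs masked, so the
	 * local_irq_save()/local_irq_restore() pair that napi_schedule()
	 * performs would be redundant here.
	 */
	napi_schedule_irqoff(&priv->napi);

	return IRQ_HANDLED;
}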
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -386,6 +386,7 @@ typedef enum rx_handler_result rx_handler_result_t;
 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
 
 void __napi_schedule(struct napi_struct *n);
+void __napi_schedule_irqoff(struct napi_struct *n);
 
 static inline bool napi_disable_pending(struct napi_struct *n)
 {
@@ -420,6 +421,18 @@ static inline void napi_schedule(struct napi_struct *n)
 		__napi_schedule(n);
 }
 
+/**
+ * napi_schedule_irqoff - schedule NAPI poll
+ * @n: napi context
+ *
+ * Variant of napi_schedule(), assuming hard irqs are masked.
+ */
+static inline void napi_schedule_irqoff(struct napi_struct *n)
+{
+	if (napi_schedule_prep(n))
+		__napi_schedule_irqoff(n);
+}
+
 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
 static inline bool napi_reschedule(struct napi_struct *napi)
 {
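
The saving comes from what the hunks collapse: __napi_schedule() brackets its
work with local_irq_save()/local_irq_restore(). Around the time of this commit
its body looks roughly as follows (quoted for reference, not part of the diff):

void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	/* Mask hard irqs so the per-cpu softnet_data poll list cannot
	 * be touched concurrently by an interrupt on this cpu.
	 */
	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}

When the caller already runs with hard irqs masked, that save/restore pair is
pure overhead, which is exactly what __napi_schedule_irqoff() drops below.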
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4372,7 +4372,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
  * __napi_schedule - schedule for receive
  * @n: entry to schedule
  *
- * The entry's receive function will be scheduled to run
+ * The entry's receive function will be scheduled to run.
+ * Consider using __napi_schedule_irqoff() if hard irqs are masked.
  */
 void __napi_schedule(struct napi_struct *n)
 {
@@ -4384,6 +4385,18 @@ void __napi_schedule(struct napi_struct *n)
 }
 EXPORT_SYMBOL(__napi_schedule);
 
+/**
+ * __napi_schedule_irqoff - schedule for receive
+ * @n: entry to schedule
+ *
+ * Variant of __napi_schedule() assuming hard irqs are masked
+ */
+void __napi_schedule_irqoff(struct napi_struct *n)
+{
+	____napi_schedule(this_cpu_ptr(&softnet_data), n);
+}
+EXPORT_SYMBOL(__napi_schedule_irqoff);
+
 void __napi_complete(struct napi_struct *n)
 {
 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
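
A design note on the precondition: ____napi_schedule() links the napi onto the
per-cpu softnet_data poll list and raises NET_RX_SOFTIRQ, so calling
__napi_schedule_irqoff() with hard irqs enabled would risk corrupting that
per-cpu state. The _irqoff suffix encodes the requirement rather than
enforcing it, matching the kernel's usual naming convention for such helpers.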