Commit ef87979c authored by Stephen Hemminger, committed by David S. Miller

pktgen: better scheduler friendliness

The previous update did not reschedule in the inner loop, which triggered soft-lockup watchdog warnings.
Rewrite inner loop to:
  * account for delays better with less clock calls
  * more accurate timing of delay:
    - only delay if packet was successfully sent
    - if delay is 100ns and it takes 10ns to build packet then
      account for that
  * use wait_event_interruptible_timeout rather than open coding it.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6b80d6a6
...@@ -2104,7 +2104,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev) ...@@ -2104,7 +2104,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
{ {
ktime_t start; ktime_t start_time, end_time;
s32 remaining; s32 remaining;
struct hrtimer_sleeper t; struct hrtimer_sleeper t;
...@@ -2115,7 +2115,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) ...@@ -2115,7 +2115,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
if (remaining <= 0) if (remaining <= 0)
return; return;
start = ktime_now(); start_time = ktime_now();
if (remaining < 100) if (remaining < 100)
udelay(remaining); /* really small just spin */ udelay(remaining); /* really small just spin */
else { else {
...@@ -2134,7 +2134,10 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) ...@@ -2134,7 +2134,10 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
} while (t.task && pkt_dev->running && !signal_pending(current)); } while (t.task && pkt_dev->running && !signal_pending(current));
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
} }
pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start)); end_time = ktime_now();
pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
} }
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
...@@ -3364,19 +3367,29 @@ static void pktgen_rem_thread(struct pktgen_thread *t) ...@@ -3364,19 +3367,29 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
mutex_unlock(&pktgen_thread_lock); mutex_unlock(&pktgen_thread_lock);
} }
static void idle(struct pktgen_dev *pkt_dev) static void pktgen_resched(struct pktgen_dev *pkt_dev)
{ {
ktime_t idle_start = ktime_now(); ktime_t idle_start = ktime_now();
schedule();
pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
}
if (need_resched()) static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
schedule(); {
else ktime_t idle_start = ktime_now();
cpu_relax();
while (atomic_read(&(pkt_dev->skb->users)) != 1) {
if (signal_pending(current))
break;
if (need_resched())
pktgen_resched(pkt_dev);
else
cpu_relax();
}
pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start)); pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
} }
static void pktgen_xmit(struct pktgen_dev *pkt_dev) static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{ {
struct net_device *odev = pkt_dev->odev; struct net_device *odev = pkt_dev->odev;
...@@ -3386,36 +3399,21 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) ...@@ -3386,36 +3399,21 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
u16 queue_map; u16 queue_map;
int ret; int ret;
if (pkt_dev->delay) { /* If device is offline, then don't send */
spin(pkt_dev, pkt_dev->next_tx); if (unlikely(!netif_running(odev) || !netif_carrier_ok(odev))) {
pktgen_stop_device(pkt_dev);
/* This is max DELAY, this has special meaning of return;
* "never transmit"
*/
if (pkt_dev->delay == ULLONG_MAX) {
pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
return;
}
}
if (!pkt_dev->skb) {
set_cur_queue_map(pkt_dev);
queue_map = pkt_dev->cur_queue_map;
} else {
queue_map = skb_get_queue_mapping(pkt_dev->skb);
} }
txq = netdev_get_tx_queue(odev, queue_map); /* This is max DELAY, this has special meaning of
/* Did we saturate the queue already? */ * "never transmit"
if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) { */
/* If device is down, then all queues are permnantly frozen */ if (unlikely(pkt_dev->delay == ULLONG_MAX)) {
if (netif_running(odev)) pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
idle(pkt_dev);
else
pktgen_stop_device(pkt_dev);
return; return;
} }
/* If no skb or clone count exhausted then get new one */
if (!pkt_dev->skb || (pkt_dev->last_ok && if (!pkt_dev->skb || (pkt_dev->last_ok &&
++pkt_dev->clone_count >= pkt_dev->clone_skb)) { ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
/* build a new pkt */ /* build a new pkt */
...@@ -3434,54 +3432,45 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev) ...@@ -3434,54 +3432,45 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->clone_count = 0; /* reset counter */ pkt_dev->clone_count = 0; /* reset counter */
} }
/* fill_packet() might have changed the queue */ if (pkt_dev->delay && pkt_dev->last_ok)
spin(pkt_dev, pkt_dev->next_tx);
queue_map = skb_get_queue_mapping(pkt_dev->skb); queue_map = skb_get_queue_mapping(pkt_dev->skb);
txq = netdev_get_tx_queue(odev, queue_map); txq = netdev_get_tx_queue(odev, queue_map);
__netif_tx_lock_bh(txq); __netif_tx_lock_bh(txq);
if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) atomic_inc(&(pkt_dev->skb->users));
pkt_dev->last_ok = 0;
else {
atomic_inc(&(pkt_dev->skb->users));
retry_now: if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
ret = NETDEV_TX_BUSY;
else
ret = (*xmit)(pkt_dev->skb, odev); ret = (*xmit)(pkt_dev->skb, odev);
switch (ret) {
case NETDEV_TX_OK: switch (ret) {
txq_trans_update(txq); case NETDEV_TX_OK:
pkt_dev->last_ok = 1; txq_trans_update(txq);
pkt_dev->sofar++; pkt_dev->last_ok = 1;
pkt_dev->seq_num++; pkt_dev->sofar++;
pkt_dev->tx_bytes += pkt_dev->cur_pkt_size; pkt_dev->seq_num++;
break; pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
case NETDEV_TX_LOCKED: break;
cpu_relax(); default: /* Drivers are not supposed to return other values! */
goto retry_now; if (net_ratelimit())
default: /* Drivers are not supposed to return other values! */ pr_info("pktgen: %s xmit error: %d\n",
if (net_ratelimit()) odev->name, ret);
pr_info("pktgen: %s xmit error: %d\n", pkt_dev->errors++;
odev->name, ret); /* fallthru */
pkt_dev->errors++; case NETDEV_TX_LOCKED:
/* fallthru */ case NETDEV_TX_BUSY:
case NETDEV_TX_BUSY: /* Retry it next time */
/* Retry it next time */ atomic_dec(&(pkt_dev->skb->users));
atomic_dec(&(pkt_dev->skb->users)); pkt_dev->last_ok = 0;
pkt_dev->last_ok = 0;
}
if (pkt_dev->delay)
pkt_dev->next_tx = ktime_add_ns(ktime_now(),
pkt_dev->delay);
} }
__netif_tx_unlock_bh(txq); __netif_tx_unlock_bh(txq);
/* If pkt_dev->count is zero, then run forever */ /* If pkt_dev->count is zero, then run forever */
if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) { if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
while (atomic_read(&(pkt_dev->skb->users)) != 1) { pktgen_wait_for_skb(pkt_dev);
if (signal_pending(current))
break;
idle(pkt_dev);
}
/* Done with this */ /* Done with this */
pktgen_stop_device(pkt_dev); pktgen_stop_device(pkt_dev);
...@@ -3514,20 +3503,24 @@ static int pktgen_thread_worker(void *arg) ...@@ -3514,20 +3503,24 @@ static int pktgen_thread_worker(void *arg)
while (!kthread_should_stop()) { while (!kthread_should_stop()) {
pkt_dev = next_to_run(t); pkt_dev = next_to_run(t);
if (!pkt_dev && if (unlikely(!pkt_dev && t->control == 0)) {
(t->control & (T_STOP | T_RUN | T_REMDEVALL | T_REMDEV)) wait_event_interruptible_timeout(t->queue,
== 0) { t->control != 0,
prepare_to_wait(&(t->queue), &wait, HZ/10);
TASK_INTERRUPTIBLE); continue;
schedule_timeout(HZ / 10);
finish_wait(&(t->queue), &wait);
} }
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
if (pkt_dev) if (likely(pkt_dev)) {
pktgen_xmit(pkt_dev); pktgen_xmit(pkt_dev);
if (need_resched())
pktgen_resched(pkt_dev);
else
cpu_relax();
}
if (t->control & T_STOP) { if (t->control & T_STOP) {
pktgen_stop(t); pktgen_stop(t);
t->control &= ~(T_STOP); t->control &= ~(T_STOP);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment