| Field | Value |
|---|---|
| Message ID | 1510175399-7404-2-git-send-email-dave.taht@gmail.com |
| State | Superseded, archived |
| Delegated to | David Miller |
| Headers | show |
| Series | netem: add nsec scheduling and slot feature (expand) |
On Wed, 2017-11-08 at 13:09 -0800, Dave Taht wrote: > Upgrade the internal netem scheduler to use nanoseconds rather than > ticks throughout. > ... > -static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q) > +static s64 packet_len_2_sched_time(unsigned int len, > + struct netem_sched_data *q) > { > - u64 ticks; > - > + s64 offset; > len += q->packet_overhead; > > if (q->cell_size) { > @@ -345,11 +345,9 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche > cells++; > len = cells * (q->cell_size + q->cell_overhead); > } > - > - ticks = (u64)len * NSEC_PER_SEC; > - > - do_div(ticks, q->rate); > - return PSCHED_NS2TICKS(ticks); > + offset = (s64)len * NSEC_PER_SEC; > + do_div(offset, q->rate); > + return offset; > } do_div() first argument being u64, I do not see why you chose 's64 offset' packet_len_2_sched_time() should return u64, because I do not see how we could return a negative value, since a packet length is positive.
Hi Dave, Thank you for the patch! Perhaps something to improve: [auto build test WARNING on net-next/master] url: https://github.com/0day-ci/linux/commits/Dave-Taht/netem-convert-to-qdisc_watchdog_schedule_ns/20171111-184934 config: xtensa-allyesconfig (attached as .config) compiler: xtensa-linux-gcc (GCC) 4.9.0 reproduce: wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross chmod +x ~/bin/make.cross # save the attached .config to linux build tree make.cross ARCH=xtensa All warnings (new ones prefixed by >>): In file included from ./arch/xtensa/include/generated/asm/div64.h:1:0, from include/linux/kernel.h:173, from include/asm-generic/bug.h:16, from ./arch/xtensa/include/generated/asm/bug.h:1, from include/linux/bug.h:5, from include/linux/mmdebug.h:5, from include/linux/mm.h:9, from net/sched/sch_netem.c:16: net/sched/sch_netem.c: In function 'packet_len_2_sched_time': include/asm-generic/div64.h:208:28: warning: comparison of distinct pointer types lacks a cast (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \ ^ >> net/sched/sch_netem.c:349:2: note: in expansion of macro 'do_div' do_div(offset, q->rate); ^ vim +/do_div +349 net/sched/sch_netem.c 334 335 static s64 packet_len_2_sched_time(unsigned int len, 336 struct netem_sched_data *q) 337 { 338 s64 offset; 339 len += q->packet_overhead; 340 341 if (q->cell_size) { 342 u32 cells = reciprocal_divide(len, q->cell_size_reciprocal); 343 344 if (len > cells * q->cell_size) /* extra cell needed for remainder */ 345 cells++; 346 len = cells * (q->cell_size + q->cell_overhead); 347 } 348 offset = (s64)len * NSEC_PER_SEC; > 349 do_div(offset, q->rate); 350 return offset; 351 } 352 --- 0-DAY kernel test infrastructure Open Source Technology Center https://lists.01.org/pipermail/kbuild-all Intel Corporation
Hi Dave,
Thank you for the patch! Yet something to improve:
[auto build test ERROR on net-next/master]
url: https://github.com/0day-ci/linux/commits/Dave-Taht/netem-convert-to-qdisc_watchdog_schedule_ns/20171111-184934
config: i386-randconfig-i1-201745 (attached as .config)
compiler: gcc-6 (Debian 6.4.0-9) 6.4.0 20171026
reproduce:
# save the attached .config to linux build tree
make ARCH=i386
All errors (new ones prefixed by >>):
net/sched/sch_netem.o: In function `netem_enqueue':
>> net/sched/sch_netem.c:323: undefined reference to `__moddi3'
vim +323 net/sched/sch_netem.c
661b7972 stephen hemminger 2011-02-23 302
661b7972 stephen hemminger 2011-02-23 303
^1da177e Linus Torvalds 2005-04-16 304 /* tabledist - return a pseudo-randomly distributed value with mean mu and
^1da177e Linus Torvalds 2005-04-16 305 * std deviation sigma. Uses table lookup to approximate the desired
^1da177e Linus Torvalds 2005-04-16 306 * distribution, and a uniformly-distributed pseudo-random source.
^1da177e Linus Torvalds 2005-04-16 307 */
9d0cec66 Dave Taht 2017-11-08 308 static s64 tabledist(s64 mu, s64 sigma,
b407621c Stephen Hemminger 2007-03-22 309 struct crndstate *state,
b407621c Stephen Hemminger 2007-03-22 310 const struct disttable *dist)
^1da177e Linus Torvalds 2005-04-16 311 {
9d0cec66 Dave Taht 2017-11-08 312 s64 x;
b407621c Stephen Hemminger 2007-03-22 313 long t;
b407621c Stephen Hemminger 2007-03-22 314 u32 rnd;
^1da177e Linus Torvalds 2005-04-16 315
^1da177e Linus Torvalds 2005-04-16 316 if (sigma == 0)
^1da177e Linus Torvalds 2005-04-16 317 return mu;
^1da177e Linus Torvalds 2005-04-16 318
^1da177e Linus Torvalds 2005-04-16 319 rnd = get_crandom(state);
^1da177e Linus Torvalds 2005-04-16 320
^1da177e Linus Torvalds 2005-04-16 321 /* default uniform distribution */
^1da177e Linus Torvalds 2005-04-16 322 if (dist == NULL)
^1da177e Linus Torvalds 2005-04-16 @323 return (rnd % (2*sigma)) - sigma + mu;
^1da177e Linus Torvalds 2005-04-16 324
^1da177e Linus Torvalds 2005-04-16 325 t = dist->table[rnd % dist->size];
^1da177e Linus Torvalds 2005-04-16 326 x = (sigma % NETEM_DIST_SCALE) * t;
^1da177e Linus Torvalds 2005-04-16 327 if (x >= 0)
^1da177e Linus Torvalds 2005-04-16 328 x += NETEM_DIST_SCALE/2;
^1da177e Linus Torvalds 2005-04-16 329 else
^1da177e Linus Torvalds 2005-04-16 330 x -= NETEM_DIST_SCALE/2;
^1da177e Linus Torvalds 2005-04-16 331
^1da177e Linus Torvalds 2005-04-16 332 return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
^1da177e Linus Torvalds 2005-04-16 333 }
^1da177e Linus Torvalds 2005-04-16 334
:::::: The code at line 323 was first introduced by commit
:::::: 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Linux-2.6.12-rc2
:::::: TO: Linus Torvalds <torvalds@ppc970.osdl.org>
:::::: CC: Linus Torvalds <torvalds@ppc970.osdl.org>
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index db0228a..5559ad1 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -77,8 +77,8 @@ struct netem_sched_data { struct qdisc_watchdog watchdog; - psched_tdiff_t latency; - psched_tdiff_t jitter; + s64 latency; + s64 jitter; u32 loss; u32 ecn; @@ -145,7 +145,7 @@ struct netem_sched_data { * we save skb->tstamp value in skb->cb[] before destroying it. */ struct netem_skb_cb { - psched_time_t time_to_send; + u64 time_to_send; }; static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) @@ -305,11 +305,11 @@ static bool loss_event(struct netem_sched_data *q) * std deviation sigma. Uses table lookup to approximate the desired * distribution, and a uniformly-distributed pseudo-random source. */ -static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma, - struct crndstate *state, - const struct disttable *dist) +static s64 tabledist(s64 mu, s64 sigma, + struct crndstate *state, + const struct disttable *dist) { - psched_tdiff_t x; + s64 x; long t; u32 rnd; @@ -332,10 +332,10 @@ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma, return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu; } -static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q) +static s64 packet_len_2_sched_time(unsigned int len, + struct netem_sched_data *q) { - u64 ticks; - + s64 offset; len += q->packet_overhead; if (q->cell_size) { @@ -345,11 +345,9 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche cells++; len = cells * (q->cell_size + q->cell_overhead); } - - ticks = (u64)len * NSEC_PER_SEC; - - do_div(ticks, q->rate); - return PSCHED_NS2TICKS(ticks); + offset = (s64)len * NSEC_PER_SEC; + do_div(offset, q->rate); + return offset; } static void tfifo_reset(struct Qdisc *sch) @@ -369,7 +367,7 @@ static void tfifo_reset(struct Qdisc *sch) static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) 
{ struct netem_sched_data *q = qdisc_priv(sch); - psched_time_t tnext = netem_skb_cb(nskb)->time_to_send; + u64 tnext = netem_skb_cb(nskb)->time_to_send; struct rb_node **p = &q->t_root.rb_node, *parent = NULL; while (*p) { @@ -515,13 +513,13 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, if (q->gap == 0 || /* not doing reordering */ q->counter < q->gap - 1 || /* inside last reordering gap */ q->reorder < get_crandom(&q->reorder_cor)) { - psched_time_t now; - psched_tdiff_t delay; + u64 now; + s64 delay; delay = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist); - now = psched_get_time(); + now = ktime_get_ns(); if (q->rate) { struct netem_skb_cb *last = NULL; @@ -547,7 +545,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, * from delay. */ delay -= last->time_to_send - now; - delay = max_t(psched_tdiff_t, 0, delay); + delay = max_t(s64, 0, delay); now = last->time_to_send; } @@ -562,7 +560,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, * Do re-ordering by putting one out of N packets at the front * of the queue. */ - cb->time_to_send = psched_get_time(); + cb->time_to_send = ktime_get_ns(); q->counter = 0; netem_enqueue_skb_head(&sch->q, skb); @@ -609,13 +607,13 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) } p = rb_first(&q->t_root); if (p) { - psched_time_t time_to_send; + u64 time_to_send; skb = rb_to_skb(p); /* if more time remaining? 
*/ time_to_send = netem_skb_cb(skb)->time_to_send; - if (time_to_send <= psched_get_time()) { + if (time_to_send <= ktime_get_ns()) { rb_erase(p, &q->t_root); sch->q.qlen--; @@ -659,7 +657,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) if (skb) goto deliver; } - qdisc_watchdog_schedule(&q->watchdog, time_to_send); + qdisc_watchdog_schedule_ns(&q->watchdog, time_to_send); } if (q->qdisc) { @@ -888,8 +886,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt) sch->limit = qopt->limit; - q->latency = qopt->latency; - q->jitter = qopt->jitter; + q->latency = PSCHED_TICKS2NS(qopt->latency); + q->jitter = PSCHED_TICKS2NS(qopt->jitter); q->limit = qopt->limit; q->gap = qopt->gap; q->counter = 0; @@ -1011,8 +1009,10 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) struct tc_netem_corrupt corrupt; struct tc_netem_rate rate; - qopt.latency = q->latency; - qopt.jitter = q->jitter; + qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency), + UINT_MAX); + qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter), + UINT_MAX); qopt.limit = q->limit; qopt.loss = q->loss; qopt.gap = q->gap;
Upgrade the internal netem scheduler to use nanoseconds rather than ticks throughout. Convert to and from the std "ticks" userspace api automatically, while allowing for finer grained scheduling to take place. Signed-off-by: Dave Taht <dave.taht@gmail.com> --- net/sched/sch_netem.c | 56 +++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 28 deletions(-)