From patchwork Tue Nov 7 21:16:54 2017
X-Patchwork-Submitter: Dave Taht
X-Patchwork-Id: 835474
X-Patchwork-Delegate: shemminger@vyatta.com
From: Dave Taht
To: netdev@vger.kernel.org
Cc: Dave Taht
Subject: [PATCH iproute2 2/4] q_netem: utilize 64 bit nanosecond API for delay and jitter
Date: Tue, 7 Nov 2017 13:16:54 -0800
Message-Id: <1510089416-5945-3-git-send-email-dave.taht@gmail.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1510089416-5945-1-git-send-email-dave.taht@gmail.com>
References: <1510089416-5945-1-git-send-email-dave.taht@gmail.com>
X-Mailing-List: netdev@vger.kernel.org

This starts to obsolete the old "ticks" API in favor of well-defined
nanoseconds for the kernel/userspace netem interface.
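For a sense of the range difference this buys, a rough standalone sketch
(not part of the patch; the 64 ns-per-tick figure is an assumption based
on the common PSCHED_SHIFT = 6 configuration) comparing what a 32-bit tick
field and a signed 64-bit nanosecond field can represent:

/* Illustrative only: delay range of a 32-bit psched tick field vs. a
 * signed 64-bit nanosecond field, assuming one tick == 64 ns. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_tick_ns = (uint64_t)UINT32_MAX * 64; /* old __u32 ticks */
	int64_t max_ns = INT64_MAX;                       /* new __s64 nanoseconds */

	printf("32-bit ticks top out around %llu s\n",
	       (unsigned long long)(max_tick_ns / 1000000000ULL));   /* ~274 s */
	printf("64-bit nanoseconds top out around %lld years\n",
	       (long long)(max_ns / 1000000000LL / 3600 / 24 / 365)); /* ~292 years */
	return 0;
}

On such a configuration the old fields cap out at roughly 274 seconds of
emulated delay, which is the kind of limit the 64-bit nanosecond
attributes remove.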
Signed-off-by: Dave Taht
---
 tc/q_netem.c | 68 +++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 44 insertions(+), 24 deletions(-)

diff --git a/tc/q_netem.c b/tc/q_netem.c
index cdaddce..22a5b94 100644
--- a/tc/q_netem.c
+++ b/tc/q_netem.c
@@ -151,24 +151,6 @@ static int get_distribution(const char *type, __s16 *data, int maxdata)
 #define NEXT_IS_SIGNED_NUMBER() \
 	(NEXT_ARG_OK() && (isdigit(argv[1][0]) || argv[1][0] == '-'))
 
-/* Adjust for the fact that psched_ticks aren't always usecs
-   (based on kernel PSCHED_CLOCK configuration */
-static int get_ticks(__u32 *ticks, const char *str)
-{
-	unsigned int t;
-
-	if (get_time(&t, str))
-		return -1;
-
-	if (tc_core_time2big(t)) {
-		fprintf(stderr, "Illegal %u time (too large)\n", t);
-		return -1;
-	}
-
-	*ticks = tc_core_time2tick(t);
-	return 0;
-}
-
 static int netem_parse_opt(struct qdisc_util *qu, int argc, char **argv,
 			   struct nlmsghdr *n)
 {
@@ -185,6 +167,8 @@ static int netem_parse_opt(struct qdisc_util *qu, int argc, char **argv,
 	__u16 loss_type = NETEM_LOSS_UNSPEC;
 	int present[__TCA_NETEM_MAX] = {};
 	__u64 rate64 = 0;
+	__s64 latency64 = 0;
+	__s64 jitter64 = 0;
 
 	for ( ; argc > 0; --argc, ++argv) {
 		if (matches(*argv, "limit") == 0) {
@@ -196,14 +180,16 @@ static int netem_parse_opt(struct qdisc_util *qu, int argc, char **argv,
 		} else if (matches(*argv, "latency") == 0 ||
 			   matches(*argv, "delay") == 0) {
 			NEXT_ARG();
-			if (get_ticks(&opt.latency, *argv)) {
+			present[TCA_NETEM_LATENCY64] = 1;
+			if (get_time64(&latency64, *argv)) {
 				explain1("latency");
 				return -1;
 			}
 
 			if (NEXT_IS_NUMBER()) {
 				NEXT_ARG();
-				if (get_ticks(&opt.jitter, *argv)) {
+				present[TCA_NETEM_JITTER64] = 1;
+				if (get_time64(&jitter64, *argv)) {
 					explain1("latency");
 					return -1;
 				}
@@ -437,7 +423,7 @@ static int netem_parse_opt(struct qdisc_util *qu, int argc, char **argv,
 	tail = NLMSG_TAIL(n);
 
 	if (reorder.probability) {
-		if (opt.latency == 0) {
+		if (latency64 == 0) {
 			fprintf(stderr, "reordering not possible without specifying some delay\n");
 			explain();
 			return -1;
@@ -458,7 +444,7 @@ static int netem_parse_opt(struct qdisc_util *qu, int argc, char **argv,
 		}
 	}
 
-	if (dist_data && (opt.latency == 0 || opt.jitter == 0)) {
+	if (dist_data && (latency64 == 0 || jitter64 == 0)) {
 		fprintf(stderr, "distribution specified but no latency and jitter values\n");
 		explain();
 		return -1;
@@ -467,6 +453,16 @@ static int netem_parse_opt(struct qdisc_util *qu, int argc, char **argv,
 	if (addattr_l(n, 1024, TCA_OPTIONS, &opt, sizeof(opt)) < 0)
 		return -1;
 
+	if (present[TCA_NETEM_LATENCY64] &&
+	    addattr_l(n, 1024, TCA_NETEM_LATENCY64, &latency64,
+		      sizeof(latency64)) < 0)
+		return -1;
+
+	if (present[TCA_NETEM_JITTER64] &&
+	    addattr_l(n, 1024, TCA_NETEM_JITTER64, &jitter64,
+		      sizeof(jitter64)) < 0)
+		return -1;
+
 	if (present[TCA_NETEM_CORR] &&
 	    addattr_l(n, 1024, TCA_NETEM_CORR, &cor, sizeof(cor)) < 0)
 		return -1;
@@ -540,6 +536,8 @@ static int netem_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
 	const struct tc_netem_rate *rate = NULL;
 	int len;
 	__u64 rate64 = 0;
+	__s64 latency64 = 0;
+	__s64 jitter64 = 0;
 
 	SPRINT_BUF(b1);
 
@@ -559,6 +557,18 @@ static int netem_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
 	parse_rtattr(tb, TCA_NETEM_MAX, RTA_DATA(opt) + sizeof(qopt),
 		     len);
 
+	if (tb[TCA_NETEM_LATENCY64]) {
+		if (RTA_PAYLOAD(tb[TCA_NETEM_LATENCY64]) <
+		    sizeof(latency64))
+			return -1;
+		latency64 = rta_getattr_u64(tb[TCA_NETEM_LATENCY64]);
+	}
+	if (tb[TCA_NETEM_JITTER64]) {
+		if (RTA_PAYLOAD(tb[TCA_NETEM_JITTER64]) <
+		    sizeof(jitter64))
+			return -1;
+		jitter64 = rta_getattr_u64(tb[TCA_NETEM_JITTER64]);
+	}
 	if (tb[TCA_NETEM_CORR]) {
 		if (RTA_PAYLOAD(tb[TCA_NETEM_CORR]) < sizeof(*cor))
 			return -1;
@@ -602,13 +612,23 @@ static int netem_print_opt(struct qdisc_util *qu, FILE *f, struct rtattr *opt)
 	fprintf(f, "limit %d", qopt.limit);
 
-	if (qopt.latency) {
+	if (latency64) {
+		fprintf(f, " delay %s", sprint_time64(latency64, b1));
+
+		if (jitter64) {
+			fprintf(f, " %s", sprint_time64(jitter64, b1));
+			if (cor && cor->delay_corr)
+				fprintf(f, " %s",
+					sprint_percent(cor->delay_corr, b1));
+		}
+	} else if (qopt.latency) {
 		fprintf(f, " delay %s", sprint_ticks(qopt.latency, b1));
 
 		if (qopt.jitter) {
 			fprintf(f, " %s", sprint_ticks(qopt.jitter, b1));
 			if (cor && cor->delay_corr)
-				fprintf(f, " %s", sprint_percent(cor->delay_corr, b1));
+				fprintf(f, " %s",
+					sprint_percent(cor->delay_corr, b1));
 		}
 	}