From patchwork Wed Jul  8 07:28:33 2020
X-Patchwork-Submitter: Ciara Loftus
X-Patchwork-Id: 1325079
X-Patchwork-Delegate: bpf@iogearbox.net
From: Ciara Loftus
To: bpf@vger.kernel.org, bjorn.topel@intel.com, magnus.karlsson@intel.com
Cc: Ciara Loftus
Subject: [PATCH bpf-next 1/3] xsk: add new statistics
Date: Wed, 8 Jul 2020 07:28:33 +0000
Message-Id: <20200708072835.4427-2-ciara.loftus@intel.com>
In-Reply-To: <20200708072835.4427-1-ciara.loftus@intel.com>
References: <20200708072835.4427-1-ciara.loftus@intel.com>

It can be useful for the user to know the reason behind a dropped
packet. Introduce new counters which track drops on the receive path
caused by:
1. the rx ring being full
2. the fill ring being empty

Also, on the tx path, introduce a counter which tracks the number of
times we attempt to pull from the tx ring when it is empty.

Signed-off-by: Ciara Loftus
---
 include/net/xdp_sock.h            |  4 ++++
 include/uapi/linux/if_xdp.h       |  5 ++++-
 net/xdp/xsk.c                     | 36 ++++++++++++++++++++++++++-----
 net/xdp/xsk_buff_pool.c           |  1 +
 net/xdp/xsk_queue.h               |  6 ++++++
 tools/include/uapi/linux/if_xdp.h |  5 ++++-
 6 files changed, 50 insertions(+), 7 deletions(-)

diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 96bfc5f5f24e..c9d87cc40c11 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -69,7 +69,11 @@ struct xdp_sock {
 	spinlock_t tx_completion_lock;
 	/* Protects generic receive. */
 	spinlock_t rx_lock;
+
+	/* Statistics */
 	u64 rx_dropped;
+	u64 rx_queue_full;
+
 	struct list_head map_list;
 	/* Protects map_list */
 	spinlock_t map_list_lock;
diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h
index be328c59389d..a78a8096f4ce 100644
--- a/include/uapi/linux/if_xdp.h
+++ b/include/uapi/linux/if_xdp.h
@@ -73,9 +73,12 @@ struct xdp_umem_reg {
 };
 
 struct xdp_statistics {
-	__u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+	__u64 rx_dropped; /* Dropped for other reasons */
 	__u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
 	__u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+	__u64 rx_ring_full; /* Dropped due to rx ring being full */
+	__u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
+	__u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
 };
 
 struct xdp_options {
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 3700266229f6..26e3bba8c204 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -123,7 +123,7 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 	addr = xp_get_handle(xskb);
 	err = xskq_prod_reserve_desc(xs->rx, addr, len);
 	if (err) {
-		xs->rx_dropped++;
+		xs->rx_queue_full++;
 		return err;
 	}
 
@@ -274,8 +274,10 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
-		if (!xskq_cons_peek_desc(xs->tx, desc, umem))
+		if (!xskq_cons_peek_desc(xs->tx, desc, umem)) {
+			xs->tx->queue_empty_descs++;
 			continue;
+		}
 
 		/* This is the backpressure mechanism for the Tx path.
 		 * Reserve space in the completion queue and only proceed
@@ -387,6 +389,8 @@ static int xsk_generic_xmit(struct sock *sk)
 		sent_frame = true;
 	}
 
+	xs->tx->queue_empty_descs++;
+
 out:
 	if (sent_frame)
 		sk->sk_write_space(sk);
@@ -812,6 +816,12 @@ static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
 	ring->desc = offsetof(struct xdp_umem_ring, desc);
 }
 
+struct xdp_statistics_v1 {
+	__u64 rx_dropped;
+	__u64 rx_invalid_descs;
+	__u64 tx_invalid_descs;
+};
+
 static int xsk_getsockopt(struct socket *sock, int level, int optname,
 			  char __user *optval, int __user *optlen)
 {
@@ -831,19 +841,35 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname,
 	case XDP_STATISTICS:
 	{
 		struct xdp_statistics stats;
+		bool extra_stats = true;
+		size_t stats_size;
 
-		if (len < sizeof(stats))
+		if (len < sizeof(struct xdp_statistics_v1)) {
 			return -EINVAL;
+		} else if (len < sizeof(stats)) {
+			extra_stats = false;
+			stats_size = sizeof(struct xdp_statistics_v1);
+		} else {
+			stats_size = sizeof(stats);
+		}
 
 		mutex_lock(&xs->mutex);
 		stats.rx_dropped = xs->rx_dropped;
+		if (extra_stats) {
+			stats.rx_ring_full = xs->rx_queue_full;
+			stats.rx_fill_ring_empty_descs =
+				xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
+			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
+		} else {
+			stats.rx_dropped += xs->rx_queue_full;
+		}
 		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
 		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
 		mutex_unlock(&xs->mutex);
 
-		if (copy_to_user(optval, &stats, sizeof(stats)))
+		if (copy_to_user(optval, &stats, stats_size))
 			return -EFAULT;
-		if (put_user(sizeof(stats), optlen))
+		if (put_user(stats_size, optlen))
 			return -EFAULT;
 
 		return 0;
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 540ed75e4482..89cf3551d3e9 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -235,6 +235,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
 
 	for (;;) {
 		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
+			pool->fq->queue_empty_descs++;
 			xp_release(xskb);
 			return NULL;
 		}
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 5b5d24d2dd37..bf42cfd74b89 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -38,6 +38,7 @@ struct xsk_queue {
 	u32 cached_cons;
 	struct xdp_ring *ring;
 	u64 invalid_descs;
+	u64 queue_empty_descs;
 };
 
 /* The structure of the shared state of the rings are the same as the
@@ -354,6 +355,11 @@ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
 	return q ? q->invalid_descs : 0;
 }
 
+static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
+{
+	return q ? q->queue_empty_descs : 0;
+}
+
 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q_ops);
diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h
index be328c59389d..a78a8096f4ce 100644
--- a/tools/include/uapi/linux/if_xdp.h
+++ b/tools/include/uapi/linux/if_xdp.h
@@ -73,9 +73,12 @@ struct xdp_umem_reg {
 };
 
 struct xdp_statistics {
-	__u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+	__u64 rx_dropped; /* Dropped for other reasons */
 	__u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
 	__u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+	__u64 rx_ring_full; /* Dropped due to rx ring being full */
+	__u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
+	__u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
 };
 
 struct xdp_options {
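The optlen handling in xsk_getsockopt() above keeps XDP_STATISTICS
backwards compatible: a caller that passes the old three-field struct
still gets the v1 layout (with rx_queue_full folded into rx_dropped),
while a caller that passes the full struct gets all six counters and
can detect an older kernel from the returned optlen. A minimal
userspace sketch of reading the counters (not part of this series;
it assumes an AF_XDP socket descriptor xsk_fd obtained elsewhere,
e.g. via libbpf's xsk_socket__fd()) might look like:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

static int print_xsk_stats(int xsk_fd)
{
	struct xdp_statistics stats;
	socklen_t optlen = sizeof(stats);

	memset(&stats, 0, sizeof(stats));
	if (getsockopt(xsk_fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen))
		return -1;

	printf("rx_dropped:       %llu\n",
	       (unsigned long long)stats.rx_dropped);
	printf("rx_invalid_descs: %llu\n",
	       (unsigned long long)stats.rx_invalid_descs);
	printf("tx_invalid_descs: %llu\n",
	       (unsigned long long)stats.tx_invalid_descs);

	/* An older kernel copies back only the three v1 fields. */
	if (optlen < sizeof(stats)) {
		fprintf(stderr, "extended xsk statistics unavailable\n");
		return 0;
	}

	printf("rx_ring_full:             %llu\n",
	       (unsigned long long)stats.rx_ring_full);
	printf("rx_fill_ring_empty_descs: %llu\n",
	       (unsigned long long)stats.rx_fill_ring_empty_descs);
	printf("tx_ring_empty_descs:      %llu\n",
	       (unsigned long long)stats.tx_ring_empty_descs);
	return 0;
}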
From patchwork Wed Jul  8 07:28:34 2020
X-Patchwork-Submitter: Ciara Loftus
X-Patchwork-Id: 1325080
X-Patchwork-Delegate: bpf@iogearbox.net
From: Ciara Loftus
To: bpf@vger.kernel.org, bjorn.topel@intel.com, magnus.karlsson@intel.com
Cc: Ciara Loftus
Subject: [PATCH bpf-next 2/3] samples: bpf: add an option for printing extra
 statistics in xdpsock
Date: Wed, 8 Jul 2020 07:28:34 +0000
Message-Id: <20200708072835.4427-3-ciara.loftus@intel.com>
In-Reply-To: <20200708072835.4427-1-ciara.loftus@intel.com>
References: <20200708072835.4427-1-ciara.loftus@intel.com>

Introduce the --extra-stats (or simply -x) flag to the xdpsock
application, which prints additional statistics alongside the regular
rx and tx counters. The new statistics report error conditions, e.g.
the rx ring being full, invalid descriptors, etc.

Signed-off-by: Ciara Loftus
---
 samples/bpf/xdpsock_user.c | 87 +++++++++++++++++++++++++++++++++++++-
 1 file changed, 85 insertions(+), 2 deletions(-)

diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index c91e91362a0c..19c679456a0e 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -77,6 +77,7 @@ static u32 opt_batch_size = 64;
 static int opt_pkt_count;
 static u16 opt_pkt_size = MIN_PKT_SIZE;
 static u32 opt_pkt_fill_pattern = 0x12345678;
+static bool opt_extra_stats;
 static int opt_poll;
 static int opt_interval = 1;
 static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
@@ -103,8 +104,20 @@ struct xsk_socket_info {
 	struct xsk_socket *xsk;
 	unsigned long rx_npkts;
 	unsigned long tx_npkts;
+	unsigned long rx_dropped_npkts;
+	unsigned long rx_invalid_npkts;
+	unsigned long tx_invalid_npkts;
+	unsigned long rx_full_npkts;
+	unsigned long rx_fill_empty_npkts;
+	unsigned long tx_empty_npkts;
 	unsigned long prev_rx_npkts;
 	unsigned long prev_tx_npkts;
+	unsigned long prev_rx_dropped_npkts;
+	unsigned long prev_rx_invalid_npkts;
+	unsigned long prev_tx_invalid_npkts;
+	unsigned long prev_rx_full_npkts;
+	unsigned long prev_rx_fill_empty_npkts;
+	unsigned long prev_tx_empty_npkts;
 	u32 outstanding_tx;
 };
 
@@ -147,6 +160,30 @@ static void print_benchmark(bool running)
 	}
 }
 
+static int xsk_get_xdp_stats(int fd, struct xsk_socket_info *xsk)
+{
+	struct xdp_statistics stats;
+	socklen_t optlen;
+	int err;
+
+	optlen = sizeof(stats);
+	err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
+	if (err)
+		return err;
+
+	if (optlen == sizeof(struct xdp_statistics)) {
+		xsk->rx_dropped_npkts = stats.rx_dropped;
+		xsk->rx_invalid_npkts = stats.rx_invalid_descs;
+		xsk->tx_invalid_npkts = stats.tx_invalid_descs;
+		xsk->rx_full_npkts = stats.rx_ring_full;
+		xsk->rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs;
+		xsk->tx_empty_npkts = stats.tx_ring_empty_descs;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static void dump_stats(void)
 {
 	unsigned long now = get_nsecs();
@@ -157,7 +194,8 @@ static void dump_stats(void)
 
 	for (i = 0; i < num_socks && xsks[i]; i++) {
 		char *fmt = "%-15s %'-11.0f %'-11lu\n";
-		double rx_pps, tx_pps;
+		double rx_pps, tx_pps, dropped_pps, rx_invalid_pps, full_pps, fill_empty_pps,
+				tx_invalid_pps, tx_empty_pps;
 
 		rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
			 1000000000. / dt;
@@ -175,6 +213,46 @@ static void dump_stats(void)
 
 		xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
 		xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
+
+		if (opt_extra_stats) {
+			if (!xsk_get_xdp_stats(xsk_socket__fd(xsks[i]->xsk), xsks[i])) {
+				dropped_pps = (xsks[i]->rx_dropped_npkts -
+						xsks[i]->prev_rx_dropped_npkts) * 1000000000. / dt;
+				rx_invalid_pps = (xsks[i]->rx_invalid_npkts -
+						xsks[i]->prev_rx_invalid_npkts) * 1000000000. / dt;
+				tx_invalid_pps = (xsks[i]->tx_invalid_npkts -
+						xsks[i]->prev_tx_invalid_npkts) * 1000000000. / dt;
+				full_pps = (xsks[i]->rx_full_npkts -
+						xsks[i]->prev_rx_full_npkts) * 1000000000. / dt;
+				fill_empty_pps = (xsks[i]->rx_fill_empty_npkts -
+						xsks[i]->prev_rx_fill_empty_npkts)
+						* 1000000000. / dt;
+				tx_empty_pps = (xsks[i]->tx_empty_npkts -
+						xsks[i]->prev_tx_empty_npkts) * 1000000000. / dt;
+
+				printf(fmt, "rx dropped", dropped_pps,
+				       xsks[i]->rx_dropped_npkts);
+				printf(fmt, "rx invalid", rx_invalid_pps,
+				       xsks[i]->rx_invalid_npkts);
+				printf(fmt, "tx invalid", tx_invalid_pps,
+				       xsks[i]->tx_invalid_npkts);
+				printf(fmt, "rx queue full", full_pps,
+				       xsks[i]->rx_full_npkts);
+				printf(fmt, "fill ring empty", fill_empty_pps,
+				       xsks[i]->rx_fill_empty_npkts);
+				printf(fmt, "tx ring empty", tx_empty_pps,
+				       xsks[i]->tx_empty_npkts);
+
+				xsks[i]->prev_rx_dropped_npkts = xsks[i]->rx_dropped_npkts;
+				xsks[i]->prev_rx_invalid_npkts = xsks[i]->rx_invalid_npkts;
+				xsks[i]->prev_tx_invalid_npkts = xsks[i]->tx_invalid_npkts;
+				xsks[i]->prev_rx_full_npkts = xsks[i]->rx_full_npkts;
+				xsks[i]->prev_rx_fill_empty_npkts = xsks[i]->rx_fill_empty_npkts;
+				xsks[i]->prev_tx_empty_npkts = xsks[i]->tx_empty_npkts;
+			} else {
+				printf("%-15s\n", "Error retrieving extra stats");
+			}
+		}
 	}
 }
 
@@ -630,6 +708,7 @@ static struct option long_options[] = {
 	{"tx-pkt-count", required_argument, 0, 'C'},
 	{"tx-pkt-size", required_argument, 0, 's'},
 	{"tx-pkt-pattern", required_argument, 0, 'P'},
+	{"extra-stats", no_argument, 0, 'x'},
 	{0, 0, 0, 0}
 };
 
@@ -664,6 +743,7 @@ static void usage(const char *prog)
 		"  (Default: %d bytes)\n"
 		"  Min size: %d, Max size %d.\n"
 		"  -P, --tx-pkt-pattern=n	Packet fill pattern. Default: 0x%x\n"
+		"  -x, --extra-stats	Display extra statistics.\n"
 		"\n";
 	fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE,
 		opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE,
@@ -679,7 +759,7 @@ static void parse_command_line(int argc, char **argv)
 	opterr = 0;
 
 	for (;;) {
-		c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:",
+		c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:x",
				long_options, &option_index);
 		if (c == -1)
 			break;
@@ -760,6 +840,9 @@ static void parse_command_line(int argc, char **argv)
 	case 'P':
 		opt_pkt_fill_pattern = strtol(optarg, NULL, 16);
 		break;
+	case 'x':
+		opt_extra_stats = 1;
+		break;
 	default:
 		usage(basename(argv[0]));
 	}
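With the new flag, running the sample in rxdrop mode on a hypothetical
interface eth0 would look something like:

	./xdpsock -i eth0 -r -x

Each periodic per-socket dump then carries six extra rows (rx dropped,
rx/tx invalid, rx queue full, fill ring empty, tx ring empty) next to
the usual rx and tx packet counters, in the same pps/count format.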
From patchwork Wed Jul  8 07:28:35 2020
X-Patchwork-Submitter: Ciara Loftus
X-Patchwork-Id: 1325081
X-Patchwork-Delegate: bpf@iogearbox.net
From: Ciara Loftus
To: bpf@vger.kernel.org, bjorn.topel@intel.com, magnus.karlsson@intel.com
Cc: Ciara Loftus
Subject: [PATCH bpf-next 3/3] xsk: add xdp statistics to xsk_diag
Date: Wed, 8 Jul 2020 07:28:35 +0000
Message-Id: <20200708072835.4427-4-ciara.loftus@intel.com>
In-Reply-To: <20200708072835.4427-1-ciara.loftus@intel.com>
References: <20200708072835.4427-1-ciara.loftus@intel.com>

Add xdp statistics to the information dumped through the xsk_diag
interface.

Signed-off-by: Ciara Loftus
---
 include/uapi/linux/xdp_diag.h | 11 +++++++++++
 net/xdp/xsk_diag.c            | 17 +++++++++++++++++
 2 files changed, 28 insertions(+)

diff --git a/include/uapi/linux/xdp_diag.h b/include/uapi/linux/xdp_diag.h
index 78b2591a7782..66b9973b4f4c 100644
--- a/include/uapi/linux/xdp_diag.h
+++ b/include/uapi/linux/xdp_diag.h
@@ -30,6 +30,7 @@ struct xdp_diag_msg {
 #define XDP_SHOW_RING_CFG	(1 << 1)
 #define XDP_SHOW_UMEM		(1 << 2)
 #define XDP_SHOW_MEMINFO	(1 << 3)
+#define XDP_SHOW_STATS		(1 << 4)
 
 enum {
 	XDP_DIAG_NONE,
@@ -41,6 +42,7 @@ enum {
 	XDP_DIAG_UMEM_FILL_RING,
 	XDP_DIAG_UMEM_COMPLETION_RING,
 	XDP_DIAG_MEMINFO,
+	XDP_DIAG_STATS,
 	__XDP_DIAG_MAX,
 };
 
@@ -69,4 +71,13 @@ struct xdp_diag_umem {
 	__u32 refs;
 };
 
+struct xdp_diag_stats {
+	__u64 n_rx_dropped;
+	__u64 n_rx_invalid;
+	__u64 n_rx_full;
+	__u64 n_fill_ring_empty;
+	__u64 n_tx_invalid;
+	__u64 n_tx_ring_empty;
+};
+
 #endif /* _LINUX_XDP_DIAG_H */
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index 0163b26aaf63..21e9c2d123ee 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -76,6 +76,19 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
 	return err;
 }
 
+static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
+{
+	struct xdp_diag_stats du = {};
+
+	du.n_rx_dropped = xs->rx_dropped;
+	du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
+	du.n_rx_full = xs->rx_queue_full;
+	du.n_fill_ring_empty = xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
+	du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
+	du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
+	return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du);
+}
+
 static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
			 struct xdp_diag_req *req,
			 struct user_namespace *user_ns,
@@ -118,6 +131,10 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
	    sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
 		goto out_nlmsg_trim;
 
+	if ((req->xdiag_show & XDP_SHOW_STATS) &&
+	    xsk_diag_put_stats(xs, nlskb))
+		goto out_nlmsg_trim;
+
 	mutex_unlock(&xs->mutex);
 	nlmsg_end(nlskb, nlh);
 	return 0;
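A rough, untested sketch (not part of this series) of a userspace
consumer for the new attribute: it sends a SOCK_DIAG_BY_FAMILY dump
request for AF_XDP sockets with XDP_SHOW_STATS set and scans each
reply for XDP_DIAG_STATS. The field names come from linux/xdp_diag.h;
the single recv() and the reduced error handling are simplifications,
and a robust consumer would loop until NLMSG_DONE and validate lengths
more strictly.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/sock_diag.h>
#include <linux/xdp_diag.h>

#ifndef AF_XDP
#define AF_XDP 44
#endif

static int dump_xsk_diag_stats(void)
{
	struct {
		struct nlmsghdr nlh;
		struct xdp_diag_req req;
	} msg;
	struct nlmsghdr *nlh;
	char buf[8192];
	int fd, len;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	msg.req.sdiag_family = AF_XDP;
	msg.req.xdiag_show = XDP_SHOW_INFO | XDP_SHOW_STATS;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	if (fd < 0)
		return -errno;
	if (send(fd, &msg, sizeof(msg), 0) != sizeof(msg))
		goto out_err;

	/* One recv() is enough for a handful of sockets. */
	len = recv(fd, buf, sizeof(buf), 0);
	if (len < 0)
		goto out_err;

	for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
	     nlh = NLMSG_NEXT(nlh, len)) {
		struct xdp_diag_msg *m;
		struct rtattr *attr;
		int alen;

		if (nlh->nlmsg_type == NLMSG_DONE ||
		    nlh->nlmsg_type == NLMSG_ERROR)
			break;

		/* Attributes follow the fixed xdp_diag_msg header. */
		m = NLMSG_DATA(nlh);
		attr = (struct rtattr *)(m + 1);
		alen = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*m));

		for (; RTA_OK(attr, alen); attr = RTA_NEXT(attr, alen)) {
			struct xdp_diag_stats *s;

			if (attr->rta_type != XDP_DIAG_STATS)
				continue;
			s = RTA_DATA(attr);
			printf("xsk ino %u: rx_dropped %llu rx_full %llu fill_empty %llu tx_empty %llu\n",
			       m->xdiag_ino,
			       (unsigned long long)s->n_rx_dropped,
			       (unsigned long long)s->n_rx_full,
			       (unsigned long long)s->n_fill_ring_empty,
			       (unsigned long long)s->n_tx_ring_empty);
		}
	}
	close(fd);
	return 0;

out_err:
	close(fd);
	return -errno;
}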