
[net-next] tcp: show number of network segments in some SNMP counters

Message ID 1536425898-12059-1-git-send-email-laoar.shao@gmail.com
State Changes Requested, archived
Delegated to: David Miller
Series [net-next] tcp: show number of network segments in some SNMP counters

Commit Message

Yafang Shao Sept. 8, 2018, 4:58 p.m. UTC
It is better to show the number of network segments in the SNMP counters
below, as that is more useful to the user.
For example, the user can then easily figure out how many packets are
dropped and how many packets are queued in the out-of-order queue.

- LINUX_MIB_TCPRCVQDROP
- LINUX_MIB_TCPZEROWINDOWDROP
- LINUX_MIB_TCPBACKLOGDROP
- LINUX_MIB_TCPMINTTLDROP
- LINUX_MIB_TCPOFODROP
- LINUX_MIB_TCPOFOQUEUE

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 net/ipv4/tcp_input.c | 18 ++++++++++++------
 net/ipv4/tcp_ipv4.c  |  9 ++++++---
 net/ipv6/tcp_ipv6.c  |  6 ++++--
 3 files changed, 22 insertions(+), 11 deletions(-)
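
For context, the LINUX_MIB_* counters above are the TcpExt entries exported
through /proc/net/netstat. The mechanical change is small: NET_INC_STATS()
bumps a per-cpu MIB counter by one, while NET_ADD_STATS() bumps it by an
arbitrary addend, here the per-skb segment count returned by
tcp_skb_pcount(). A compressed sketch of the OFO-drop hunk (the other hunks
follow the same pattern):

	/* sketch of the accounting change in tcp_data_queue_ofo() */
	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
		/* was: NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); */
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP,
			      tcp_skb_pcount(skb));
		tcp_drop(sk, skb);
		return;
	}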

Comments

Yafang Shao Sept. 8, 2018, 5:42 p.m. UTC | #1
On Sun, Sep 9, 2018 at 12:58 AM, Yafang Shao <laoar.shao@gmail.com> wrote:
> It is better to show the number of network segments in the SNMP counters
> below, as that is more useful to the user.
> For example, the user can then easily figure out how many packets are
> dropped and how many packets are queued in the out-of-order queue.
>
> - LINUX_MIB_TCPRCVQDROP
> - LINUX_MIB_TCPZEROWINDOWDROP
> - LINUX_MIB_TCPBACKLOGDROP
> - LINUX_MIB_TCPMINTTLDROP
> - LINUX_MIB_TCPOFODROP
> - LINUX_MIB_TCPOFOQUEUE
>
> Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
> ---
>  net/ipv4/tcp_input.c | 18 ++++++++++++------
>  net/ipv4/tcp_ipv4.c  |  9 ++++++---
>  net/ipv6/tcp_ipv6.c  |  6 ++++--
>  3 files changed, 22 insertions(+), 11 deletions(-)
>
> diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
> index 62508a2..90f449b 100644
> --- a/net/ipv4/tcp_input.c
> +++ b/net/ipv4/tcp_input.c
> @@ -4496,7 +4496,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
>         tcp_ecn_check_ce(sk, skb);
>
>         if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
> -               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
> +               NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP,
> +                             tcp_skb_pcount(skb));
>                 tcp_drop(sk, skb);
>                 return;
>         }
> @@ -4505,7 +4506,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
>         tp->pred_flags = 0;
>         inet_csk_schedule_ack(sk);
>
> -       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
> +       NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE,
> +                     tcp_skb_pcount(skb));
>         seq = TCP_SKB_CB(skb)->seq;
>         end_seq = TCP_SKB_CB(skb)->end_seq;
>         SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
> @@ -4666,7 +4668,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
>         skb->len = size;
>
>         if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
> -               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
> +               NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP,
> +                             tcp_skb_pcount(skb));
>                 goto err_free;
>         }
>
> @@ -4725,7 +4728,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
>          */
>         if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
>                 if (tcp_receive_window(tp) == 0) {
> -                       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
> +                       NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP,
> +                                     tcp_skb_pcount(skb));
>                         goto out_of_window;
>                 }
>
> @@ -4734,7 +4738,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
>                 if (skb_queue_len(&sk->sk_receive_queue) == 0)
>                         sk_forced_mem_schedule(sk, skb->truesize);
>                 else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
> -                       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
> +                       NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP,
> +                                     tcp_skb_pcount(skb));
>                         goto drop;
>                 }
>
> @@ -4796,7 +4801,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
>                  * remembering D-SACK for its head made in previous line.
>                  */
>                 if (!tcp_receive_window(tp)) {
> -                       NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
> +                       NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP,
> +                                     tcp_skb_pcount(skb));
>                         goto out_of_window;
>                 }
>                 goto queue_and_out;
> diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
> index 09547ef..f2fe14b 100644
> --- a/net/ipv4/tcp_ipv4.c
> +++ b/net/ipv4/tcp_ipv4.c
> @@ -475,7 +475,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
>                 goto out;
>
>         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
> -               __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
> +               __NET_ADD_STATS(net, LINUX_MIB_TCPMINTTLDROP,
> +                               tcp_skb_pcount(skb));
>                 goto out;
>         }
>
> @@ -1633,7 +1634,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
>
>         if (unlikely(sk_add_backlog(sk, skb, limit))) {
>                 bh_unlock_sock(sk);
> -               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
> +               __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP,
> +                               tcp_skb_pcount(skb));
>                 return true;
>         }
>         return false;
> @@ -1790,7 +1792,8 @@ int tcp_v4_rcv(struct sk_buff *skb)
>                 }
>         }
>         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
> -               __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
> +               __NET_ADD_STATS(net, LINUX_MIB_TCPMINTTLDROP,
> +                               tcp_skb_pcount(skb));
>                 goto discard_and_relse;
>         }
>
> diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
> index 03e6b7a..97dfc16 100644
> --- a/net/ipv6/tcp_ipv6.c
> +++ b/net/ipv6/tcp_ipv6.c
> @@ -391,7 +391,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
>                 goto out;
>
>         if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
> -               __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
> +               __NET_ADD_STATS(net, LINUX_MIB_TCPMINTTLDROP,
> +                               tcp_skb_pcount(skb));
>                 goto out;
>         }
>
> @@ -1523,7 +1524,8 @@ static int tcp_v6_rcv(struct sk_buff *skb)
>                 }
>         }
>         if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
> -               __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
> +               __NET_ADD_STATS(net, LINUX_MIB_TCPMINTTLDROP,
> +                               tcp_skb_pcount(skb));
>                 goto discard_and_relse;
>         }
>
> --
> 1.8.3.1
>

It seems it is not proper to use tcp_skb_pcount(skb) here. I will send a V2.
Sorry about the noise.


Thanks
Yafang
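
The terse self-NAK above is worth unpacking. tcp_skb_pcount() returns
TCP_SKB_CB(skb)->tcp_gso_segs (quoted by the test robot below), a field that
is only maintained for skbs on the write queue and that shares a union with
tcp_tw_isn in struct tcp_skb_cb, so for packets arriving on the receive path
it does not reliably hold a segment count. The receive side normally derives
the count from skb_shinfo(skb)->gso_segs clamped to at least 1, as
tcp_segs_in() does. The helper below only illustrates that alternative (the
name tcp_rcv_skb_segs is invented here); it is not the actual v2:

	/* Hypothetical helper: count the TCP segments carried by a
	 * *received* skb the way tcp_segs_in() does, instead of relying on
	 * tcp_skb_pcount(), whose tcp_gso_segs field is only meaningful for
	 * transmit-side skbs.
	 */
	static inline u16 tcp_rcv_skb_segs(const struct sk_buff *skb)
	{
		/* GRO stores the number of coalesced segments in gso_segs;
		 * a non-aggregated skb reports 0, which is clamped to 1.
		 */
		return max_t(u16, 1, skb_shinfo(skb)->gso_segs);
	}

	/* e.g. in tcp_data_queue_ofo():
	 *	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP,
	 *		      tcp_rcv_skb_segs(skb));
	 */
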
kernel test robot Sept. 10, 2018, 3:07 p.m. UTC | #2
Hi Yafang,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Yafang-Shao/tcp-show-number-of-network-segments-in-some-SNMP-counters/20180910-225108
config: x86_64-randconfig-x019-201836 (attached as .config)
compiler: gcc-7 (Debian 7.3.0-1) 7.3.0
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64 

Note: it may well be a FALSE warning. FWIW you are at least aware of it now.
http://gcc.gnu.org/wiki/Better_Uninitialized_Warnings

All warnings (new ones prefixed by >>):

   In file included from net/ipv4/tcp_ipv4.c:69:0:
   net/ipv4/tcp_ipv4.c: In function 'tcp_v4_err':
>> include/net/tcp.h:917:24: warning: 'skb' may be used uninitialized in this function [-Wmaybe-uninitialized]
     return TCP_SKB_CB(skb)->tcp_gso_segs;
                           ^
   net/ipv4/tcp_ipv4.c:436:18: note: 'skb' was declared here
     struct sk_buff *skb;
                     ^~~
--
   In file included from net//ipv4/tcp_ipv4.c:69:0:
   net//ipv4/tcp_ipv4.c: In function 'tcp_v4_err':
>> include/net/tcp.h:917:24: warning: 'skb' may be used uninitialized in this function [-Wmaybe-uninitialized]
     return TCP_SKB_CB(skb)->tcp_gso_segs;
                           ^
   net//ipv4/tcp_ipv4.c:436:18: note: 'skb' was declared here
     struct sk_buff *skb;
                     ^~~

vim +/skb +917 include/net/tcp.h

3fa6f616 David Ahern    2017-08-07  911  
^1da177e Linus Torvalds 2005-04-16  912  /* Due to TSO, an SKB can be composed of multiple actual
^1da177e Linus Torvalds 2005-04-16  913   * packets.  To keep these tracked properly, we use this.
bd14b1b2 Eric Dumazet   2012-05-04  914   */
^1da177e Linus Torvalds 2005-04-16  915  static inline int tcp_skb_pcount(const struct sk_buff *skb)
bd14b1b2 Eric Dumazet   2012-05-04  916  {
cd7d8498 Eric Dumazet   2014-09-24 @917  	return TCP_SKB_CB(skb)->tcp_gso_segs;
cd7d8498 Eric Dumazet   2014-09-24  918  }
bd14b1b2 Eric Dumazet   2012-05-04  919  

:::::: The code at line 917 was first introduced by commit
:::::: cd7d8498c9a5d510c64db38d9f4f4fbc41790f09 tcp: change tcp_skb_pcount() location

:::::: TO: Eric Dumazet <edumazet@google.com>
:::::: CC: David S. Miller <davem@davemloft.net>

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
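
The warning is straightforward to reconstruct from the first tcp_ipv4.c hunk:
in tcp_v4_err() the received packet is the icmp_skb parameter, while the
local skb declared at net/ipv4/tcp_ipv4.c:436 is only assigned further down
in the function, so the added tcp_skb_pcount(skb) in the min_ttl check reads
a pointer that has not been set on that path. A reduced, standalone model of
the pattern (not kernel code, names simplified) that should trigger the same
class of warning under gcc -Wall -O2:

	struct sk_buff { int tcp_gso_segs; };

	static int tcp_skb_pcount(const struct sk_buff *skb)
	{
		return skb->tcp_gso_segs;
	}

	extern void stat_add(int addend);
	extern struct sk_buff *queue_head(void);

	void err_handler(struct sk_buff *icmp_skb, int ttl, int min_ttl)
	{
		struct sk_buff *skb;	/* like the local at tcp_ipv4.c:436 */

		if (ttl < min_ttl) {
			/* the bug being illustrated: the received packet is
			 * icmp_skb, but the count is read from the
			 * still-unassigned skb
			 */
			stat_add(tcp_skb_pcount(skb));
			return;
		}

		skb = queue_head();	/* the first real assignment of skb */
		stat_add(tcp_skb_pcount(skb));
	}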

Patch

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 62508a2..90f449b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4496,7 +4496,8 @@  static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	tcp_ecn_check_ce(sk, skb);
 
 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
-		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
+		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP,
+			      tcp_skb_pcount(skb));
 		tcp_drop(sk, skb);
 		return;
 	}
@@ -4505,7 +4506,8 @@  static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	tp->pred_flags = 0;
 	inet_csk_schedule_ack(sk);
 
-	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
+	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE,
+		      tcp_skb_pcount(skb));
 	seq = TCP_SKB_CB(skb)->seq;
 	end_seq = TCP_SKB_CB(skb)->end_seq;
 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
@@ -4666,7 +4668,8 @@  int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
 	skb->len = size;
 
 	if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
-		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
+		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP,
+			      tcp_skb_pcount(skb));
 		goto err_free;
 	}
 
@@ -4725,7 +4728,8 @@  static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
 		if (tcp_receive_window(tp) == 0) {
-			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
+			NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP,
+				      tcp_skb_pcount(skb));
 			goto out_of_window;
 		}
 
@@ -4734,7 +4738,8 @@  static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		if (skb_queue_len(&sk->sk_receive_queue) == 0)
 			sk_forced_mem_schedule(sk, skb->truesize);
 		else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
-			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
+			NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP,
+				      tcp_skb_pcount(skb));
 			goto drop;
 		}
 
@@ -4796,7 +4801,8 @@  static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 		 * remembering D-SACK for its head made in previous line.
 		 */
 		if (!tcp_receive_window(tp)) {
-			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
+			NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP,
+				      tcp_skb_pcount(skb));
 			goto out_of_window;
 		}
 		goto queue_and_out;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 09547ef..f2fe14b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -475,7 +475,8 @@  void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		goto out;
 
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_ADD_STATS(net, LINUX_MIB_TCPMINTTLDROP,
+				tcp_skb_pcount(skb));
 		goto out;
 	}
 
@@ -1633,7 +1634,8 @@  bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
 		bh_unlock_sock(sk);
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
+		__NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP,
+				tcp_skb_pcount(skb));
 		return true;
 	}
 	return false;
@@ -1790,7 +1792,8 @@  int tcp_v4_rcv(struct sk_buff *skb)
 		}
 	}
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_ADD_STATS(net, LINUX_MIB_TCPMINTTLDROP,
+				tcp_skb_pcount(skb));
 		goto discard_and_relse;
 	}
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 03e6b7a..97dfc16 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -391,7 +391,8 @@  static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		goto out;
 
 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
-		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_ADD_STATS(net, LINUX_MIB_TCPMINTTLDROP,
+				tcp_skb_pcount(skb));
 		goto out;
 	}
 
@@ -1523,7 +1524,8 @@  static int tcp_v6_rcv(struct sk_buff *skb)
 		}
 	}
 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
-		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_ADD_STATS(net, LINUX_MIB_TCPMINTTLDROP,
+				tcp_skb_pcount(skb));
 		goto discard_and_relse;
 	}