From patchwork Tue Sep 22 14:34:36 2015
X-Patchwork-Submitter: Christophe Leroy
X-Patchwork-Id: 521079
X-Patchwork-Delegate: davem@davemloft.net
Message-Id: 
In-Reply-To: 
References: 
From: Christophe Leroy
Subject: [PATCH 9/9] powerpc: optimise csum_partial() call when len is constant
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman, scottwood@freescale.com
Cc: linux-kernel@vger.kernel.org, linuxppc-dev@lists.ozlabs.org, netdev@vger.kernel.org
Date: Tue, 22 Sep 2015 16:34:36 +0200 (CEST)
X-Mailing-List: netdev@vger.kernel.org

csum_partial() is often called for small fixed-length packets for
which it is suboptimal to use the generic csum_partial() function.

For instance, in my configuration, I got:
* One place calling it with constant len 4
* Seven places calling it with constant len 8
* Three places calling it with constant len 14
* One place calling it with constant len 20
* One place calling it with constant len 24
* One place calling it with constant len 32

This patch renames csum_partial() to __csum_partial() and implements
csum_partial() as an inline wrapper function which
* uses csum_add() for small constant lengths that are a multiple of 16 bits
* uses ip_fast_csum() for other constant lengths that are a multiple of 32 bits
* uses __csum_partial() in all other cases

Signed-off-by: Christophe Leroy
---
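Note: the inline wrapper added below relies on csum_add() and
ip_fast_csum_nofold(), which it expects to already be available in
this header (they come from earlier patches in this series).
csum_add() folds a 32-bit value into a running one's-complement sum;
a plain C sketch of that operation (roughly the generic fallback
from include/net/checksum.h, shown here for reference only, not part
of this patch) looks like:

static inline __wsum csum_add(__wsum csum, __wsum addend)
{
	u32 res = (__force u32)csum;

	/* Add the two 32-bit values; if the addition wraps, fold the
	 * carry bit back into the low end of the result. */
	res += (__force u32)addend;
	return (__force __wsum)(res + (res < (__force u32)addend));
}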
 arch/powerpc/include/asm/checksum.h | 80 ++++++++++++++++++++++++++-----------
 arch/powerpc/lib/checksum_32.S      |  4 +-
 arch/powerpc/lib/checksum_64.S      |  4 +-
 arch/powerpc/lib/ppc_ksyms.c        |  2 +-
 4 files changed, 62 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index f8a9704..25c4657f 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -13,20 +13,6 @@
 #include
 #else
 /*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
  * Computes the checksum of a memory block at src, length len,
  * and adds in "sum" (32-bit), while copying the block to dst.
  * If an access exception occurs on src or dst, it stores -EFAULT
@@ -67,15 +53,6 @@ static inline __sum16 csum_fold(__wsum sum)
 	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
 }
 
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-static inline __sum16 ip_compute_csum(const void *buff, int len)
-{
-	return csum_fold(csum_partial(buff, len, 0));
-}
-
 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 					unsigned short len,
 					unsigned short proto,
@@ -175,6 +152,63 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 	return csum_fold(ip_fast_csum_nofold(iph, ihl));
 }
 
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum __csum_partial(const void *buff, int len, __wsum sum);
+
+static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
+{
+	if (__builtin_constant_p(len) && len == 0)
+		return sum;
+
+	if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
+		__wsum sum1;
+
+		if (len == 2)
+			sum1 = (__force u32)*(u16 *)buff;
+		if (len >= 4)
+			sum1 = *(u32 *)buff;
+		if (len == 6)
+			sum1 = csum_add(sum1, (__force u32)*(u16 *)(buff + 4));
+		if (len >= 8)
+			sum1 = csum_add(sum1, *(u32 *)(buff + 4));
+		if (len == 10)
+			sum1 = csum_add(sum1, (__force u32)*(u16 *)(buff + 8));
+		if (len >= 12)
+			sum1 = csum_add(sum1, *(u32 *)(buff + 8));
+		if (len == 14)
+			sum1 = csum_add(sum1, (__force u32)*(u16 *)(buff + 12));
+		if (len >= 16)
+			sum1 = csum_add(sum1, *(u32 *)(buff + 12));
+
+		sum = csum_add(sum1, sum);
+	} else if (__builtin_constant_p(len) && (len & 3) == 0) {
+		sum = csum_add(ip_fast_csum_nofold(buff, len >> 2), sum);
+	} else {
+		sum = __csum_partial(buff, len, sum);
+	}
+	return sum;
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+	return csum_fold(csum_partial(buff, len, 0));
+}
+
 #endif
 #endif /* __KERNEL__ */
 #endif
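To illustrate the effect of the inline wrapper above (hypothetical
example, not part of this patch): with a compile-time constant
8-byte length, only the "len >= 4" and "len >= 8" branches survive,
so a helper like the one below boils down to two 32-bit loads folded
together with csum_add(), with no call to __csum_partial():

/* Hypothetical helper, for illustration only: checksum a fixed
 * 8-byte header.  With len == 8 known at compile time, the inline
 * csum_partial() above reduces to
 *	sum1 = *(u32 *)buff;
 *	sum1 = csum_add(sum1, *(u32 *)(buff + 4));
 *	sum  = csum_add(sum1, sum);
 */
static inline __sum16 example_hdr8_csum(const void *hdr)
{
	return csum_fold(csum_partial(hdr, 8, 0));
}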
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index 0d34f47..043d0088 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -24,9 +24,9 @@
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
  *
- * csum_partial(buff, len, sum)
+ * __csum_partial(buff, len, sum)
  */
-_GLOBAL(csum_partial)
+_GLOBAL(__csum_partial)
 	subi	r3,r3,4
 	srawi.	r6,r4,2		/* Divide len by 4 and also clear carry */
 	beq	3f		/* if we're doing < 4 bytes */
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index f53f4ab..4ab562d 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -21,9 +21,9 @@
  * Computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit).
  *
- * csum_partial(r3=buff, r4=len, r5=sum)
+ * __csum_partial(r3=buff, r4=len, r5=sum)
  */
-_GLOBAL(csum_partial)
+_GLOBAL(__csum_partial)
 	addic	r0,r5,0		/* clear carry */
 
 	srdi.	r6,r4,3		/* less than 8 bytes? */
diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c
index 8cd5c0b..c422812 100644
--- a/arch/powerpc/lib/ppc_ksyms.c
+++ b/arch/powerpc/lib/ppc_ksyms.c
@@ -17,7 +17,7 @@ EXPORT_SYMBOL(strcmp);
 EXPORT_SYMBOL(strncmp);
 
 #ifndef CONFIG_GENERIC_CSUM
-EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(__csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_generic);
 #endif
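For constant lengths that are a multiple of 4 bytes but larger than
16 (the len 20, 24 and 32 call sites mentioned above), the wrapper
instead goes through ip_fast_csum_nofold(). A hypothetical example,
again for illustration only and not part of this patch:

/* Hypothetical helper: checksum a constant 20-byte block (e.g. an
 * IPv4 header without options).  len is a multiple of 4 and larger
 * than 16, so the inline csum_partial() above reduces to
 * csum_add(ip_fast_csum_nofold(buff, 20 >> 2), sum), avoiding the
 * out-of-line __csum_partial() call entirely. */
static inline __wsum example_csum20(const void *buff, __wsum sum)
{
	return csum_partial(buff, 20, sum);
}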