From patchwork Fri Jul 18 18:28:15 2014
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Zi Shen Lim
X-Patchwork-Id: 371689
X-Patchwork-Delegate: davem@davemloft.net
From: Zi Shen Lim <zlim.lnx@gmail.com>
To: Catalin Marinas, Will Deacon, Jiang Liu, AKASHI Takahiro,
	"David S. Miller", Daniel Borkmann, Alexei Starovoitov
Cc: Zi Shen Lim, linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, netdev@vger.kernel.org
Subject: [PATCH 09/14] arm64: introduce aarch64_insn_gen_add_sub_shifted_reg()
Date: Fri, 18 Jul 2014 11:28:15 -0700
Message-Id: <1405708100-13604-10-git-send-email-zlim.lnx@gmail.com>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1405708100-13604-1-git-send-email-zlim.lnx@gmail.com>
References: <1405708100-13604-1-git-send-email-zlim.lnx@gmail.com>
X-Mailing-List: netdev@vger.kernel.org

Introduce a function to generate add/subtract (shifted register)
instructions.
Signed-off-by: Zi Shen Lim <zlim.lnx@gmail.com>
---
 arch/arm64/include/asm/insn.h | 11 ++++++++++
 arch/arm64/kernel/insn.c      | 49 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+)

diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 49dec28..c0a765d 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -67,6 +67,7 @@ enum aarch64_insn_imm_type {
 	AARCH64_INSN_IMM_12,
 	AARCH64_INSN_IMM_9,
 	AARCH64_INSN_IMM_7,
+	AARCH64_INSN_IMM_6,
 	AARCH64_INSN_IMM_S,
 	AARCH64_INSN_IMM_R,
 	AARCH64_INSN_IMM_MAX
@@ -206,6 +207,10 @@ __AARCH64_INSN_FUNCS(bfm,	0x7F800000, 0x33000000)
 __AARCH64_INSN_FUNCS(movz,	0x7F800000, 0x52800000)
 __AARCH64_INSN_FUNCS(ubfm,	0x7F800000, 0x53000000)
 __AARCH64_INSN_FUNCS(movk,	0x7F800000, 0x72800000)
+__AARCH64_INSN_FUNCS(add,	0x7F200000, 0x0B000000)
+__AARCH64_INSN_FUNCS(adds,	0x7F200000, 0x2B000000)
+__AARCH64_INSN_FUNCS(sub,	0x7F200000, 0x4B000000)
+__AARCH64_INSN_FUNCS(subs,	0x7F200000, 0x6B000000)
 __AARCH64_INSN_FUNCS(b,		0xFC000000, 0x14000000)
 __AARCH64_INSN_FUNCS(bl,	0xFC000000, 0x94000000)
 __AARCH64_INSN_FUNCS(cbz,	0xFE000000, 0x34000000)
@@ -265,6 +270,12 @@ u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
 			      int imm, int shift,
 			      enum aarch64_insn_variant variant,
 			      enum aarch64_insn_movewide_type type);
+u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
+					 enum aarch64_insn_register src,
+					 enum aarch64_insn_register reg,
+					 int shift,
+					 enum aarch64_insn_variant variant,
+					 enum aarch64_insn_adsb_type type);
 
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 7aa2784..d7a4dd4 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -260,6 +260,7 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
 		mask = BIT(7) - 1;
 		shift = 15;
 		break;
+	case AARCH64_INSN_IMM_6:
 	case AARCH64_INSN_IMM_S:
 		mask = BIT(6) - 1;
 		shift = 10;
@@ -698,3 +699,51 @@ u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
 
 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
 }
+
+u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
+					 enum aarch64_insn_register src,
+					 enum aarch64_insn_register reg,
+					 int shift,
+					 enum aarch64_insn_variant variant,
+					 enum aarch64_insn_adsb_type type)
+{
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_ADSB_ADD:
+		insn = aarch64_insn_get_add_value();
+		break;
+	case AARCH64_INSN_ADSB_SUB:
+		insn = aarch64_insn_get_sub_value();
+		break;
+	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
+		insn = aarch64_insn_get_adds_value();
+		break;
+	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
+		insn = aarch64_insn_get_subs_value();
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	switch (variant) {
+	case AARCH64_INSN_VARIANT_32BIT:
+		BUG_ON(shift & ~(SZ_32 - 1));
+		break;
+	case AARCH64_INSN_VARIANT_64BIT:
+		insn |= AARCH64_INSN_SF_BIT;
+		BUG_ON(shift & ~(SZ_64 - 1));
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
+
+	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
+}
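
For context, a minimal sketch (not part of the patch) of how a caller such
as a JIT might use the new helper; example_emit(), its instruction buffer,
and the register choices are illustrative assumptions, while the enum values
come from arch/arm64/include/asm/insn.h:

/*
 * Illustrative sketch only -- not part of this patch. example_emit() and
 * the buf[] instruction buffer are hypothetical.
 */
#include <linux/types.h>
#include <asm/insn.h>

static void example_emit(u32 *buf)
{
	/* add x0, x1, x2 -- 64-bit variant, Rm unshifted (LSL #0) */
	buf[0] = aarch64_insn_gen_add_sub_shifted_reg(AARCH64_INSN_REG_0,
						      AARCH64_INSN_REG_1,
						      AARCH64_INSN_REG_2, 0,
						      AARCH64_INSN_VARIANT_64BIT,
						      AARCH64_INSN_ADSB_ADD);

	/* subs w3, w4, w5, lsl #2 -- 32-bit variant, flag-setting */
	buf[1] = aarch64_insn_gen_add_sub_shifted_reg(AARCH64_INSN_REG_3,
						      AARCH64_INSN_REG_4,
						      AARCH64_INSN_REG_5, 2,
						      AARCH64_INSN_VARIANT_32BIT,
						      AARCH64_INSN_ADSB_SUB_SETFLAGS);
}

Since the generator leaves the shift-type field of the encoding at zero, the
shift amount is always applied as LSL, i.e. dst = src +/- (reg << shift).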