@@ -131,6 +131,7 @@ extern int arm_const_double_inline_cost (rtx);
extern bool arm_const_double_by_parts (rtx);
extern bool arm_const_double_by_immediates (rtx);
extern void arm_emit_call_insn (rtx, rtx, bool);
+extern bool arm_fusion_enabled_p (unsigned int);
extern const char *output_call (rtx *);
void arm_emit_movpair (rtx, rtx);
extern const char *output_mov_long_double_arm_from_arm (rtx *);
@@ -29642,6 +29642,13 @@ aarch_macro_fusion_pair_p (rtx_insn* prev, rtx_insn* curr)
return false;
}
+/* Return true iff the instruction fusion described by OP is enabled.  */
+bool
+arm_fusion_enabled_p (unsigned int op)
+{
+  return current_tune->fusible_ops & op;
+}
+
/* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
static unsigned HOST_WIDE_INT
@@ -18,14 +18,27 @@
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
+
+;; When AES/AESMC fusion is enabled we want the register allocation to
+;; look like:
+;; AESE Vn, _
+;; AESMC Vn, Vn
+;; So prefer to tie operand 1 to operand 0 when fusing.
+
(define_insn "crypto_<crypto_pattern>"
- [(set (match_operand:<crypto_mode> 0 "register_operand" "=w")
+ [(set (match_operand:<crypto_mode> 0 "register_operand" "=w,w")
(unspec:<crypto_mode> [(match_operand:<crypto_mode> 1
- "register_operand" "w")]
+ "register_operand" "0,w")]
CRYPTO_UNARY))]
"TARGET_CRYPTO"
"<crypto_pattern>.<crypto_size_sfx>\\t%q0, %q1"
- [(set_attr "type" "<crypto_type>")]
+ [(set_attr "type" "<crypto_type>")
+ (set_attr_alternative "enabled"
+ [(if_then_else (match_test
+ "arm_fusion_enabled_p (tune_params::FUSE_AES_AESMC)")
+ (const_string "yes")
+ (const_string "no"))
+ (const_string "yes")])]
)
(define_insn "crypto_<crypto_pattern>"