@@ -99,11 +99,11 @@ (define_peephole2
})
(define_peephole2
- [(set (match_operand:VD 0 "register_operand" "")
- (match_operand:VD 1 "aarch64_mem_pair_operand" ""))
- (set (match_operand:VD 2 "register_operand" "")
- (match_operand:VD 3 "memory_operand" ""))]
- "aarch64_operands_ok_for_ldpstp (operands, true, <MODE>mode)"
+ [(set (match_operand:DREG 0 "register_operand" "")
+ (match_operand:DREG 1 "aarch64_mem_pair_operand" ""))
+ (set (match_operand:DREG2 2 "register_operand" "")
+ (match_operand:DREG2 3 "memory_operand" ""))]
+ "aarch64_operands_ok_for_ldpstp (operands, true, <DREG:MODE>mode)"
[(parallel [(set (match_dup 0) (match_dup 1))
(set (match_dup 2) (match_dup 3))])]
{
@@ -119,11 +119,12 @@ (define_peephole2
})
(define_peephole2
- [(set (match_operand:VD 0 "aarch64_mem_pair_operand" "")
- (match_operand:VD 1 "register_operand" ""))
- (set (match_operand:VD 2 "memory_operand" "")
- (match_operand:VD 3 "register_operand" ""))]
- "TARGET_SIMD && aarch64_operands_ok_for_ldpstp (operands, false, <MODE>mode)"
+ [(set (match_operand:DREG 0 "aarch64_mem_pair_operand" "")
+ (match_operand:DREG 1 "register_operand" ""))
+ (set (match_operand:DREG2 2 "memory_operand" "")
+ (match_operand:DREG2 3 "register_operand" ""))]
+ "TARGET_SIMD
+ && aarch64_operands_ok_for_ldpstp (operands, false, <DREG:MODE>mode)"
[(parallel [(set (match_dup 0) (match_dup 1))
(set (match_dup 2) (match_dup 3))])]
{
@@ -138,7 +139,6 @@ (define_peephole2
}
})
-
;; Handle sign/zero extended consecutive load/store.
(define_peephole2
@@ -181,6 +181,36 @@ (define_peephole2
}
})
+;; Handle storing of a floating point zero with integer data.
+;; This handles cases like:
+;; struct pair { int a; float b; }
+;;
+;; p->a = 1;
+;; p->b = 0.0;
+;;
+;; We can also match modes that are not normally valid for an stp
+;; instruction, because aarch64_operands_ok_for_ldpstp checks that
+;; the modes are compatible.
+(define_peephole2
+ [(set (match_operand:DSX 0 "aarch64_mem_pair_operand" "")
+ (match_operand:DSX 1 "aarch64_reg_zero_or_fp_zero" ""))
+ (set (match_operand:<FCVT_TARGET> 2 "memory_operand" "")
+ (match_operand:<FCVT_TARGET> 3 "aarch64_reg_zero_or_fp_zero" ""))]
+ "aarch64_operands_ok_for_ldpstp (operands, false, <V_INT_EQUIV>mode)"
+ [(parallel [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2) (match_dup 3))])]
+{
+ rtx base, offset_1, offset_2;
+
+ extract_base_offset_in_addr (operands[0], &base, &offset_1);
+ extract_base_offset_in_addr (operands[2], &base, &offset_2);
+ if (INTVAL (offset_1) > INTVAL (offset_2))
+ {
+ std::swap (operands[0], operands[2]);
+ std::swap (operands[1], operands[3]);
+ }
+})
+
;; Handle consecutive load/store whose offset is out of the range
;; supported by ldp/ldpsw/stp. We firstly adjust offset in a scratch
;; register, then merge them into ldp/ldpsw/stp by using the adjusted
@@ -177,30 +177,30 @@ (define_insn "aarch64_store_lane0<mode>"
[(set_attr "type" "neon_store1_1reg<q>")]
)
-(define_insn "load_pair<mode>"
- [(set (match_operand:VD 0 "register_operand" "=w")
- (match_operand:VD 1 "aarch64_mem_pair_operand" "Ump"))
- (set (match_operand:VD 2 "register_operand" "=w")
- (match_operand:VD 3 "memory_operand" "m"))]
+(define_insn "load_pair<DREG:mode><DREG2:mode>"
+ [(set (match_operand:DREG 0 "register_operand" "=w")
+ (match_operand:DREG 1 "aarch64_mem_pair_operand" "Ump"))
+ (set (match_operand:DREG2 2 "register_operand" "=w")
+ (match_operand:DREG2 3 "memory_operand" "m"))]
"TARGET_SIMD
&& rtx_equal_p (XEXP (operands[3], 0),
plus_constant (Pmode,
XEXP (operands[1], 0),
- GET_MODE_SIZE (<MODE>mode)))"
+ GET_MODE_SIZE (<DREG:MODE>mode)))"
"ldp\\t%d0, %d2, %1"
[(set_attr "type" "neon_ldp")]
)
-(define_insn "store_pair<mode>"
- [(set (match_operand:VD 0 "aarch64_mem_pair_operand" "=Ump")
- (match_operand:VD 1 "register_operand" "w"))
- (set (match_operand:VD 2 "memory_operand" "=m")
- (match_operand:VD 3 "register_operand" "w"))]
+(define_insn "vec_store_pair<DREG:mode><DREG2:mode>"
+ [(set (match_operand:DREG 0 "aarch64_mem_pair_operand" "=Ump")
+ (match_operand:DREG 1 "register_operand" "w"))
+ (set (match_operand:DREG2 2 "memory_operand" "=m")
+ (match_operand:DREG2 3 "register_operand" "w"))]
"TARGET_SIMD
&& rtx_equal_p (XEXP (operands[2], 0),
plus_constant (Pmode,
XEXP (operands[0], 0),
- GET_MODE_SIZE (<MODE>mode)))"
+ GET_MODE_SIZE (<DREG:MODE>mode)))"
"stp\\t%d1, %d3, %0"
[(set_attr "type" "neon_stp")]
)
@@ -4279,10 +4279,10 @@ aarch64_gen_store_pair (machine_mode mode, rtx mem1, rtx reg1, rtx mem2,
switch (mode)
{
case E_DImode:
- return gen_store_pairdi (mem1, reg1, mem2, reg2);
+ return gen_store_pair_dw_didi (mem1, reg1, mem2, reg2);
case E_DFmode:
- return gen_store_pairdf (mem1, reg1, mem2, reg2);
+ return gen_store_pair_dw_dfdf (mem1, reg1, mem2, reg2);
default:
gcc_unreachable ();
@@ -4299,10 +4299,10 @@ aarch64_gen_load_pair (machine_mode mode, rtx reg1, rtx mem1, rtx reg2,
switch (mode)
{
case E_DImode:
- return gen_load_pairdi (reg1, mem1, reg2, mem2);
+ return gen_load_pair_dw_didi (reg1, mem1, reg2, mem2);
case E_DFmode:
- return gen_load_pairdf (reg1, mem1, reg2, mem2);
+ return gen_load_pair_dw_dfdf (reg1, mem1, reg2, mem2);
default:
gcc_unreachable ();
@@ -16853,6 +16853,10 @@ aarch64_operands_ok_for_ldpstp (rtx *operands, bool load,
if (!rtx_equal_p (base_1, base_2))
return false;
+ /* The operands must be of the same size. */
+ gcc_assert (known_eq (GET_MODE_SIZE (GET_MODE (mem_1)),
+ GET_MODE_SIZE (GET_MODE (mem_2))));
+
offval_1 = INTVAL (offset_1);
offval_2 = INTVAL (offset_2);
/* We should only be trying this for fixed-sized modes. There is no
@@ -1309,15 +1309,15 @@ (define_expand "movmemdi"
;; Operands 1 and 3 are tied together by the final condition; so we allow
;; fairly lax checking on the second memory operation.
-(define_insn "load_pairsi"
- [(set (match_operand:SI 0 "register_operand" "=r,w")
- (match_operand:SI 1 "aarch64_mem_pair_operand" "Ump,Ump"))
- (set (match_operand:SI 2 "register_operand" "=r,w")
- (match_operand:SI 3 "memory_operand" "m,m"))]
- "rtx_equal_p (XEXP (operands[3], 0),
- plus_constant (Pmode,
- XEXP (operands[1], 0),
- GET_MODE_SIZE (SImode)))"
+(define_insn "load_pair_sw_<SX:mode><SX2:mode>"
+ [(set (match_operand:SX 0 "register_operand" "=r,w")
+ (match_operand:SX 1 "aarch64_mem_pair_operand" "Ump,Ump"))
+ (set (match_operand:SX2 2 "register_operand" "=r,w")
+ (match_operand:SX2 3 "memory_operand" "m,m"))]
+ "rtx_equal_p (XEXP (operands[3], 0),
+ plus_constant (Pmode,
+ XEXP (operands[1], 0),
+ GET_MODE_SIZE (<SX:MODE>mode)))"
"@
ldp\\t%w0, %w2, %1
ldp\\t%s0, %s2, %1"
@@ -1325,15 +1325,16 @@ (define_insn "load_pairsi"
(set_attr "fp" "*,yes")]
)
-(define_insn "load_pairdi"
- [(set (match_operand:DI 0 "register_operand" "=r,w")
- (match_operand:DI 1 "aarch64_mem_pair_operand" "Ump,Ump"))
- (set (match_operand:DI 2 "register_operand" "=r,w")
- (match_operand:DI 3 "memory_operand" "m,m"))]
- "rtx_equal_p (XEXP (operands[3], 0),
- plus_constant (Pmode,
- XEXP (operands[1], 0),
- GET_MODE_SIZE (DImode)))"
+;; Load pairs of different modes that can still be merged
+(define_insn "load_pair_dw_<DX:mode><DX2:mode>"
+ [(set (match_operand:DX 0 "register_operand" "=r,w")
+ (match_operand:DX 1 "aarch64_mem_pair_operand" "Ump,Ump"))
+ (set (match_operand:DX2 2 "register_operand" "=r,w")
+ (match_operand:DX2 3 "memory_operand" "m,m"))]
+ "rtx_equal_p (XEXP (operands[3], 0),
+ plus_constant (Pmode,
+ XEXP (operands[1], 0),
+ GET_MODE_SIZE (<DX:MODE>mode)))"
"@
ldp\\t%x0, %x2, %1
ldp\\t%d0, %d2, %1"
@@ -1341,18 +1342,17 @@ (define_insn "load_pairdi"
(set_attr "fp" "*,yes")]
)
-
;; Operands 0 and 2 are tied together by the final condition; so we allow
;; fairly lax checking on the second memory operation.
-(define_insn "store_pairsi"
- [(set (match_operand:SI 0 "aarch64_mem_pair_operand" "=Ump,Ump")
- (match_operand:SI 1 "aarch64_reg_or_zero" "rZ,w"))
- (set (match_operand:SI 2 "memory_operand" "=m,m")
- (match_operand:SI 3 "aarch64_reg_or_zero" "rZ,w"))]
- "rtx_equal_p (XEXP (operands[2], 0),
- plus_constant (Pmode,
- XEXP (operands[0], 0),
- GET_MODE_SIZE (SImode)))"
+(define_insn "store_pair_sw_<SX:mode><SX2:mode>"
+ [(set (match_operand:SX 0 "aarch64_mem_pair_operand" "=Ump,Ump")
+ (match_operand:SX 1 "aarch64_reg_zero_or_fp_zero" "rYZ,w"))
+ (set (match_operand:SX2 2 "memory_operand" "=m,m")
+ (match_operand:SX2 3 "aarch64_reg_zero_or_fp_zero" "rYZ,w"))]
+ "rtx_equal_p (XEXP (operands[2], 0),
+ plus_constant (Pmode,
+ XEXP (operands[0], 0),
+ GET_MODE_SIZE (<SX:MODE>mode)))"
"@
stp\\t%w1, %w3, %0
stp\\t%s1, %s3, %0"
@@ -1360,15 +1360,16 @@ (define_insn "store_pairsi"
(set_attr "fp" "*,yes")]
)
-(define_insn "store_pairdi"
- [(set (match_operand:DI 0 "aarch64_mem_pair_operand" "=Ump,Ump")
- (match_operand:DI 1 "aarch64_reg_or_zero" "rZ,w"))
- (set (match_operand:DI 2 "memory_operand" "=m,m")
- (match_operand:DI 3 "aarch64_reg_or_zero" "rZ,w"))]
- "rtx_equal_p (XEXP (operands[2], 0),
- plus_constant (Pmode,
- XEXP (operands[0], 0),
- GET_MODE_SIZE (DImode)))"
+;; Storing different modes that can still be merged
+(define_insn "store_pair_dw_<DX:mode><DX2:mode>"
+ [(set (match_operand:DX 0 "aarch64_mem_pair_operand" "=Ump,Ump")
+ (match_operand:DX 1 "aarch64_reg_zero_or_fp_zero" "rYZ,w"))
+ (set (match_operand:DX2 2 "memory_operand" "=m,m")
+ (match_operand:DX2 3 "aarch64_reg_zero_or_fp_zero" "rYZ,w"))]
+ "rtx_equal_p (XEXP (operands[2], 0),
+ plus_constant (Pmode,
+ XEXP (operands[0], 0),
+ GET_MODE_SIZE (<DX:MODE>mode)))"
"@
stp\\t%x1, %x3, %0
stp\\t%d1, %d3, %0"
@@ -1376,74 +1377,6 @@ (define_insn "store_pairdi"
(set_attr "fp" "*,yes")]
)
-;; Operands 1 and 3 are tied together by the final condition; so we allow
-;; fairly lax checking on the second memory operation.
-(define_insn "load_pairsf"
- [(set (match_operand:SF 0 "register_operand" "=w,r")
- (match_operand:SF 1 "aarch64_mem_pair_operand" "Ump,Ump"))
- (set (match_operand:SF 2 "register_operand" "=w,r")
- (match_operand:SF 3 "memory_operand" "m,m"))]
- "rtx_equal_p (XEXP (operands[3], 0),
- plus_constant (Pmode,
- XEXP (operands[1], 0),
- GET_MODE_SIZE (SFmode)))"
- "@
- ldp\\t%s0, %s2, %1
- ldp\\t%w0, %w2, %1"
- [(set_attr "type" "neon_load1_2reg,load_8")
- (set_attr "fp" "yes,*")]
-)
-
-(define_insn "load_pairdf"
- [(set (match_operand:DF 0 "register_operand" "=w,r")
- (match_operand:DF 1 "aarch64_mem_pair_operand" "Ump,Ump"))
- (set (match_operand:DF 2 "register_operand" "=w,r")
- (match_operand:DF 3 "memory_operand" "m,m"))]
- "rtx_equal_p (XEXP (operands[3], 0),
- plus_constant (Pmode,
- XEXP (operands[1], 0),
- GET_MODE_SIZE (DFmode)))"
- "@
- ldp\\t%d0, %d2, %1
- ldp\\t%x0, %x2, %1"
- [(set_attr "type" "neon_load1_2reg,load_16")
- (set_attr "fp" "yes,*")]
-)
-
-;; Operands 0 and 2 are tied together by the final condition; so we allow
-;; fairly lax checking on the second memory operation.
-(define_insn "store_pairsf"
- [(set (match_operand:SF 0 "aarch64_mem_pair_operand" "=Ump,Ump")
- (match_operand:SF 1 "aarch64_reg_or_fp_zero" "w,rY"))
- (set (match_operand:SF 2 "memory_operand" "=m,m")
- (match_operand:SF 3 "aarch64_reg_or_fp_zero" "w,rY"))]
- "rtx_equal_p (XEXP (operands[2], 0),
- plus_constant (Pmode,
- XEXP (operands[0], 0),
- GET_MODE_SIZE (SFmode)))"
- "@
- stp\\t%s1, %s3, %0
- stp\\t%w1, %w3, %0"
- [(set_attr "type" "neon_store1_2reg,store_8")
- (set_attr "fp" "yes,*")]
-)
-
-(define_insn "store_pairdf"
- [(set (match_operand:DF 0 "aarch64_mem_pair_operand" "=Ump,Ump")
- (match_operand:DF 1 "aarch64_reg_or_fp_zero" "w,rY"))
- (set (match_operand:DF 2 "memory_operand" "=m,m")
- (match_operand:DF 3 "aarch64_reg_or_fp_zero" "w,rY"))]
- "rtx_equal_p (XEXP (operands[2], 0),
- plus_constant (Pmode,
- XEXP (operands[0], 0),
- GET_MODE_SIZE (DFmode)))"
- "@
- stp\\t%d1, %d3, %0
- stp\\t%x1, %x3, %0"
- [(set_attr "type" "neon_store1_2reg,store_16")
- (set_attr "fp" "yes,*")]
-)
-
;; Load pair with post-index writeback. This is primarily used in function
;; epilogues.
(define_insn "loadwb_pair<GPI:mode>_<P:mode>"
@@ -69,6 +69,12 @@ (define_mode_iterator VSDQ_I_DI [V8QI V16QI V4HI V8HI V2SI V4SI V2DI DI])
;; Double vector modes.
(define_mode_iterator VD [V8QI V4HI V4HF V2SI V2SF])
+;; All modes stored in registers d0-d31.
+(define_mode_iterator DREG [V8QI V4HI V4HF V2SI V2SF DF])
+
+;; Copy of the above.
+(define_mode_iterator DREG2 [V8QI V4HI V4HF V2SI V2SF DF])
+
;; Advanced SIMD, 64-bit container, all integer modes.
(define_mode_iterator VD_BHSI [V8QI V4HI V2SI])
@@ -236,6 +242,19 @@ (define_mode_iterator VSTRUCT [OI CI XI])
;; Double scalar modes
(define_mode_iterator DX [DI DF])
+;; Duplicate of the above
+(define_mode_iterator DX2 [DI DF])
+
+;; Single scalar modes
+(define_mode_iterator SX [SI SF])
+
+;; Duplicate of the above
+(define_mode_iterator SX2 [SI SF])
+
+;; Single and double integer and float modes
+(define_mode_iterator DSX [DF DI SF SI])
+
+
;; Modes available for Advanced SIMD <f>mul lane operations.
(define_mode_iterator VMUL [V4HI V8HI V2SI V4SI
(V4HF "TARGET_SIMD_F16INST")
@@ -855,7 +874,8 @@ (define_mode_attr V_INT_EQUIV [(V8QI "V8QI") (V16QI "V16QI")
(V4HF "V4HI") (V8HF "V8HI")
(V2SF "V2SI") (V4SF "V4SI")
(DF "DI") (V2DF "V2DI")
- (SF "SI") (HF "HI")
+ (SF "SI") (SI "SI")
+ (HF "HI")
(VNx16QI "VNx16QI")
(VNx8HI "VNx8HI") (VNx8HF "VNx8HI")
(VNx4SI "VNx4SI") (VNx4SF "VNx4SI")
@@ -62,6 +62,10 @@ (define_predicate "aarch64_reg_or_fp_zero"
(and (match_code "const_double")
(match_test "aarch64_float_const_zero_rtx_p (op)"))))
+(define_predicate "aarch64_reg_zero_or_fp_zero"
+ (ior (match_operand 0 "aarch64_reg_or_fp_zero")
+ (match_operand 0 "aarch64_reg_or_zero")))
+
(define_predicate "aarch64_reg_zero_or_m1_or_1"
(and (match_code "reg,subreg,const_int")
(ior (match_operand 0 "register_operand")
new file mode 100644
@@ -0,0 +1,20 @@
+/* { dg-options "-O2" } */
+
+typedef float __attribute__ ((vector_size (8))) vec;
+
+struct pair
+{
+ vec e1;
+ double e2;
+};
+
+vec tmp;
+
+void
+stp (struct pair *p)
+{
+ p->e1 = tmp;
+ p->e2 = 1.0;
+
+ /* { dg-final { scan-assembler "stp\td\[0-9\]+, d\[0-9\]+, \\\[x\[0-9\]+\\\]" } } */
+}
new file mode 100644
@@ -0,0 +1,47 @@
+/* { dg-options "-O2" } */
+
+struct pair
+{
+ double a;
+ long long int b;
+};
+
+void
+stp (struct pair *p)
+{
+ p->a = 0.0;
+ p->b = 1;
+}
+
+/* { dg-final { scan-assembler "stp\txzr, x\[0-9\]+, \\\[x\[0-9\]+\\\]" } } */
+
+void
+stp2 (struct pair *p)
+{
+ p->a = 0.0;
+ p->b = 0;
+}
+
+struct reverse_pair
+{
+ long long int a;
+ double b;
+};
+
+void
+stp_reverse (struct reverse_pair *p)
+{
+ p->a = 1;
+ p->b = 0.0;
+}
+
+/* { dg-final { scan-assembler "stp\tx\[0-9\]+, xzr, \\\[x\[0-9\]+\\\]" } } */
+
+void
+stp_reverse2 (struct reverse_pair *p)
+{
+ p->a = 0;
+ p->b = 0.0;
+}
+
+/* { dg-final { scan-assembler-times "stp\txzr, xzr, \\\[x\[0-9\]+\\\]" 2 } } */
new file mode 100644
@@ -0,0 +1,30 @@
+/* { dg-options "-O2" } */
+
+typedef float __attribute__ ((vector_size (8))) fvec;
+typedef int __attribute__ ((vector_size (8))) ivec;
+
+struct pair
+{
+ double a;
+ fvec b;
+};
+
+void ldp (double *a, fvec *b, struct pair *p)
+{
+ *a = p->a + 1;
+ *b = p->b;
+}
+
+struct vec_pair
+{
+ fvec a;
+ ivec b;
+};
+
+void ldp2 (fvec *a, ivec *b, struct vec_pair *p)
+{
+ *a = p->a;
+ *b = p->b;
+}
+
+/* { dg-final { scan-assembler-times "ldp\td\[0-9\]+, d\[0-9\]+, \\\[x\[0-9\]+\\\]" 2 } } */