diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
--- a/gcc/config/i386/i386-expand.cc
+++ b/gcc/config/i386/i386-expand.cc
@@ -1260,14 +1260,14 @@ ix86_swap_binary_operands_p (enum rtx_code code, machine_mode mode,
return false;
}
-
/* Fix up OPERANDS to satisfy ix86_binary_operator_ok. Return the
destination to use for the operation. If different from the true
- destination in operands[0], a copy operation will be required. */
+ destination in operands[0], a copy operation will be required except
+ under TARGET_APX_NDD. */
rtx
ix86_fixup_binary_operands (enum rtx_code code, machine_mode mode,
- rtx operands[])
+ rtx operands[], bool use_ndd)
{
rtx dst = operands[0];
rtx src1 = operands[1];
@@ -1307,7 +1307,7 @@ ix86_fixup_binary_operands (enum rtx_code code, machine_mode mode,
src1 = force_reg (mode, src1);
/* Source 1 cannot be a non-matching memory. */
- if (MEM_P (src1) && !rtx_equal_p (dst, src1))
+ if (!use_ndd && MEM_P (src1) && !rtx_equal_p (dst, src1))
src1 = force_reg (mode, src1);
/* Improve address combine. */
@@ -1338,11 +1338,11 @@ ix86_fixup_binary_operands_no_copy (enum rtx_code code,
void
ix86_expand_binary_operator (enum rtx_code code, machine_mode mode,
- rtx operands[])
+ rtx operands[], bool use_ndd)
{
rtx src1, src2, dst, op, clob;
- dst = ix86_fixup_binary_operands (code, mode, operands);
+ dst = ix86_fixup_binary_operands (code, mode, operands, use_ndd);
src1 = operands[1];
src2 = operands[2];
@@ -1352,7 +1352,8 @@ ix86_expand_binary_operator (enum rtx_code code, machine_mode mode,
if (reload_completed
&& code == PLUS
- && !rtx_equal_p (dst, src1))
+ && !rtx_equal_p (dst, src1)
+ && !use_ndd)
{
/* This is going to be an LEA; avoid splitting it later. */
emit_insn (op);
@@ -1451,7 +1452,7 @@ ix86_expand_vector_logical_operator (enum rtx_code code, machine_mode mode,
bool
ix86_binary_operator_ok (enum rtx_code code, machine_mode mode,
- rtx operands[3])
+ rtx operands[3], bool use_ndd)
{
rtx dst = operands[0];
rtx src1 = operands[1];
@@ -1475,7 +1476,7 @@ ix86_binary_operator_ok (enum rtx_code code, machine_mode mode,
return false;
/* Source 1 cannot be a non-matching memory. */
- if (MEM_P (src1) && !rtx_equal_p (dst, src1))
+ if (!use_ndd && MEM_P (src1) && !rtx_equal_p (dst, src1))
/* Support "andhi/andsi/anddi" as a zero-extending move. */
return (code == AND
&& (mode == HImode
diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
--- a/gcc/config/i386/i386-options.cc
+++ b/gcc/config/i386/i386-options.cc
@@ -2129,6 +2129,8 @@ ix86_option_override_internal (bool main_args_p,
if (TARGET_APX_F && !TARGET_64BIT)
error ("%<-mapxf%> is not supported for 32-bit code");
+ else if (opts->x_ix86_apx_features != apx_none && !TARGET_64BIT)
+ error ("%<-mapx-features=%> option is not supported for 32-bit code");
if (TARGET_UINTR && !TARGET_64BIT)
error ("%<-muintr%> not supported for 32-bit code");
diff --git a/gcc/config/i386/i386-protos.h b/gcc/config/i386/i386-protos.h
--- a/gcc/config/i386/i386-protos.h
+++ b/gcc/config/i386/i386-protos.h
@@ -108,14 +108,14 @@ extern void ix86_expand_move (machine_mode, rtx[]);
extern void ix86_expand_vector_move (machine_mode, rtx[]);
extern void ix86_expand_vector_move_misalign (machine_mode, rtx[]);
extern rtx ix86_fixup_binary_operands (enum rtx_code,
- machine_mode, rtx[]);
+ machine_mode, rtx[], bool = false);
extern void ix86_fixup_binary_operands_no_copy (enum rtx_code,
machine_mode, rtx[]);
extern void ix86_expand_binary_operator (enum rtx_code,
- machine_mode, rtx[]);
+ machine_mode, rtx[], bool = false);
extern void ix86_expand_vector_logical_operator (enum rtx_code,
machine_mode, rtx[]);
-extern bool ix86_binary_operator_ok (enum rtx_code, machine_mode, rtx[3]);
+extern bool ix86_binary_operator_ok (enum rtx_code, machine_mode, rtx[3], bool = false);
extern bool ix86_avoid_lea_for_add (rtx_insn *, rtx[]);
extern bool ix86_use_lea_for_mov (rtx_insn *, rtx[]);
extern bool ix86_avoid_lea_for_addr (rtx_insn *, rtx[]);
diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -562,7 +562,7 @@ (define_attr "unit" "integer,i387,sse,mmx,unknown"
;; Used to control the "enabled" attribute on a per-instruction basis.
(define_attr "isa" "base,x64,nox64,x64_sse2,x64_sse4,x64_sse4_noavx,
- x64_avx,x64_avx512bw,x64_avx512dq,aes,
+ x64_avx,x64_avx512bw,x64_avx512dq,aes,apx_ndd,
sse_noavx,sse2,sse2_noavx,sse3,sse3_noavx,sse4,sse4_noavx,
avx,noavx,avx2,noavx2,bmi,bmi2,fma4,fma,avx512f,avx512f_512,
noavx512f,avx512bw,avx512bw_512,noavx512bw,avx512dq,
@@ -960,6 +960,8 @@ (define_attr "enabled" ""
(symbol_ref "TARGET_AVX512BF16 && TARGET_AVX512VL")
(eq_attr "isa" "vpclmulqdqvl")
(symbol_ref "TARGET_VPCLMULQDQ && TARGET_AVX512VL")
+ (eq_attr "isa" "apx_ndd")
+ (symbol_ref "TARGET_APX_NDD")
(eq_attr "mmx_isa" "native")
(symbol_ref "!TARGET_MMX_WITH_SSE")
@@ -6285,7 +6287,8 @@ (define_expand "add<mode>3"
(plus:SDWIM (match_operand:SDWIM 1 "nonimmediate_operand")
(match_operand:SDWIM 2 "<general_hilo_operand>")))]
""
- "ix86_expand_binary_operator (PLUS, <MODE>mode, operands); DONE;")
+ "ix86_expand_binary_operator (PLUS, <MODE>mode, operands,
+ TARGET_APX_NDD); DONE;")
(define_insn_and_split "*add<dwi>3_doubleword"
[(set (match_operand:<DWI> 0 "nonimmediate_operand" "=ro,r")
@@ -6412,26 +6415,29 @@ (define_insn_and_split "*add<dwi>3_doubleword_concat_zext"
"split_double_mode (<DWI>mode, &operands[0], 1, &operands[0], &operands[5]);")
(define_insn "*add<mode>_1"
- [(set (match_operand:SWI48 0 "nonimmediate_operand" "=rm,r,r,r")
+ [(set (match_operand:SWI48 0 "nonimmediate_operand" "=rm,r,r,r,r,r")
(plus:SWI48
- (match_operand:SWI48 1 "nonimmediate_operand" "%0,0,r,r")
- (match_operand:SWI48 2 "x86_64_general_operand" "re,BM,0,le")))
+ (match_operand:SWI48 1 "nonimmediate_operand" "%0,0,r,r,rm,r")
+ (match_operand:SWI48 2 "x86_64_general_operand" "re,BM,0,le,re,BM")))
(clobber (reg:CC FLAGS_REG))]
- "ix86_binary_operator_ok (PLUS, <MODE>mode, operands)"
+ "ix86_binary_operator_ok (PLUS, <MODE>mode, operands,
+ TARGET_APX_NDD)"
{
+ bool use_ndd = (which_alternative == 4 || which_alternative == 5);
switch (get_attr_type (insn))
{
case TYPE_LEA:
return "#";
case TYPE_INCDEC:
- gcc_assert (rtx_equal_p (operands[0], operands[1]));
if (operands[2] == const1_rtx)
- return "inc{<imodesuffix>}\t%0";
+ return use_ndd ? "inc{<imodesuffix>}\t{%1, %0|%0, %1}"
+ : "inc{<imodesuffix>}\t%0";
else
{
gcc_assert (operands[2] == constm1_rtx);
- return "dec{<imodesuffix>}\t%0";
+ return use_ndd ? "dec{<imodesuffix>}\t{%1, %0|%0, %1}"
+ : "dec{<imodesuffix>}\t%0";
}
default:
@@ -6440,14 +6446,16 @@ (define_insn "*add<mode>_1"
if (which_alternative == 2)
std::swap (operands[1], operands[2]);
- gcc_assert (rtx_equal_p (operands[0], operands[1]));
if (x86_maybe_negate_const_int (&operands[2], <MODE>mode))
- return "sub{<imodesuffix>}\t{%2, %0|%0, %2}";
+ return use_ndd ? "sub{<imodesuffix>}\t{%2, %1, %0|%0, %1, %2}"
+ : "sub{<imodesuffix>}\t{%2, %0|%0, %2}";
- return "add{<imodesuffix>}\t{%2, %0|%0, %2}";
+ return use_ndd ? "add{<imodesuffix>}\t{%2, %1, %0|%0, %1, %2}"
+ : "add{<imodesuffix>}\t{%2, %0|%0, %2}";
}
}
- [(set (attr "type")
+ [(set_attr "isa" "*,*,*,*,apx_ndd,apx_ndd")
+ (set (attr "type")
(cond [(eq_attr "alternative" "3")
(const_string "lea")
(match_operand:SWI48 2 "incdec_operand")
@@ -6516,25 +6524,26 @@ (define_insn "addsi_1_zext"
(set_attr "mode" "SI")])
(define_insn "*addhi_1"
- [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r,r,Yp")
- (plus:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0,r,Yp")
- (match_operand:HI 2 "general_operand" "rn,m,0,ln")))
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r,r,Yp,r,r")
+ (plus:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0,r,Yp,rm,r")
+ (match_operand:HI 2 "general_operand" "rn,m,0,ln,rn,m")))
(clobber (reg:CC FLAGS_REG))]
- "ix86_binary_operator_ok (PLUS, HImode, operands)"
+ "ix86_binary_operator_ok (PLUS, HImode, operands,
+ TARGET_APX_NDD)"
{
+ bool use_ndd = (which_alternative == 4 || which_alternative == 5);
switch (get_attr_type (insn))
{
case TYPE_LEA:
return "#";
case TYPE_INCDEC:
- gcc_assert (rtx_equal_p (operands[0], operands[1]));
if (operands[2] == const1_rtx)
- return "inc{w}\t%0";
+ return use_ndd ? "inc{w}\t{%1, %0|%0, %1}" : "inc{w}\t%0";
else
{
gcc_assert (operands[2] == constm1_rtx);
- return "dec{w}\t%0";
+ return use_ndd ? "dec{w}\t{%1, %0|%0, %1}" : "dec{w}\t%0";
}
default:
@@ -6543,14 +6552,16 @@ (define_insn "*addhi_1"
if (which_alternative == 2)
std::swap (operands[1], operands[2]);
- gcc_assert (rtx_equal_p (operands[0], operands[1]));
if (x86_maybe_negate_const_int (&operands[2], HImode))
- return "sub{w}\t{%2, %0|%0, %2}";
+ return use_ndd ? "sub{w}\t{%2, %1, %0|%0, %1, %2}"
+ : "sub{w}\t{%2, %0|%0, %2}";
- return "add{w}\t{%2, %0|%0, %2}";
+ return use_ndd ? "add{w}\t{%2, %1, %0|%0, %1, %2}"
+ : "add{w}\t{%2, %0|%0, %2}";
}
}
- [(set (attr "type")
+ [(set_attr "isa" "*,*,*,*,apx_ndd,apx_ndd")
+ (set (attr "type")
(cond [(eq_attr "alternative" "3")
(const_string "lea")
(match_operand:HI 2 "incdec_operand")
@@ -6562,30 +6573,35 @@ (define_insn "*addhi_1"
(and (eq_attr "type" "alu") (match_operand 2 "const128_operand"))
(const_string "1")
(const_string "*")))
- (set_attr "mode" "HI,HI,HI,SI")])
+ (set_attr "mode" "HI,HI,HI,SI,HI,HI")])
(define_insn "*addqi_1"
- [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q,q,r,r,Yp")
- (plus:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0,q,0,r,Yp")
- (match_operand:QI 2 "general_operand" "qn,m,0,rn,0,ln")))
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q,q,r,r,Yp,r,r")
+ (plus:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0,q,0,r,Yp,rm,r")
+ (match_operand:QI 2 "general_operand" "qn,m,0,rn,0,ln,rn,m")))
(clobber (reg:CC FLAGS_REG))]
- "ix86_binary_operator_ok (PLUS, QImode, operands)"
+ "ix86_binary_operator_ok (PLUS, QImode, operands, TARGET_APX_NDD)"
{
bool widen = (get_attr_mode (insn) != MODE_QI);
-
+ bool use_ndd = (which_alternative == 6 || which_alternative == 7);
switch (get_attr_type (insn))
{
case TYPE_LEA:
return "#";
case TYPE_INCDEC:
- gcc_assert (rtx_equal_p (operands[0], operands[1]));
if (operands[2] == const1_rtx)
- return widen ? "inc{l}\t%k0" : "inc{b}\t%0";
+ if (use_ndd)
+ return "inc{b}\t{%1, %0|%0, %1}";
+ else
+ return widen ? "inc{l}\t%k0" : "inc{b}\t%0";
else
{
gcc_assert (operands[2] == constm1_rtx);
- return widen ? "dec{l}\t%k0" : "dec{b}\t%0";
+ if (use_ndd)
+ return "dec{b}\t{%1, %0|%0, %1}";
+ else
+ return widen ? "dec{l}\t%k0" : "dec{b}\t%0";
}
default:
@@ -6594,21 +6610,23 @@ (define_insn "*addqi_1"
if (which_alternative == 2 || which_alternative == 4)
std::swap (operands[1], operands[2]);
- gcc_assert (rtx_equal_p (operands[0], operands[1]));
if (x86_maybe_negate_const_int (&operands[2], QImode))
{
- if (widen)
- return "sub{l}\t{%2, %k0|%k0, %2}";
+ if (use_ndd)
+ return "sub{b}\t{%2, %1, %0|%0, %1, %2}";
else
- return "sub{b}\t{%2, %0|%0, %2}";
+ return widen ? "sub{l}\t{%2, %k0|%k0, %2}"
+ : "sub{b}\t{%2, %0|%0, %2}";
}
- if (widen)
- return "add{l}\t{%k2, %k0|%k0, %k2}";
+ if (use_ndd)
+ return "add{b}\t{%2, %1, %0|%0, %1, %2}";
else
- return "add{b}\t{%2, %0|%0, %2}";
+ return widen ? "add{l}\t{%k2, %k0|%k0, %k2}"
+ : "add{b}\t{%2, %0|%0, %2}";
}
}
- [(set (attr "type")
+ [(set_attr "isa" "*,*,*,*,*,*,apx_ndd,apx_ndd")
+ (set (attr "type")
(cond [(eq_attr "alternative" "5")
(const_string "lea")
(match_operand:QI 2 "incdec_operand")
@@ -6620,7 +6638,7 @@ (define_insn "*addqi_1"
(and (eq_attr "type" "alu") (match_operand 2 "const128_operand"))
(const_string "1")
(const_string "*")))
- (set_attr "mode" "QI,QI,QI,SI,SI,SI")
+ (set_attr "mode" "QI,QI,QI,SI,SI,SI,QI,QI")
;; Potential partial reg stall on alternatives 3 and 4.
(set (attr "preferred_for_speed")
(cond [(eq_attr "alternative" "3,4")
diff --git a/gcc/testsuite/gcc.target/i386/apx-ndd.c b/gcc/testsuite/gcc.target/i386/apx-ndd.c
new file mode 100644
--- /dev/null
+++ b/gcc/testsuite/gcc.target/i386/apx-ndd.c
@@ -0,0 +1,21 @@
+/* { dg-do compile { target { ! ia32 } } } */
+/* { dg-options "-mapxf -march=x86-64 -O2" } */
+/* { dg-final { scan-assembler-not "movl"} } */
+
+int foo (int *a)
+{
+ int b = *a - 1;
+ return b;
+}
+
+int foo2 (int a, int b)
+{
+ int c = a + b;
+ return c;
+}
+
+int foo3 (int *a, int b)
+{
+ int c = *a + b;
+ return c;
+}
From: Kong Lingling <lingling.kong@intel.com>

APX NDD provides an extra destination register operand for several
GPR-related legacy insns, so a new alternative can be added in which
operand 1 uses an "r" constraint instead of matching the destination.

This first patch supports NDD for the add instruction, and keeps using
lea when all operands are registers, since lea has a shorter encoding.
For add operations that involve a memory operand, the NDD form is used
to save an extra move.

The legacy x86 binary operation expander forces operands[0] and
operands[1] to be the same, so a use_ndd flag is added to the helper
functions to allow the NDD form pattern, in which operands[0] and
operands[1] can differ.

gcc/ChangeLog:

	* config/i386/i386-expand.cc (ix86_fixup_binary_operands): Add
	new use_ndd flag to check whether NDD can be used for this binop
	and adjust operand emission.
	(ix86_binary_operator_ok): Likewise.
	(ix86_expand_binary_operator): Likewise, and avoid generating the
	lea pattern at postreload expand when use_ndd is explicitly passed.
	* config/i386/i386-options.cc (ix86_option_override_internal):
	Prohibit APX subfeatures when not in 64-bit mode.
	* config/i386/i386-protos.h (ix86_binary_operator_ok): Add
	use_ndd flag.
	(ix86_fixup_binary_operands): Likewise.
	(ix86_expand_binary_operator): Likewise.
	* config/i386/i386.md (*add<mode>_1): Extend with new alternatives
	to support NDD, and adjust output templates.
	(*addhi_1): Likewise.
	(*addqi_1): Likewise.

gcc/testsuite/ChangeLog:

	* gcc.target/i386/apx-ndd.c: New test.
---
 gcc/config/i386/i386-expand.cc          |  19 ++---
 gcc/config/i386/i386-options.cc         |   2 +
 gcc/config/i386/i386-protos.h           |   6 +-
 gcc/config/i386/i386.md                 | 102 ++++++++++++++----
 gcc/testsuite/gcc.target/i386/apx-ndd.c |  21 +++++
 5 files changed, 96 insertions(+), 54 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/i386/apx-ndd.c
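Editor's note, not part of the patch: the sketch below illustrates the code
generation the description aims for. The function name, register choices and
exact instruction selection are assumptions for illustration only, not output
captured from an actual build with this patch applied.

/* Illustrative sketch mirroring foo3 in apx-ndd.c.

   Without APX NDD (two-operand add, destination must match a source):
	movl	(%rdi), %eax
	addl	%esi, %eax

   With APX NDD (three-operand add, separate destination, no extra move):
	addl	%esi, (%rdi), %eax

   A register-register add such as foo2 is still expected to use lea,
   which has a shorter encoding than the NDD add:
	leal	(%rdi,%rsi), %eax  */

int
ndd_sketch (int *a, int b)
{
  return *a + b;
}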