diff mbox

[ARM,3/7] Convert FP mnemonics to UAL | mul+add patterns

Message ID 53F36775.2010105@arm.com
State New
Headers show

Commit Message

Kyrylo Tkachov Aug. 19, 2014, 3:04 p.m. UTC
Hi all,

This patch switches over the FP mul+add, mul+neg+add, etc. patterns.
Mixing some of the mnemonics up is a risk, but the ARM ARM has a handy
section that maps each mnemonic to its UAL equivalent.

Ok for trunk?

Thanks,
Kyrill

2014-08-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

     * config/arm/vfp.md (*mulsf3_vfp): Use UAL assembly syntax.
     (*muldf3_vfp): Likewise.
     (*mulsf3negsf_vfp): Likewise.
     (*muldf3negdf_vfp): Likewise.
     (*mulsf3addsf_vfp): Likewise.
     (*muldf3adddf_vfp): Likewise.
     (*mulsf3subsf_vfp): Likewise.
     (*muldf3subdf_vfp): Likewise.
     (*mulsf3negsfaddsf_vfp): Likewise.
     (*muldf3negdfadddf_vfp): Likewise.
     (*mulsf3negsfsubsf_vfp): Likewise.
     (*muldf3negdfsubdf_vfp): Likewise.

2014-08-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>

     * gcc.target/arm/vfp-1.c: Updated expected assembly.

Comments

Ramana Radhakrishnan Sept. 9, 2014, 11:04 a.m. UTC | #1
On Tue, Aug 19, 2014 at 4:04 PM, Kyrill Tkachov <kyrylo.tkachov@arm.com> wrote:
> Hi all,
>
> This patch switches over the FP mul+add, mul+neg+add etc patterns. Mixing
> some of the mnemonics up is a danger but the ARM ARM has a handy section
> that maps each mnemonic to its UAL equivalent.
>
> Ok for trunk?
>
Ok, this is the one I worried about the most and double-checked — it
looks sane. Thanks.

regards
Ramana

> Thanks,
> Kyrill
>
> 2014-08-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
>
>     * config/arm/vfp.md (*mulsf3_vfp): Use UAL assembly syntax.
>     (*muldf3_vfp): Likewise.
>     (*mulsf3negsf_vfp): Likewise.
>     (*muldf3negdf_vfp): Likewise.
>     (*mulsf3addsf_vfp): Likewise.
>     (*muldf3adddf_vfp): Likewise.
>     (*mulsf3subsf_vfp): Likewise.
>     (*muldf3subdf_vfp): Likewise.
>     (*mulsf3negsfaddsf_vfp): Likewise.
>     (*muldf3negdfadddf_vfp): Likewise.
>     (*mulsf3negsfsubsf_vfp): Likewise.
>     (*muldf3negdfsubdf_vfp): Likewise.
>
> 2014-08-19  Kyrylo Tkachov  <kyrylo.tkachov@arm.com>
>
>     * gcc.target/arm/vfp-1.c: Updated expected assembly.
diff mbox

Patch

commit f15edff842bafd42b4484067b45121d3d8667057
Author: Kyrylo Tkachov <kyrylo.tkachov@arm.com>
Date:   Thu Jul 17 09:50:59 2014 +0100

    [ARM][3/n] Convert FP mnemonics to UAL | mul+add patterns

diff --git a/gcc/config/arm/vfp.md b/gcc/config/arm/vfp.md
index 975f5ae..f740e26 100644
--- a/gcc/config/arm/vfp.md
+++ b/gcc/config/arm/vfp.md
@@ -749,7 +749,7 @@ 
 	(mult:SF (match_operand:SF 1 "s_register_operand" "t")
 		 (match_operand:SF 2 "s_register_operand" "t")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fmuls%?\\t%0, %1, %2"
+  "vmul%?.f32\\t%0, %1, %2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fmuls")]
@@ -760,7 +760,7 @@ 
 	(mult:DF (match_operand:DF 1 "s_register_operand" "w")
 		 (match_operand:DF 2 "s_register_operand" "w")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "fmuld%?\\t%P0, %P1, %P2"
+  "vmul%?.f64\\t%P0, %P1, %P2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fmuld")]
@@ -771,7 +771,7 @@ 
 	(mult:SF (neg:SF (match_operand:SF 1 "s_register_operand" "t"))
 		 (match_operand:SF	   2 "s_register_operand" "t")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fnmuls%?\\t%0, %1, %2"
+  "vnmul%?.f32\\t%0, %1, %2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fmuls")]
@@ -782,7 +782,7 @@ 
 	(mult:DF (neg:DF (match_operand:DF 1 "s_register_operand" "w"))
 		 (match_operand:DF	   2 "s_register_operand" "w")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "fnmuld%?\\t%P0, %P1, %P2"
+  "vnmul%?.f64\\t%P0, %P1, %P2"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fmuld")]
@@ -798,7 +798,7 @@ 
 			  (match_operand:SF 3 "s_register_operand" "t"))
 		 (match_operand:SF	    1 "s_register_operand" "0")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fmacs%?\\t%0, %2, %3"
+  "vmla%?.f32\\t%0, %2, %3"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fmacs")]
@@ -810,7 +810,7 @@ 
 			  (match_operand:DF 3 "s_register_operand" "w"))
 		 (match_operand:DF	    1 "s_register_operand" "0")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "fmacd%?\\t%P0, %P2, %P3"
+  "vmla%?.f64\\t%P0, %P2, %P3"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fmacd")]
@@ -823,7 +823,7 @@ 
 			   (match_operand:SF 3 "s_register_operand" "t"))
 		  (match_operand:SF	     1 "s_register_operand" "0")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fmscs%?\\t%0, %2, %3"
+  "vnmls%?.f32\\t%0, %2, %3"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fmacs")]
@@ -835,7 +835,7 @@ 
 			   (match_operand:DF 3 "s_register_operand" "w"))
 		  (match_operand:DF	     1 "s_register_operand" "0")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "fmscd%?\\t%P0, %P2, %P3"
+  "vnmls%?.f64\\t%P0, %P2, %P3"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fmacd")]
@@ -848,7 +848,7 @@ 
 		  (mult:SF (match_operand:SF 2 "s_register_operand" "t")
 			   (match_operand:SF 3 "s_register_operand" "t"))))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fnmacs%?\\t%0, %2, %3"
+  "vmls%?.f32\\t%0, %2, %3"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fmacs")]
@@ -860,7 +860,7 @@ 
 		  (mult:DF (match_operand:DF 2 "s_register_operand" "w")
 			   (match_operand:DF 3 "s_register_operand" "w"))))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "fnmacd%?\\t%P0, %P2, %P3"
+  "vmls%?.f64\\t%P0, %P2, %P3"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fmacd")]
@@ -875,7 +875,7 @@ 
 		    (match_operand:SF	      3 "s_register_operand" "t"))
 		  (match_operand:SF	      1 "s_register_operand" "0")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP"
-  "fnmscs%?\\t%0, %2, %3"
+  "vnmla%?.f32\\t%0, %2, %3"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fmacs")]
@@ -888,7 +888,7 @@ 
 		    (match_operand:DF	      3 "s_register_operand" "w"))
 		  (match_operand:DF	      1 "s_register_operand" "0")))]
   "TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_VFP_DOUBLE"
-  "fnmscd%?\\t%P0, %P2, %P3"
+  "vnmla%?.f64\\t%P0, %P2, %P3"
   [(set_attr "predicable" "yes")
    (set_attr "predicable_short_it" "no")
    (set_attr "type" "fmacd")]
diff --git a/gcc/testsuite/gcc.target/arm/vfp-1.c b/gcc/testsuite/gcc.target/arm/vfp-1.c
index 3027f10..43495ae 100644
--- a/gcc/testsuite/gcc.target/arm/vfp-1.c
+++ b/gcc/testsuite/gcc.target/arm/vfp-1.c
@@ -26,22 +26,22 @@  void test_sf() {
   /* { dg-final { scan-assembler "vdiv.f32" } } */
   f1 = f2 / f3;
   /* mulsf3_vfp */
-  /* { dg-final { scan-assembler "fmuls" } } */
+  /* { dg-final { scan-assembler "vmul.f32" } } */
   f1 = f2 * f3;
   /* mulsf3negsf_vfp */
-  /* { dg-final { scan-assembler "fnmuls" } } */
+  /* { dg-final { scan-assembler "vnmul.f32" } } */
   f1 = -f2 * f3;
   /* mulsf3addsf_vfp */
-  /* { dg-final { scan-assembler "fmacs" } } */
+  /* { dg-final { scan-assembler "vmla.f32" } } */
   f1 = f2 * f3 + f1;
   /* mulsf3subsf_vfp */
-  /* { dg-final { scan-assembler "fmscs" } } */
+  /* { dg-final { scan-assembler "vnmls.f32" } } */
   f1 = f2 * f3 - f1;
   /* mulsf3negsfaddsf_vfp */
-  /* { dg-final { scan-assembler "fnmacs" } } */
+  /* { dg-final { scan-assembler "vmls.f32" } } */
   f1 = f2 - f3 * f1;
   /* mulsf3negsfsubsf_vfp */
-  /* { dg-final { scan-assembler "fnmscs" } } */
+  /* { dg-final { scan-assembler "vnmla.f32" } } */
   f1 = -f2 * f3 - f1;
   /* sqrtsf2_vfp */
   /* { dg-final { scan-assembler "fsqrts" } } */
@@ -67,22 +67,22 @@  void test_df() {
   /* { dg-final { scan-assembler "vdiv.f64" } } */
   d1 = d2 / d3;
   /* muldf3_vfp */
-  /* { dg-final { scan-assembler "fmuld" } } */
+  /* { dg-final { scan-assembler "vmul.f64" } } */
   d1 = d2 * d3;
   /* muldf3negdf_vfp */
-  /* { dg-final { scan-assembler "fnmuld" } } */
+  /* { dg-final { scan-assembler "vnmul.f64" } } */
   d1 = -d2 * d3;
   /* muldf3adddf_vfp */
-  /* { dg-final { scan-assembler "fmacd" } } */
+  /* { dg-final { scan-assembler "vmla.f64" } } */
   d1 = d2 * d3 + d1;
   /* muldf3subdf_vfp */
-  /* { dg-final { scan-assembler "fmscd" } } */
+  /* { dg-final { scan-assembler "vnmls.f64" } } */
   d1 = d2 * d3 - d1;
   /* muldf3negdfadddf_vfp */
-  /* { dg-final { scan-assembler "fnmacd" } } */
+  /* { dg-final { scan-assembler "vmls.f64" } } */
   d1 = d2 - d3 * d1;
   /* muldf3negdfsubdf_vfp */
-  /* { dg-final { scan-assembler "fnmscd" } } */
+  /* { dg-final { scan-assembler "vnmla.f64" } } */
   d1 = -d2 * d3 - d1;
   /* sqrtdf2_vfp */
   /* { dg-final { scan-assembler "fsqrtd" } } */