rs6000: Fix invalid splits when using Altivec style addresses [PR98959]

Message ID 8f29db7d-e282-4e25-ffed-98ba91b4db7b@linux.ibm.com

Commit Message

Peter Bergner Feb. 12, 2021, 8:50 p.m. UTC
The rs6000_emit_le_vsx_* functions assume they are not passed an Altivec
style "& ~16" address.  However, some of our expanders and splitters do
not verify we do not have an Altivec style address before calling those
functions, leading to an ICE.  The solution here is to guard the expanders
and splitters to ensure we do not call them if we're given an Altivec style
address.
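
For reference, the "Altivec style" addresses in question are MEMs whose
address RTL carries an explicit AND with -16; lvx/stvx silently ignore
the low four bits of the effective address, so that masking is modeled
explicitly in the RTL.  A rough sketch of the shape involved (the helper
name below is made up for illustration; the real check is
altivec_indexed_or_indirect_operand in config/rs6000/predicates.md):

  /* Illustrative only: build (mem:V1TI (and:DI (plus:DI rA rB)
     (const_int -16))), the indexed form of an Altivec style address.  */
  static rtx
  altivec_style_mem_sketch (rtx ra, rtx rb)
  {
    rtx sum  = gen_rtx_PLUS (DImode, ra, rb);             /* rA + rB         */
    rtx addr = gen_rtx_AND (DImode, sum, GEN_INT (-16));  /* mask low 4 bits */
    return gen_rtx_MEM (V1TImode, addr);                  /* the mem operand */
  }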

This fixes the ICE.  Ok for mainline if my powerpc64le-linux regtesting
comes back clean? 

We'll want backports once this has time to bake on mainline for a while.
Ok there too assuming my regtests there are clean?

Peter


2021-02-12  Peter Bergner  <bergner@linux.ibm.com>

gcc/
	PR target/98959
	* config/rs6000/rs6000.c (rs6000_emit_le_vsx_permute): Add an assert
	to ensure we do not have an Altivec style address.
	* config/rs6000/vsx.md (*vsx_le_perm_load_<mode>): Disable if passed
	an Altivec style address.
	(*vsx_le_perm_store_<mode>): Likewise.
	(splitters after *vsx_le_perm_store_<mode>): Likewise.
	(vsx_load_<mode>): Disable special expander if passed an Altivec
	style address.
	(vsx_store_<mode>): Likewise.

gcc/testsuite/
	PR target/98959
	* gcc.target/powerpc/pr98959.c: New test.

Comments

Segher Boessenkool Feb. 12, 2021, 11:36 p.m. UTC | #1
On Fri, Feb 12, 2021 at 02:50:12PM -0600, Peter Bergner wrote:
> The rs6000_emit_le_vsx_* functions assume they are not passed an Altivec
> style "& ~16" address.  However, some of our expanders and splitters do
> not verify we do not have an Altivec style address before calling those
> functions, leading to an ICE.  The solution here is to guard the expanders
> and splitters to ensure we do not call them if we're given an Altivec style
> address.

> --- a/gcc/config/rs6000/rs6000.c
> +++ b/gcc/config/rs6000/rs6000.c
> @@ -10059,6 +10059,11 @@ rs6000_const_vec (machine_mode mode)
>  void
>  rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
>  {
> +  if (MEM_P (dest))
> +    gcc_assert (!altivec_indexed_or_indirect_operand (dest, mode));
> +  if (MEM_P (source))
> +    gcc_assert (!altivec_indexed_or_indirect_operand (source, mode));

altivec_indexed_or_indirect_operand returns false if passed something
not a mem, so this is just

  gcc_assert (!altivec_indexed_or_indirect_operand (dest, mode));
  gcc_assert (!altivec_indexed_or_indirect_operand (source, mode));
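
(The predicate is restricted to MEMs by its match_code, so a REG or
SUBREG operand fails before its address is even inspected.  A condensed
paraphrase of the body, not the predicates.md source:)

  if (!MEM_P (op))                    /* non-MEM operands fail here       */
    return false;
  rtx addr = XEXP (op, 0);            /* address inside the MEM           */
  return (GET_CODE (addr) == AND      /* the Altivec "& -16" masking      */
          && CONST_INT_P (XEXP (addr, 1))
          && INTVAL (XEXP (addr, 1)) == -16);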

Please retest with that tweak.  Okay for trunk.  Thanks!

Also okay for GCC 10.  Do you need backports to earlier?  Which then?


Segher

Patch

diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index ec068c58aa5..e147cbdb52f 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -10059,6 +10059,11 @@  rs6000_const_vec (machine_mode mode)
 void
 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
 {
+  if (MEM_P (dest))
+    gcc_assert (!altivec_indexed_or_indirect_operand (dest, mode));
+  if (MEM_P (source))
+    gcc_assert (!altivec_indexed_or_indirect_operand (source, mode));
+
   /* Scalar permutations are easier to express in integer modes rather than
      floating-point modes, so cast them here.  We use V1TImode instead
      of TImode to ensure that the values don't go through GPRs.  */
diff --git a/gcc/config/rs6000/vsx.md b/gcc/config/rs6000/vsx.md
index 3e0518631df..f6fe88d3600 100644
--- a/gcc/config/rs6000/vsx.md
+++ b/gcc/config/rs6000/vsx.md
@@ -987,11 +987,13 @@  (define_insn_and_split "*vsx_le_undo_permute_<mode>"
 (define_insn_and_split "*vsx_le_perm_load_<mode>"
   [(set (match_operand:VSX_LE_128 0 "vsx_register_operand" "=wa,r")
         (match_operand:VSX_LE_128 1 "memory_operand" "Z,Q"))]
-  "!BYTES_BIG_ENDIAN && TARGET_VSX && !TARGET_P9_VECTOR"
+  "!BYTES_BIG_ENDIAN && TARGET_VSX && !TARGET_P9_VECTOR
+   && !altivec_indexed_or_indirect_operand (operands[1], <MODE>mode)"
   "@
    #
    #"
-  "!BYTES_BIG_ENDIAN && TARGET_VSX && !TARGET_P9_VECTOR"
+  "!BYTES_BIG_ENDIAN && TARGET_VSX && !TARGET_P9_VECTOR
+   && !altivec_indexed_or_indirect_operand (operands[1], <MODE>mode)"
   [(const_int 0)]
 {
   rtx tmp = (can_create_pseudo_p ()
@@ -1008,7 +1010,8 @@  (define_insn_and_split "*vsx_le_perm_load_<mode>"
 (define_insn "*vsx_le_perm_store_<mode>"
   [(set (match_operand:VSX_LE_128 0 "memory_operand" "=Z,Q")
         (match_operand:VSX_LE_128 1 "vsx_register_operand" "+wa,r"))]
-  "!BYTES_BIG_ENDIAN && TARGET_VSX && !TARGET_P9_VECTOR"
+  "!BYTES_BIG_ENDIAN && TARGET_VSX && !TARGET_P9_VECTOR
+   && !altivec_indexed_or_indirect_operand (operands[0], <MODE>mode)"
   "@
    #
    #"
@@ -1019,7 +1022,8 @@  (define_insn "*vsx_le_perm_store_<mode>"
 (define_split
   [(set (match_operand:VSX_LE_128 0 "memory_operand")
         (match_operand:VSX_LE_128 1 "vsx_register_operand"))]
-  "!BYTES_BIG_ENDIAN && TARGET_VSX && !reload_completed && !TARGET_P9_VECTOR"
+  "!BYTES_BIG_ENDIAN && TARGET_VSX && !reload_completed && !TARGET_P9_VECTOR
+   && !altivec_indexed_or_indirect_operand (operands[0], <MODE>mode)"
   [(const_int 0)]
 {
   rtx tmp = (can_create_pseudo_p ()
@@ -1075,7 +1079,8 @@  (define_peephole2
 (define_split
   [(set (match_operand:VSX_LE_128 0 "memory_operand")
         (match_operand:VSX_LE_128 1 "vsx_register_operand"))]
-  "!BYTES_BIG_ENDIAN && TARGET_VSX && reload_completed && !TARGET_P9_VECTOR"
+  "!BYTES_BIG_ENDIAN && TARGET_VSX && reload_completed && !TARGET_P9_VECTOR
+   && !altivec_indexed_or_indirect_operand (operands[0], <MODE>mode)"
   [(const_int 0)]
 {
   rs6000_emit_le_vsx_permute (operands[1], operands[1], <MODE>mode);
@@ -1241,7 +1246,8 @@  (define_expand "vsx_load_<mode>"
   "VECTOR_MEM_VSX_P (<MODE>mode)"
 {
   /* Expand to swaps if needed, prior to swap optimization.  */
-  if (!BYTES_BIG_ENDIAN && !TARGET_P9_VECTOR)
+  if (!BYTES_BIG_ENDIAN && !TARGET_P9_VECTOR
+      && !altivec_indexed_or_indirect_operand (operands[1], <MODE>mode))
     {
       rs6000_emit_le_vsx_move (operands[0], operands[1], <MODE>mode);
       DONE;
@@ -1254,7 +1260,8 @@  (define_expand "vsx_store_<mode>"
   "VECTOR_MEM_VSX_P (<MODE>mode)"
 {
   /* Expand to swaps if needed, prior to swap optimization.  */
-  if (!BYTES_BIG_ENDIAN && !TARGET_P9_VECTOR)
+  if (!BYTES_BIG_ENDIAN && !TARGET_P9_VECTOR
+      && !altivec_indexed_or_indirect_operand (operands[0], <MODE>mode))
     {
       rs6000_emit_le_vsx_move (operands[0], operands[1], <MODE>mode);
       DONE;
diff --git a/gcc/testsuite/gcc.target/powerpc/pr98959.c b/gcc/testsuite/gcc.target/powerpc/pr98959.c
new file mode 100644
index 00000000000..9e8523db7b5
--- /dev/null
+++ b/gcc/testsuite/gcc.target/powerpc/pr98959.c
@@ -0,0 +1,17 @@ 
+/* PR target/98959 */
+/* { dg-options "-fno-schedule-insns -O2 -mcmodel=small" } */
+
+/* Verify we do not ICE on the following.  */
+
+typedef __attribute__ ((altivec (vector__))) unsigned __int128 v1ti_t;
+
+v1ti_t foo (v1ti_t v);
+
+void
+bug ()
+{
+  v1ti_t dv = { ((31415926539) << 6) };
+  dv = foo (dv);
+  if (dv[0] != 0)
+    __builtin_abort ();
+}