[2/3] RISC-V: Additional large constant synthesis improvements

Message ID 20240902200157.328705-2-rzinsly@ventanamicro.com
State New
Series [1/3] RISC-V: Improve codegen for negative repeating large constants

Commit Message

Raphael Moreira Zinsly Sept. 2, 2024, 8:01 p.m. UTC
Improve handling of large constants in riscv_build_integer: generate
better code for constants where the high half can be constructed by
shifting or shNadd-ing the low half, or where the halves differ by
less than 2048.
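
For illustration, here is a standalone sketch (not the GCC code itself)
of the shifted-halves test this adds; __builtin_ctzll/__builtin_clzll
stand in for the internal ctz_hwi/clz_hwi, and all names are
hypothetical:

#include <stdint.h>
#include <stdbool.h>

/* Can the high 32 bits be produced by shifting the low 32 bits?  If
   so, set *shiftval to the amount by which a materialized low half
   must be shifted left so it lands in the high half.  */
static bool
high_is_shifted_low (uint64_t value, int *shiftval)
{
  uint64_t loval = value & 0xffffffffu;
  uint64_t hival = value >> 32;
  if (loval == 0 || hival == 0)
    return false;
  int t = __builtin_ctzll (loval) - __builtin_ctzll (hival);
  int l = __builtin_clzll (loval) - __builtin_clzll (hival);
  if (t > 0 && hival == (loval >> t))
    {
      /* High half is the low half shifted right by t.  */
      *shiftval = 32 - t;
      return true;
    }
  if (l > 0 && hival == (loval << l))
    {
      /* High half is the low half shifted left by l.  */
      *shiftval = 32 + l;
      return true;
    }
  return false;
}

When the test succeeds the constant costs roughly synth(low) + slli +
or: materialize the low half, shift a saved copy of it left by shiftval
into the high half, and combine the two.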

gcc/ChangeLog:
	* config/riscv/riscv.cc (riscv_build_integer): Detect new case
	of constants that can be improved.
	(riscv_move_integer): Add synthesis for concatenating constants
	without Zbkb.

gcc/testsuite/ChangeLog:
	* gcc.target/riscv/synthesis-12.c: New test.
	* gcc.target/riscv/synthesis-13.c: New test.
	* gcc.target/riscv/synthesis-14.c: New test.
---
 gcc/config/riscv/riscv.cc                     | 140 +++++++++++++++++-
 gcc/testsuite/gcc.target/riscv/synthesis-12.c |  26 ++++
 gcc/testsuite/gcc.target/riscv/synthesis-13.c |  26 ++++
 gcc/testsuite/gcc.target/riscv/synthesis-14.c |  28 ++++
 4 files changed, 214 insertions(+), 6 deletions(-)
 create mode 100644 gcc/testsuite/gcc.target/riscv/synthesis-12.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/synthesis-13.c
 create mode 100644 gcc/testsuite/gcc.target/riscv/synthesis-14.c

Comments

Jeff Law Sept. 4, 2024, 11:32 p.m. UTC | #1
On 9/2/24 2:01 PM, Raphael Moreira Zinsly wrote:
> Improve handling of large constants in riscv_build_integer: generate
> better code for constants where the high half can be constructed by
> shifting or shNadd-ing the low half, or where the halves differ by
> less than 2048.
> 
> gcc/ChangeLog:
> 	* config/riscv/riscv.cc (riscv_build_integer): Detect new case
> 	of constants that can be improved.
> 	(riscv_move_integer): Add synthesis for concatenating constants
> 	without Zbkb.
> 
> gcc/testsuite/ChangeLog:
> 	* gcc.target/riscv/synthesis-12.c: New test.
> 	* gcc.target/riscv/synthesis-13.c: New test.
> 	* gcc.target/riscv/synthesis-14.c: New test.
> ---
>   gcc/config/riscv/riscv.cc                     | 140 +++++++++++++++++-
>   gcc/testsuite/gcc.target/riscv/synthesis-12.c |  26 ++++
>   gcc/testsuite/gcc.target/riscv/synthesis-13.c |  26 ++++
>   gcc/testsuite/gcc.target/riscv/synthesis-14.c |  28 ++++
>   4 files changed, 214 insertions(+), 6 deletions(-)
>   create mode 100644 gcc/testsuite/gcc.target/riscv/synthesis-12.c
>   create mode 100644 gcc/testsuite/gcc.target/riscv/synthesis-13.c
>   create mode 100644 gcc/testsuite/gcc.target/riscv/synthesis-14.c
> 
> diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
> index b963a57881e..64d5611cbd2 100644
> --- a/gcc/config/riscv/riscv.cc
> +++ b/gcc/config/riscv/riscv.cc
> @@ -1231,6 +1231,124 @@ riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
>   	}
>   
>       }
> +  else if (cost > 4 && TARGET_64BIT && can_create_pseudo_p ()
> +	   && allow_new_pseudos)
> +    {
> +      struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
> +      int alt_cost;
> +
> +      unsigned HOST_WIDE_INT loval = value & 0xffffffff;
> +      unsigned HOST_WIDE_INT hival = (value & ~loval) >> 32;
> +      bool bit31 = (hival & 0x80000000) != 0;
> +      int trailing_shift = ctz_hwi (loval) - ctz_hwi (hival);
> +      int leading_shift = clz_hwi (loval) - clz_hwi (hival);
> +      int shiftval = 0;
> +
> +      /* Adjust the shift into the high half accordingly.  */
> +      if ((trailing_shift > 0 && hival == (loval >> trailing_shift))
> +	  || (trailing_shift < 0 && hival == (loval << trailing_shift)))
> +	shiftval = 32 - trailing_shift;
> +      else if ((leading_shift < 0 && hival == (loval >> leading_shift))
> +		|| (leading_shift > 0 && hival == (loval << leading_shift)))
Don't these trigger undefined behavior when trailing_shift or
leading_shift is < 0?  We shouldn't ever generate negative shift counts.

Generally looks pretty good, but we do need to get those negative shifts 
fixed before integration.

jeff
Raphael Moreira Zinsly Sept. 5, 2024, 12:16 p.m. UTC | #2
On Wed, Sep 4, 2024 at 8:32 PM Jeff Law <jlaw@ventanamicro.com> wrote:
> On 9/2/24 2:01 PM, Raphael Moreira Zinsly wrote:
> ...
> > +      bool bit31 = (hival & 0x80000000) != 0;
> > +      int trailing_shift = ctz_hwi (loval) - ctz_hwi (hival);
> > +      int leading_shift = clz_hwi (loval) - clz_hwi (hival);
> > +      int shiftval = 0;
> > +
> > +      /* Adjust the shift into the high half accordingly.  */
> > +      if ((trailing_shift > 0 && hival == (loval >> trailing_shift))
> > +       || (trailing_shift < 0 && hival == (loval << trailing_shift)))
> > +     shiftval = 32 - trailing_shift;
> > +      else if ((leading_shift < 0 && hival == (loval >> leading_shift))
> > +             || (leading_shift > 0 && hival == (loval << leading_shift)))
> Don't these trigger undefined behavior when trailing_shift or
> leading_shift is < 0?  We shouldn't ever generate negative shift counts.

The value of trailing/leading_shift is added to 32, so we will never
have negative shift counts.



--
Raphael Moreira Zinsly
Jeff Law Sept. 5, 2024, 6:09 p.m. UTC | #3
On 9/5/24 6:16 AM, Raphael Zinsly wrote:
> On Wed, Sep 4, 2024 at 8:32 PM Jeff Law <jlaw@ventanamicro.com> wrote:
>> On 9/2/24 2:01 PM, Raphael Moreira Zinsly wrote:
>> ...
>>> +      bool bit31 = (hival & 0x80000000) != 0;
>>> +      int trailing_shift = ctz_hwi (loval) - ctz_hwi (hival);
>>> +      int leading_shift = clz_hwi (loval) - clz_hwi (hival);
>>> +      int shiftval = 0;
>>> +
>>> +      /* Adjust the shift into the high half accordingly.  */
>>> +      if ((trailing_shift > 0 && hival == (loval >> trailing_shift))
>>> +       || (trailing_shift < 0 && hival == (loval << trailing_shift)))
>>> +     shiftval = 32 - trailing_shift;
>>> +      else if ((leading_shift < 0 && hival == (loval >> leading_shift))
>>> +             || (leading_shift > 0 && hival == (loval << leading_shift)))
>> Don't these trigger undefined behavior when trailing_shift or
>> leading_shift is < 0?  We shouldn't ever generate negative shift counts.
> 
> The value of trailing/leading_shift is added to 32, so we will never
> have negative shift counts.
In the IF you have this conditional:

> (trailing_shift < 0 && hival == (loval << trailing_shift))

How could that not be undefined behavior?  You first test that the value
is less than zero and if it is less than zero you use it as a shift count.

Similarly for:

> (leading_shift < 0 && hival == (loval >> leading_shift))

Jeff
Raphael Moreira Zinsly Sept. 5, 2024, 6:38 p.m. UTC | #4
On Thu, Sep 5, 2024 at 3:10 PM Jeff Law <jeffreyalaw@gmail.com> wrote:
> On 9/5/24 6:16 AM, Raphael Zinsly wrote:
> > On Wed, Sep 4, 2024 at 8:32 PM Jeff Law <jlaw@ventanamicro.com> wrote:
> >> On 9/2/24 2:01 PM, Raphael Moreira Zinsly wrote:
> >> ...
> >>> +      bool bit31 = (hival & 0x80000000) != 0;
> >>> +      int trailing_shift = ctz_hwi (loval) - ctz_hwi (hival);
> >>> +      int leading_shift = clz_hwi (loval) - clz_hwi (hival);
> >>> +      int shiftval = 0;
> >>> +
> >>> +      /* Adjust the shift into the high half accordingly.  */
> >>> +      if ((trailing_shift > 0 && hival == (loval >> trailing_shift))
> >>> +       || (trailing_shift < 0 && hival == (loval << trailing_shift)))
> >>> +     shiftval = 32 - trailing_shift;
> >>> +      else if ((leading_shift < 0 && hival == (loval >> leading_shift))
> >>> +             || (leading_shift > 0 && hival == (loval << leading_shift)))
> >> Don't these trigger undefined behavior when trailing_shift or
> >> leading_shift is < 0?  We shouldn't ever generate negative shift counts.
> >
> > The value of trailing/leading_shift is added to 32, so we will never
> > have negative shift counts.
> In the IF you have this conditional:
>
> > (trailing_shift < 0 && hival == (loval << trailing_shift))
>
> How could that not be undefined behavior?  You first test that the value
> is less than zero and if it is less than zero you use it as a shift count.

I'm not using trailing_shift as the shift count; I'm using shiftval:

+         /* Now we want to shift the previously generated constant into the
+            high half.  */
+         alt_codes[alt_cost - 2].code = ASHIFT;
+         alt_codes[alt_cost - 2].value = shiftval;
+         alt_codes[alt_cost - 2].use_uw = false;
+         alt_codes[alt_cost - 2].save_temporary = false;
Jeff Law Sept. 5, 2024, 6:41 p.m. UTC | #5
On 9/5/24 12:38 PM, Raphael Zinsly wrote:
> On Thu, Sep 5, 2024 at 3:10 PM Jeff Law <jeffreyalaw@gmail.com> wrote:
>> On 9/5/24 6:16 AM, Raphael Zinsly wrote:
>>> On Wed, Sep 4, 2024 at 8:32 PM Jeff Law <jlaw@ventanamicro.com> wrote:
>>>> On 9/2/24 2:01 PM, Raphael Moreira Zinsly wrote:
>>>> ...
>>>>> +      bool bit31 = (hival & 0x80000000) != 0;
>>>>> +      int trailing_shift = ctz_hwi (loval) - ctz_hwi (hival);
>>>>> +      int leading_shift = clz_hwi (loval) - clz_hwi (hival);
>>>>> +      int shiftval = 0;
>>>>> +
>>>>> +      /* Adjust the shift into the high half accordingly.  */
>>>>> +      if ((trailing_shift > 0 && hival == (loval >> trailing_shift))
>>>>> +       || (trailing_shift < 0 && hival == (loval << trailing_shift)))
>>>>> +     shiftval = 32 - trailing_shift;
>>>>> +      else if ((leading_shift < 0 && hival == (loval >> leading_shift))
>>>>> +             || (leading_shift > 0 && hival == (loval << leading_shift)))
>>>> Don't these trigger undefined behavior when trailing_shift or
>>>> leading_shift is < 0?  We shouldn't ever generate negative shift counts.
>>>
>>> The value of trailing/leading_shift is added to 32, so we will never
>>> have negative shift counts.
>> In the IF you have this conditional:
>>
>>> (trailing_shift < 0 && hival == (loval << trailing_shift))
>>
>> How could that not be undefined behavior?  You first test that the value
>> is less than zero and if it is less than zero you use it as a shift count.
> 
> I'm not using trailing_shift as the shift count; I'm using shiftval:
> 
> +         /* Now we want to shift the previously generated constant into the
> +            high half.  */
> +         alt_codes[alt_cost - 2].code = ASHIFT;
> +         alt_codes[alt_cost - 2].value = shiftval;
> +         alt_codes[alt_cost - 2].use_uw = false;
> +         alt_codes[alt_cost - 2].save_temporary = false;
I'm not referring to the generated code.  The compiler itself will 
exhibit undefined behavior due to the negative shift count in that test.


jeff
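
To make the host-side issue concrete: in C and C++ a shift count must
be non-negative and smaller than the width of the promoted left operand
(C11 6.5.7p3), and a guard within the same condition does not change
that.  A minimal standalone illustration, with hypothetical names:

#include <stdint.h>
#include <stdbool.h>

/* Undefined when trailing_shift < 0: the && guard selects exactly the
   case where the shift count is negative.  */
static bool
broken (uint64_t hival, uint64_t loval, int trailing_shift)
{
  return trailing_shift < 0 && hival == (loval << trailing_shift);
}

/* Well-defined: negate the count before shifting.  */
static bool
fixed (uint64_t hival, uint64_t loval, int trailing_shift)
{
  return trailing_shift < 0 && hival == (loval << -trailing_shift);
}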
Raphael Moreira Zinsly Sept. 5, 2024, 7:18 p.m. UTC | #6
On Thu, Sep 5, 2024 at 3:41 PM Jeff Law <jlaw@ventanamicro.com> wrote:
>
>
>
> On 9/5/24 12:38 PM, Raphael Zinsly wrote:
> > On Thu, Sep 5, 2024 at 3:10 PM Jeff Law <jeffreyalaw@gmail.com> wrote:
> >> On 9/5/24 6:16 AM, Raphael Zinsly wrote:
> >>> On Wed, Sep 4, 2024 at 8:32 PM Jeff Law <jlaw@ventanamicro.com> wrote:
> >>>> On 9/2/24 2:01 PM, Raphael Moreira Zinsly wrote:
> >>>> ...
> >>>>> +      bool bit31 = (hival & 0x80000000) != 0;
> >>>>> +      int trailing_shift = ctz_hwi (loval) - ctz_hwi (hival);
> >>>>> +      int leading_shift = clz_hwi (loval) - clz_hwi (hival);
> >>>>> +      int shiftval = 0;
> >>>>> +
> >>>>> +      /* Adjust the shift into the high half accordingly.  */
> >>>>> +      if ((trailing_shift > 0 && hival == (loval >> trailing_shift))
> >>>>> +       || (trailing_shift < 0 && hival == (loval << trailing_shift)))
> >>>>> +     shiftval = 32 - trailing_shift;
> >>>>> +      else if ((leading_shift < 0 && hival == (loval >> leading_shift))
> >>>>> +             || (leading_shift > 0 && hival == (loval << leading_shift)))
> >>>> Don't these trigger undefined behavior when trailing_shift or
> >>>> leading_shift is < 0?  We shouldn't ever generate negative shift counts.
> >>>
> >>> The value of trailing/leading_shift is added to 32, so we will never
> >>> have negative shift counts.
> >> In the IF you have this conditional:
> >>
> >>> (trailing_shift < 0 && hival == (loval << trailing_shift))
> >>
> >> How could that not be undefined behavior?  You first test that the value
> >> is less than zero and if it is less than zero you use it as a shift count.
> >
> > I'm not using trailing_shift as the shift count; I'm using shiftval:
> >
> > +         /* Now we want to shift the previously generated constant into the
> > +            high half.  */
> > +         alt_codes[alt_cost - 2].code = ASHIFT;
> > +         alt_codes[alt_cost - 2].value = shiftval;
> > +         alt_codes[alt_cost - 2].use_uw = false;
> > +         alt_codes[alt_cost - 2].save_temporary = false;
> I'm not referring to the generated code.  The compiler itself will
> exhibit undefined behavior due to the negative shift count in that test.
>

Oh sorry, I get it now: the issue is with (loval << trailing_shift).
I was trying to cover all possibilities, but if trailing_shift is
negative then leading_shift should be positive and vice versa, so we
can keep only the positive tests, as sketched below.
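
A sketch of that direction, keeping only the positive-count tests so no
shift ever sees a negative count (illustrative, not necessarily the
final v2):

      /* Adjust the shift into the high half accordingly.  */
      if (trailing_shift > 0 && hival == (loval >> trailing_shift))
	shiftval = 32 - trailing_shift;
      else if (leading_shift > 0 && hival == (loval << leading_shift))
	shiftval = 32 + leading_shift;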

I'll prepare a v2.


Thanks,
--
Raphael Moreira Zinsly

Patch

diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
index b963a57881e..64d5611cbd2 100644
--- a/gcc/config/riscv/riscv.cc
+++ b/gcc/config/riscv/riscv.cc
@@ -1231,6 +1231,124 @@  riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
 	}
 
     }
+  else if (cost > 4 && TARGET_64BIT && can_create_pseudo_p ()
+	   && allow_new_pseudos)
+    {
+      struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
+      int alt_cost;
+
+      unsigned HOST_WIDE_INT loval = value & 0xffffffff;
+      unsigned HOST_WIDE_INT hival = (value & ~loval) >> 32;
+      bool bit31 = (hival & 0x80000000) != 0;
+      int trailing_shift = ctz_hwi (loval) - ctz_hwi (hival);
+      int leading_shift = clz_hwi (loval) - clz_hwi (hival);
+      int shiftval = 0;
+
+      /* Adjust the shift into the high half accordingly.  */
+      if ((trailing_shift > 0 && hival == (loval >> trailing_shift))
+	  || (trailing_shift < 0 && hival == (loval << trailing_shift)))
+	shiftval = 32 - trailing_shift;
+      else if ((leading_shift < 0 && hival == (loval >> leading_shift))
+		|| (leading_shift > 0 && hival == (loval << leading_shift)))
+	shiftval = 32 + leading_shift;
+
+      if (shiftval && !bit31)
+	alt_cost = 2 + riscv_build_integer_1 (alt_codes, sext_hwi (loval, 32),
+					      mode);
+
+      /* For constants where the upper half is a shift of the lower half we
+	 can do a shift followed by an or.  */
+      if (shiftval && alt_cost < cost && !bit31)
+	{
+	  /* We need to save the first constant we build.  */
+	  alt_codes[alt_cost - 3].save_temporary = true;
+
+	  /* Now we want to shift the previously generated constant into the
+	     high half.  */
+	  alt_codes[alt_cost - 2].code = ASHIFT;
+	  alt_codes[alt_cost - 2].value = shiftval;
+	  alt_codes[alt_cost - 2].use_uw = false;
+	  alt_codes[alt_cost - 2].save_temporary = false;
+
+	  /* And the final step, IOR the two halves together.  Since this uses
+	     the saved temporary, use CONCAT similar to what we do for Zbkb.  */
+	  alt_codes[alt_cost - 1].code = CONCAT;
+	  alt_codes[alt_cost - 1].value = 0;
+	  alt_codes[alt_cost - 1].use_uw = false;
+	  alt_codes[alt_cost - 1].save_temporary = false;
+
+	  memcpy (codes, alt_codes, sizeof (alt_codes));
+	  cost = alt_cost;
+	}
+
+      if (cost > 4 && !bit31 && TARGET_ZBA)
+	{
+	  int value = 0;
+
+	  /* Check for a shNadd.  */
+	  if (hival == loval * 3)
+	    value = 3;
+	  else if (hival == loval * 5)
+	    value = 5;
+	  else if (hival == loval * 9)
+	    value = 9;
+
+	  if (value)
+	    alt_cost = 2 + riscv_build_integer_1 (alt_codes,
+						  sext_hwi (loval, 32), mode);
+
+	  /* For constants where the upper half is a shNadd of the lower half
+	     we can do a similar transformation.  */
+	  if (value && alt_cost < cost)
+	    {
+	      alt_codes[alt_cost - 3].save_temporary = true;
+	      alt_codes[alt_cost - 2].code = FMA;
+	      alt_codes[alt_cost - 2].value = value;
+	      alt_codes[alt_cost - 2].use_uw = false;
+	      alt_codes[alt_cost - 2].save_temporary = false;
+	      alt_codes[alt_cost - 1].code = CONCAT;
+	      alt_codes[alt_cost - 1].value = 0;
+	      alt_codes[alt_cost - 1].use_uw = false;
+	      alt_codes[alt_cost - 1].save_temporary = false;
+
+	      memcpy (codes, alt_codes, sizeof (alt_codes));
+	      cost = alt_cost;
+	    }
+	}
+
+      if (cost > 4 && !bit31)
+	{
+	  int value = hival - loval;
+
+	  /* For constants where the halves differ by less than 2048 we can
+	     generate the upper half by using an addi on the lower half, then
+	     a shift left by 32 followed by an or.  */
+	  if (abs (value) <= 2047)
+	    {
+	      alt_cost = 3 + riscv_build_integer_1 (alt_codes,
+						    sext_hwi (loval, 32), mode);
+	      if (alt_cost < cost)
+		{
+		  alt_codes[alt_cost - 4].save_temporary = true;
+		  alt_codes[alt_cost - 3].code = PLUS;
+		  alt_codes[alt_cost - 3].value = value;
+		  alt_codes[alt_cost - 3].use_uw = false;
+		  alt_codes[alt_cost - 3].save_temporary = false;
+		  alt_codes[alt_cost - 2].code = ASHIFT;
+		  alt_codes[alt_cost - 2].value = 32;
+		  alt_codes[alt_cost - 2].use_uw = false;
+		  alt_codes[alt_cost - 2].save_temporary = false;
+		  alt_codes[alt_cost - 1].code = CONCAT;
+		  alt_codes[alt_cost - 1].value = 0;
+		  alt_codes[alt_cost - 1].use_uw = false;
+		  alt_codes[alt_cost - 1].save_temporary = false;
+
+		  memcpy (codes, alt_codes, sizeof (alt_codes));
+		  cost = alt_cost;
+		}
+	    }
+	}
+    }
 
   return cost;
 }
@@ -2864,12 +2982,22 @@  riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value,
 	    }
 	  else if (codes[i].code == CONCAT || codes[i].code == VEC_MERGE)
 	    {
-	      rtx t = can_create_pseudo_p () ? gen_reg_rtx (mode) : temp;
-	      rtx t2 = codes[i].code == VEC_MERGE ? old_value : x;
-	      gcc_assert (t2);
-	      t2 = gen_lowpart (SImode, t2);
-	      emit_insn (gen_riscv_xpack_di_si_2 (t, x, GEN_INT (32), t2));
-	      x = t;
+	      if (codes[i].code == CONCAT && !TARGET_ZBKB)
+		{
+		  /* The two values should have no bits in common, so we can
+		     use PLUS instead of IOR which has a higher chance of
+		     using a compressed instruction.  */
+		  x = gen_rtx_PLUS (mode, x, old_value);
+		}
+	      else
+		{
+		  rtx t = can_create_pseudo_p () ? gen_reg_rtx (mode) : temp;
+		  rtx t2 = codes[i].code == VEC_MERGE ? old_value : x;
+		  gcc_assert (t2);
+		  t2 = gen_lowpart (SImode, t2);
+		  emit_insn (gen_riscv_xpack_di_si_2 (t, x, GEN_INT (32), t2));
+		  x = t;
+		}
 	    }
 	  else
 	    x = gen_rtx_fmt_ee (codes[i].code, mode,
diff --git a/gcc/testsuite/gcc.target/riscv/synthesis-12.c b/gcc/testsuite/gcc.target/riscv/synthesis-12.c
new file mode 100644
index 00000000000..bf2f89042a0
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/synthesis-12.c
@@ -0,0 +1,26 @@ 
+/* { dg-do compile } */
+/* { dg-require-effective-target rv64 } */
+/* We aggressively skip as we really just need to test the basic synthesis
+   which shouldn't vary based on the optimization level.  -O1 seems to work
+   and eliminates the usual sources of extraneous dead code that would throw
+   off the counts.  */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-Og" "-O2" "-O3" "-Os" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv64gc" } */
+
+/* Rather than test for a specific synthesis of all these constants or
+   having thousands of tests each testing one variant, we just test the
+   total number of instructions.
+
+   This isn't expected to change much and any change is worthy of a look.  */
+/* { dg-final { scan-assembler-times "\\t(add|addi|bseti|li|pack|ret|sh1add|sh2add|sh3add|slli|srli|xori|or)" 45 } } */
+
+
+unsigned long foo_0x7857f2de7857f2de(void) { return 0x7857f2de7857f2deUL; }
+unsigned long foo_0x7fffdffe3fffefff(void) { return 0x7fffdffe3fffefffUL; }
+unsigned long foo_0x1ffff7fe3fffeffc(void) { return 0x1ffff7fe3fffeffcUL; }
+unsigned long foo_0x0a3fdbf0028ff6fc(void) { return 0x0a3fdbf0028ff6fcUL; }
+unsigned long foo_0x014067e805019fa0(void) { return 0x014067e805019fa0UL; }
+unsigned long foo_0x09d87e90009d87e9(void) { return 0x09d87e90009d87e9UL; }
+unsigned long foo_0x2302320000118119(void) { return 0x2302320000118119UL; }
+unsigned long foo_0x000711eb00e23d60(void) { return 0x000711eb00e23d60UL; }
+unsigned long foo_0x5983800001660e00(void) { return 0x5983800001660e00UL; }
diff --git a/gcc/testsuite/gcc.target/riscv/synthesis-13.c b/gcc/testsuite/gcc.target/riscv/synthesis-13.c
new file mode 100644
index 00000000000..957410acda1
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/synthesis-13.c
@@ -0,0 +1,26 @@ 
+/* { dg-do compile } */
+/* { dg-require-effective-target rv64 } */
+/* We aggressively skip as we really just need to test the basic synthesis
+   which shouldn't vary based on the optimization level.  -O1 seems to work
+   and eliminates the usual sources of extraneous dead code that would throw
+   off the counts.  */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-Og" "-O2" "-O3" "-Os" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv64gc_zba" } */
+
+/* Rather than test for a specific synthesis of all these constants or
+   having thousands of tests each testing one variant, we just test the
+   total number of instructions.
+
+   This isn't expected to change much and any change is worthy of a look.  */
+/* { dg-final { scan-assembler-times "\\t(add|addi|bseti|li|pack|ret|sh1add|sh2add|sh3add|slli|srli|xori|or)" 45 } } */
+
+
+unsigned long foo_0x7907d89a2857f2de(void) { return 0x7907d89a2857f2deUL; }
+unsigned long foo_0x4fffaffb0fffefff(void) { return 0x4fffaffb0fffefffUL; }
+unsigned long foo_0x23ff6fdc03ffeffc(void) { return 0x23ff6fdc03ffeffcUL; }
+unsigned long foo_0x170faedc028ff6fc(void) { return 0x170faedc028ff6fcUL; }
+unsigned long foo_0x5704dee01d019fa0(void) { return 0x5704dee01d019fa0UL; }
+unsigned long foo_0x0589c731009d87e9(void) { return 0x0589c731009d87e9UL; }
+unsigned long foo_0x0057857d00118119(void) { return 0x0057857d00118119UL; }
+unsigned long foo_0x546b32e010e23d60(void) { return 0x546b32e010e23d60UL; }
+unsigned long foo_0x64322a0021660e00(void) { return 0x64322a0021660e00UL; }
diff --git a/gcc/testsuite/gcc.target/riscv/synthesis-14.c b/gcc/testsuite/gcc.target/riscv/synthesis-14.c
new file mode 100644
index 00000000000..bd4e4afa55a
--- /dev/null
+++ b/gcc/testsuite/gcc.target/riscv/synthesis-14.c
@@ -0,0 +1,28 @@ 
+/* { dg-do compile } */
+/* { dg-require-effective-target rv64 } */
+/* We aggressively skip as we really just need to test the basic synthesis
+   which shouldn't vary based on the optimization level.  -O1 seems to work
+   and eliminates the usual sources of extraneous dead code that would throw
+   off the counts.  */
+/* { dg-skip-if "" { *-*-* } { "-O0" "-Og" "-O2" "-O3" "-Os" "-Oz" "-flto" } } */
+/* { dg-options "-march=rv64gc" } */
+
+/* Rather than test for a specific synthesis of all these constants or
+   having thousands of tests each testing one variant, we just test the
+   total number of instructions.
+
+   This isn't expected to change much and any change is worthy of a look.  */
+/* { dg-final { scan-assembler-times "\\t(add|addi|bseti|li|pack|ret|sh1add|sh2add|sh3add|slli|srli|xori|or)" 65 } } */
+
+
+unsigned long foo_0x7857faae7857f2de(void) { return 0x7857faae7857f2deUL; }
+unsigned long foo_0x0ffff7fe0fffefff(void) { return 0x0ffff7fe0fffefffUL; }
+unsigned long foo_0x7857f2de7857faae(void) { return 0x7857f2de7857faaeUL; }
+unsigned long foo_0x7857f2af7857faae(void) { return 0x7857f2af7857faaeUL; }
+unsigned long foo_0x5fbfffff5fbffae5(void) { return 0x5fbfffff5fbffae5UL; }
+unsigned long foo_0x3d3079db3d3079ac(void) { return 0x3d3079db3d3079acUL; }
+unsigned long foo_0x046075fe046078a8(void) { return 0x046075fe046078a8UL; }
+unsigned long foo_0x2411811a24118119(void) { return 0x2411811a24118119UL; }
+unsigned long foo_0x70e23d6a70e23d6b(void) { return 0x70e23d6a70e23d6bUL; }
+unsigned long foo_0x0c01df8c0c01df7d(void) { return 0x0c01df8c0c01df7dUL; }
+unsigned long foo_0x7fff07d07fff0000(void) { return 0x7fff07d07fff0000UL; }
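
A quick sanity check of the PLUS-for-IOR substitution in the
riscv_move_integer hunk above: when the shifted high part and the low
part occupy disjoint bit ranges, addition cannot carry, so x + y equals
x | y.  A minimal standalone check with hypothetical values:

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint64_t hi = (uint64_t) 0x1230 << 36;  /* High-half bits only.  */
  uint64_t lo = 0x1230;                   /* Low-half bits only.  */
  assert ((hi & lo) == 0);                /* No bits in common...  */
  assert (hi + lo == (hi | lo));          /* ...so ADD behaves as IOR.  */
  return 0;
}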