diff mbox series

[v2,04/37] target/riscv: 8-bit Addition & Subtraction Instruction

Message ID 20210610075908.3305506-5-zhiwei_liu@c-sky.com
State New
Headers show
Series target/riscv: support packed extension v0.9.4 | expand

Commit Message

LIU Zhiwei June 10, 2021, 7:58 a.m. UTC
This patch includes 5 groups of instructions: Wrap-around (dropping overflow),
Signed Halving, Unsigned Halving, Signed Saturation, and Unsigned Saturation.

Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Palmer Dabbelt <palmerdabbelt@google.com>
---
 include/tcg/tcg-op-gvec.h               |  6 ++
 target/riscv/helper.h                   |  9 +++
 target/riscv/insn32.decode              | 11 ++++
 target/riscv/insn_trans/trans_rvp.c.inc | 13 +++++
 target/riscv/packed_helper.c            | 73 +++++++++++++++++++++++++
 tcg/tcg-op-gvec.c                       | 47 ++++++++++++++++
 6 files changed, 159 insertions(+)

Comments

Richard Henderson June 10, 2021, 7:39 p.m. UTC | #1
On 6/10/21 12:58 AM, LIU Zhiwei wrote:
>   include/tcg/tcg-op-gvec.h               |  6 ++
>   tcg/tcg-op-gvec.c                       | 47 ++++++++++++++++

Likewise, should be split from the larger patch.

> +static void gen_addv_mask_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 m)
> +{
> +    TCGv_i32 t1 = tcg_temp_new_i32();
> +    TCGv_i32 t2 = tcg_temp_new_i32();
> +    TCGv_i32 t3 = tcg_temp_new_i32();
> +
> +    tcg_gen_andc_i32(t1, a, m);
> +    tcg_gen_andc_i32(t2, b, m);
> +    tcg_gen_xor_i32(t3, a, b);
> +    tcg_gen_add_i32(d, t1, t2);
> +    tcg_gen_and_i32(t3, t3, m);
> +    tcg_gen_xor_i32(d, d, t3);
> +
> +    tcg_temp_free_i32(t1);
> +    tcg_temp_free_i32(t2);
> +    tcg_temp_free_i32(t3);
> +}
> +
> +void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
> +{
> +    TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
> +    gen_addv_mask_i32(d, a, b, m);
> +}

There will only ever be one use; we might as well merge them.
The cast is unnecessary.


r~
LIU Zhiwei June 11, 2021, 4:36 a.m. UTC | #2
On 6/11/21 3:39 AM, Richard Henderson wrote:

> On 6/10/21 12:58 AM, LIU Zhiwei wrote:
>>   include/tcg/tcg-op-gvec.h |  6 ++
>>   tcg/tcg-op-gvec.c                       | 47 ++++++++++++++++
>
> Likewise, should be split from the larger patch.
>
OK
>> +static void gen_addv_mask_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, 
>> TCGv_i32 m)
>> +{
>> +    TCGv_i32 t1 = tcg_temp_new_i32();
>> +    TCGv_i32 t2 = tcg_temp_new_i32();
>> +    TCGv_i32 t3 = tcg_temp_new_i32();
>> +
>> +    tcg_gen_andc_i32(t1, a, m);
>> +    tcg_gen_andc_i32(t2, b, m);
>> +    tcg_gen_xor_i32(t3, a, b);
>> +    tcg_gen_add_i32(d, t1, t2);
>> +    tcg_gen_and_i32(t3, t3, m);
>> +    tcg_gen_xor_i32(d, d, t3);
>> +
>> +    tcg_temp_free_i32(t1);
>> +    tcg_temp_free_i32(t2);
>> +    tcg_temp_free_i32(t3);
>> +}
>> +
>> +void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
>> +{
>> +    TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
>> +    gen_addv_mask_i32(d, a, b, m);
>> +}
>
> There will only ever be one use; we might as well merge them.
> The cast is unnecessary.

This is a little puzzling. Should I still split it?


Zhiwei

>
>
> r~
LIU Zhiwei June 24, 2021, 6:05 a.m. UTC | #3
On 2021/6/11 上午3:39, Richard Henderson wrote:
> On 6/10/21 12:58 AM, LIU Zhiwei wrote:
>>   include/tcg/tcg-op-gvec.h |  6 ++
>>   tcg/tcg-op-gvec.c                       | 47 ++++++++++++++++
>
> Likewise, should be split from the larger patch.
>
>> +static void gen_addv_mask_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, 
>> TCGv_i32 m)
>> +{
>> +    TCGv_i32 t1 = tcg_temp_new_i32();
>> +    TCGv_i32 t2 = tcg_temp_new_i32();
>> +    TCGv_i32 t3 = tcg_temp_new_i32();
>> +
>> +    tcg_gen_andc_i32(t1, a, m);
>> +    tcg_gen_andc_i32(t2, b, m);
>> +    tcg_gen_xor_i32(t3, a, b);
>> +    tcg_gen_add_i32(d, t1, t2);
>> +    tcg_gen_and_i32(t3, t3, m);
>> +    tcg_gen_xor_i32(d, d, t3);
>> +
>> +    tcg_temp_free_i32(t1);
>> +    tcg_temp_free_i32(t2);
>> +    tcg_temp_free_i32(t3);
>> +}
>> +
>> +void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
>> +{
>> +    TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
>> +    gen_addv_mask_i32(d, a, b, m);
>> +}
>
> There will only ever be one use; we might as well merge them.
OK
> The cast is unnecessary.

I got a compiler error without the cast, so I just kept it.

../tcg/tcg-op-gvec.c: In function ‘tcg_gen_vec_sub8_i32’:
/home/roman/git/qemu/include/tcg/tcg.h:1327:5: error: overflow in implicit constant conversion [-Werror=overflow]
      (__builtin_constant_p(VECE)                                    \
      ^
../tcg/tcg-op-gvec.c:1947:35: note: in expansion of macro ‘dup_const’
      TCGv_i32 m = tcg_constant_i32(dup_const(MO_8, 0x80));
                                    ^~~~~~~~~
cc1: all warnings being treated as errors

Thanks,
Zhiwei

>
>
> r~
diff mbox series

Patch

diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
index 2dae9e78d0..392c0f95a4 100644
--- a/include/tcg/tcg-op-gvec.h
+++ b/include/tcg/tcg-op-gvec.h
@@ -385,11 +385,13 @@  void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 a);
 void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 a);
 
 void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
 void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
 void tcg_gen_vec_add16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
 void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
 
 void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
+void tcg_gen_vec_sub8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
 void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
 void tcg_gen_vec_sub16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
 void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
@@ -406,9 +408,13 @@  void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c);
 #if TARGET_LONG_BITS == 64
 #define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i64
 #define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i64
+#define tcg_gen_vec_add8_tl  tcg_gen_vec_add8_i64
+#define tcg_gen_vec_sub8_tl  tcg_gen_vec_sub8_i64
 #else
 #define tcg_gen_vec_add16_tl tcg_gen_vec_add16_i32
 #define tcg_gen_vec_sub16_tl tcg_gen_vec_sub16_i32
+#define tcg_gen_vec_add8_tl  tcg_gen_vec_add8_i32
+#define tcg_gen_vec_sub8_tl  tcg_gen_vec_sub8_i32
 #endif
 
 #endif
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index b6a71ade33..629ff13402 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1179,3 +1179,12 @@  DEF_HELPER_3(rstsa16, tl, env, tl, tl)
 DEF_HELPER_3(urstsa16, tl, env, tl, tl)
 DEF_HELPER_3(kstsa16, tl, env, tl, tl)
 DEF_HELPER_3(ukstsa16, tl, env, tl, tl)
+
+DEF_HELPER_3(radd8, tl, env, tl, tl)
+DEF_HELPER_3(uradd8, tl, env, tl, tl)
+DEF_HELPER_3(kadd8, tl, env, tl, tl)
+DEF_HELPER_3(ukadd8, tl, env, tl, tl)
+DEF_HELPER_3(rsub8, tl, env, tl, tl)
+DEF_HELPER_3(ursub8, tl, env, tl, tl)
+DEF_HELPER_3(ksub8, tl, env, tl, tl)
+DEF_HELPER_3(uksub8, tl, env, tl, tl)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 57f72fabf6..13e1222296 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -764,3 +764,14 @@  rstsa16    1011011  ..... ..... 010 ..... 1110111 @r
 urstsa16   1101011  ..... ..... 010 ..... 1110111 @r
 kstsa16    1100011  ..... ..... 010 ..... 1110111 @r
 ukstsa16   1110011  ..... ..... 010 ..... 1110111 @r
+
+add8       0100100  ..... ..... 000 ..... 1110111 @r
+radd8      0000100  ..... ..... 000 ..... 1110111 @r
+uradd8     0010100  ..... ..... 000 ..... 1110111 @r
+kadd8      0001100  ..... ..... 000 ..... 1110111 @r
+ukadd8     0011100  ..... ..... 000 ..... 1110111 @r
+sub8       0100101  ..... ..... 000 ..... 1110111 @r
+rsub8      0000101  ..... ..... 000 ..... 1110111 @r
+ursub8     0010101  ..... ..... 000 ..... 1110111 @r
+ksub8      0001101  ..... ..... 000 ..... 1110111 @r
+uksub8     0011101  ..... ..... 000 ..... 1110111 @r
diff --git a/target/riscv/insn_trans/trans_rvp.c.inc b/target/riscv/insn_trans/trans_rvp.c.inc
index 43f395657a..80bec35ac9 100644
--- a/target/riscv/insn_trans/trans_rvp.c.inc
+++ b/target/riscv/insn_trans/trans_rvp.c.inc
@@ -115,3 +115,16 @@  GEN_RVP_R_OOL(rstsa16);
 GEN_RVP_R_OOL(urstsa16);
 GEN_RVP_R_OOL(kstsa16);
 GEN_RVP_R_OOL(ukstsa16);
+
+/* 8-bit Addition & Subtraction Instructions */
+GEN_RVP_R_INLINE(add8, tcg_gen_vec_add8_tl, tcg_gen_add_tl);
+GEN_RVP_R_INLINE(sub8, tcg_gen_vec_sub8_tl, tcg_gen_sub_tl);
+
+GEN_RVP_R_OOL(radd8);
+GEN_RVP_R_OOL(uradd8);
+GEN_RVP_R_OOL(kadd8);
+GEN_RVP_R_OOL(ukadd8);
+GEN_RVP_R_OOL(rsub8);
+GEN_RVP_R_OOL(ursub8);
+GEN_RVP_R_OOL(ksub8);
+GEN_RVP_R_OOL(uksub8);
diff --git a/target/riscv/packed_helper.c b/target/riscv/packed_helper.c
index b84abaaf25..62db072204 100644
--- a/target/riscv/packed_helper.c
+++ b/target/riscv/packed_helper.c
@@ -352,3 +352,76 @@  static inline void do_ukstsa16(CPURISCVState *env, void *vd, void *va,
 }
 
 RVPR(ukstsa16, 2, 2);
+
+/* 8-bit Addition & Subtraction Instructions */
+static inline void do_radd8(CPURISCVState *env, void *vd, void *va,
+                            void *vb, uint8_t i)
+{
+    int8_t *d = vd, *a = va, *b = vb;
+    d[i] = hadd32(a[i], b[i]);
+}
+
+RVPR(radd8, 1, 1);
+
+static inline void do_uradd8(CPURISCVState *env, void *vd, void *va,
+                                  void *vb, uint8_t i)
+{
+    uint8_t *d = vd, *a = va, *b = vb;
+    d[i] = haddu32(a[i], b[i]);
+}
+
+RVPR(uradd8, 1, 1);
+
+static inline void do_kadd8(CPURISCVState *env, void *vd, void *va,
+                            void *vb, uint8_t i)
+{
+    int8_t *d = vd, *a = va, *b = vb;
+    d[i] = sadd8(env, 0, a[i], b[i]);
+}
+
+RVPR(kadd8, 1, 1);
+
+static inline void do_ukadd8(CPURISCVState *env, void *vd, void *va,
+                             void *vb, uint8_t i)
+{
+    uint8_t *d = vd, *a = va, *b = vb;
+    d[i] = saddu8(env, 0, a[i], b[i]);
+}
+
+RVPR(ukadd8, 1, 1);
+
+static inline void do_rsub8(CPURISCVState *env, void *vd, void *va,
+                            void *vb, uint8_t i)
+{
+    int8_t *d = vd, *a = va, *b = vb;
+    d[i] = hsub32(a[i], b[i]);
+}
+
+RVPR(rsub8, 1, 1);
+
+static inline void do_ursub8(CPURISCVState *env, void *vd, void *va,
+                             void *vb, uint8_t i)
+{
+    uint8_t *d = vd, *a = va, *b = vb;
+    d[i] = hsubu64(a[i], b[i]);
+}
+
+RVPR(ursub8, 1, 1);
+
+static inline void do_ksub8(CPURISCVState *env, void *vd, void *va,
+                            void *vb, uint8_t i)
+{
+    int8_t *d = vd, *a = va, *b = vb;
+    d[i] = ssub8(env, 0, a[i], b[i]);
+}
+
+RVPR(ksub8, 1, 1);
+
+static inline void do_uksub8(CPURISCVState *env, void *vd, void *va,
+                             void *vb, uint8_t i)
+{
+    uint8_t *d = vd, *a = va, *b = vb;
+    d[i] = ssubu8(env, 0, a[i], b[i]);
+}
+
+RVPR(uksub8, 1, 1);
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index a8898ba7bf..484ced3054 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -1736,6 +1736,30 @@  void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
     gen_addv_mask(d, a, b, m);
 }
 
+static void gen_addv_mask_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 m)
+{
+    TCGv_i32 t1 = tcg_temp_new_i32();
+    TCGv_i32 t2 = tcg_temp_new_i32();
+    TCGv_i32 t3 = tcg_temp_new_i32();
+
+    tcg_gen_andc_i32(t1, a, m);
+    tcg_gen_andc_i32(t2, b, m);
+    tcg_gen_xor_i32(t3, a, b);
+    tcg_gen_add_i32(d, t1, t2);
+    tcg_gen_and_i32(t3, t3, m);
+    tcg_gen_xor_i32(d, d, t3);
+
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(t2);
+    tcg_temp_free_i32(t3);
+}
+
+void tcg_gen_vec_add8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+    TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
+    gen_addv_mask_i32(d, a, b, m);
+}
+
 void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
     TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));
@@ -1900,6 +1924,29 @@  void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
     gen_subv_mask(d, a, b, m);
 }
 
+static void gen_subv_mask_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 m)
+{
+    TCGv_i32 t1 = tcg_temp_new_i32();
+    TCGv_i32 t2 = tcg_temp_new_i32();
+    TCGv_i32 t3 = tcg_temp_new_i32();
+
+    tcg_gen_or_i32(t1, a, m);
+    tcg_gen_andc_i32(t2, b, m);
+    tcg_gen_eqv_i32(t3, a, b);
+    tcg_gen_sub_i32(d, t1, t2);
+    tcg_gen_and_i32(t3, t3, m);
+    tcg_gen_xor_i32(d, d, t3);
+
+    tcg_temp_free_i32(t1);
+    tcg_temp_free_i32(t2);
+    tcg_temp_free_i32(t3);
+}
+
+void tcg_gen_vec_sub8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+    TCGv_i32 m = tcg_constant_i32((int32_t)dup_const(MO_8, 0x80));
+    gen_subv_mask_i32(d, a, b, m);
+}
 void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
     TCGv_i64 m = tcg_constant_i64(dup_const(MO_16, 0x8000));