diff mbox series

[RFC,5/7] target/ppc: Implemented xvf16ger*

Message ID 20220426125028.18844-6-lucas.araujo@eldorado.org.br
State New
Headers show
Series VSX MMA Implementation | expand

Commit Message

Lucas Mateus Martins Araujo e Castro April 26, 2022, 12:50 p.m. UTC
From: "Lucas Mateus Castro (alqotel)" <lucas.araujo@eldorado.org.br>

Implement the following PowerISA v3.1 instructions:
xvf16ger2:   VSX Vector 16-bit Floating-Point GER (rank-2 update)
xvf16ger2nn: VSX Vector 16-bit Floating-Point GER (rank-2 update) Negative
multiply, Negative accumulate
xvf16ger2np: VSX Vector 16-bit Floating-Point GER (rank-2 update) Negative
multiply, Positive accumulate
xvf16ger2pn: VSX Vector 16-bit Floating-Point GER (rank-2 update) Positive
multiply, Negative accumulate
xvf16ger2pp: VSX Vector 16-bit Floating-Point GER (rank-2 update) Positive
multiply, Positive accumulate

Signed-off-by: Lucas Mateus Castro (alqotel) <lucas.araujo@eldorado.org.br>
---
 include/fpu/softfloat.h             |  9 ++++
 target/ppc/cpu.h                    |  3 ++
 target/ppc/fpu_helper.c             | 65 +++++++++++++++++++++++++++++
 target/ppc/helper.h                 |  1 +
 target/ppc/insn32.decode            |  6 +++
 target/ppc/translate/vsx-impl.c.inc |  6 +++
 6 files changed, 90 insertions(+)

Comments

Richard Henderson April 27, 2022, 12:26 a.m. UTC | #1
On 4/26/22 05:50, Lucas Mateus Castro(alqotel) wrote:
> +#define VSXGER16(NAME, ORIG_T, OR_EL)                                   \
> +    void NAME(CPUPPCState *env, uint32_t a_r, uint32_t b_r,             \
> +              uint32_t  at_r, uint32_t mask, uint32_t packed_flags)     \
> +    {                                                                   \
> +        ppc_vsr_t *at;                                                  \
> +        float32 psum, aux_acc, va, vb, vc, vd;                          \
> +        int i, j, xmsk_bit, ymsk_bit;                                   \
> +        uint8_t xmsk = mask & 0x0F;                                     \
> +        uint8_t ymsk = (mask >> 4) & 0x0F;                              \
> +        uint8_t pmsk = (mask >> 8) & 0x3;                               \
> +        ppc_vsr_t *b = cpu_vsr_ptr(env, b_r);                           \
> +        ppc_vsr_t *a = cpu_vsr_ptr(env, a_r);                           \
> +        float_status *excp_ptr = &env->fp_status;                       \
> +        bool acc = ger_acc_flag(packed_flags);                          \
> +        bool neg_acc = ger_neg_acc_flag(packed_flags);                  \
> +        bool neg_mul = ger_neg_mul_flag(packed_flags);                  \
> +        for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {    \
> +            at = cpu_vsr_ptr(env, at_r + i);                            \
> +            for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {\
> +                if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {           \
> +                    va = !(pmsk & 2) ? float32_zero :                   \
> +                                       GET_VSR(Vsr##OR_EL, a,           \
> +                                               2 * i, ORIG_T, float32); \
> +                    vb = !(pmsk & 2) ? float32_zero :                   \
> +                                       GET_VSR(Vsr##OR_EL, b,           \
> +                                               2 * j, ORIG_T, float32); \
> +                    vc = !(pmsk & 1) ? float32_zero :                   \
> +                                       GET_VSR(Vsr##OR_EL, a,           \
> +                                            2 * i + 1, ORIG_T, float32);\
> +                    vd = !(pmsk & 1) ? float32_zero :                   \
> +                                       GET_VSR(Vsr##OR_EL, b,           \
> +                                            2 * j + 1, ORIG_T, float32);\
> +                    psum = float32_mul(va, vb, excp_ptr);               \
> +                    psum = float32_muladd(vc, vd, psum, 0, excp_ptr);   \

This isn't correct -- the intermediate 'prod' (the first multiply) is not rounded.  I 
think the correct way to implement this (barring new softfloat functions) is to compute 
the intermediate product as float64 with float_round_to_odd, then float64r32_muladd into 
the correct rounding mode to finish.

> +                    if (acc) {                                          \
> +                        if (neg_mul) {                                  \
> +                            psum = float32_neg(psum);                   \
> +                        }                                               \
> +                        if (neg_acc) {                                  \
> +                            aux_acc = float32_neg(at->VsrSF(j));        \
> +                        } else {                                        \
> +                            aux_acc = at->VsrSF(j);                     \
> +                        }                                               \
> +                        at->VsrSF(j) = float32_add(psum, aux_acc,       \
> +                                                   excp_ptr);           \

This one, thankfully, uses the rounded intermediate result 'psum', so is ok.

Please do convert this from a macro.  Given that float16 and bfloat16 are addressed the 
same, I think the only callback you need is the conversion from float16_to_float64.  Drop 
the bf16 accessor to ppc_vsr_t.


r~
Lucas Mateus Martins Araujo e Castro April 27, 2022, 9:11 p.m. UTC | #2
On 26/04/2022 21:26, Richard Henderson wrote:
> On 4/26/22 05:50, Lucas Mateus Castro(alqotel) wrote:
>> +#define VSXGER16(NAME, ORIG_T, OR_EL)                                   \
>> +    void NAME(CPUPPCState *env, uint32_t a_r, uint32_t b_r,             \
>> +              uint32_t  at_r, uint32_t mask, uint32_t packed_flags)     \
>> +    {                                                                   \
>> +        ppc_vsr_t *at;                                                  \
>> +        float32 psum, aux_acc, va, vb, vc, vd;                          \
>> +        int i, j, xmsk_bit, ymsk_bit;                                   \
>> +        uint8_t xmsk = mask & 0x0F;                                     \
>> +        uint8_t ymsk = (mask >> 4) & 0x0F;                              \
>> +        uint8_t pmsk = (mask >> 8) & 0x3;                               \
>> +        ppc_vsr_t *b = cpu_vsr_ptr(env, b_r);                           \
>> +        ppc_vsr_t *a = cpu_vsr_ptr(env, a_r);                           \
>> +        float_status *excp_ptr = &env->fp_status;                       \
>> +        bool acc = ger_acc_flag(packed_flags);                          \
>> +        bool neg_acc = ger_neg_acc_flag(packed_flags);                  \
>> +        bool neg_mul = ger_neg_mul_flag(packed_flags);                  \
>> +        for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {    \
>> +            at = cpu_vsr_ptr(env, at_r + i);                            \
>> +            for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {\
>> +                if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {           \
>> +                    va = !(pmsk & 2) ? float32_zero :                   \
>> +                                       GET_VSR(Vsr##OR_EL, a,           \
>> +                                               2 * i, ORIG_T, float32); \
>> +                    vb = !(pmsk & 2) ? float32_zero :                   \
>> +                                       GET_VSR(Vsr##OR_EL, b,           \
>> +                                               2 * j, ORIG_T, float32); \
>> +                    vc = !(pmsk & 1) ? float32_zero :                   \
>> +                                       GET_VSR(Vsr##OR_EL, a,           \
>> +                                            2 * i + 1, ORIG_T, float32);\
>> +                    vd = !(pmsk & 1) ? float32_zero :                   \
>> +                                       GET_VSR(Vsr##OR_EL, b,           \
>> +                                            2 * j + 1, ORIG_T, float32);\
>> +                    psum = float32_mul(va, vb, excp_ptr);               \
>> +                    psum = float32_muladd(vc, vd, psum, 0, excp_ptr);   \
>
> This isn't correct -- the intermediate 'prod' (the first multiply) is 
> not rounded.  I
> think the correct way to implement this (barring new softfloat 
> functions) is to compute
> the intermediate product as float64 with float_round_to_odd, then 
> float64r32_muladd into
> the correct rounding mode to finish.
While not mentioned in the pseudocode, the instruction description says:

- Let prod be the single-precision product of src10 and src20

Which I understand as the result of the first multiplication being 
stored in a float32

But in xvbf16ger2* it's different (and I think this is the reason the 
last patch is resulting in the wrong sign in some 0 and inf results); 
the description says:

- Let prod be the product of src10 and src20, having infinite precision 
and unbounded exponent range.
- Let psum be the sum of the product, src11 multiplied by src21, and 
prod, having infinite precision and unbounded exponent range.
- Let r1 be the value psum with its significand rounded to 24-bit 
precision using the rounding mode specified by RN, but retaining 
unbounded exponent range (i.e., cannot overflow or underflow).

>
>> +                    if (acc) {                                          \
>> +                        if (neg_mul) {                                  \
>> +                            psum = float32_neg(psum);                   \
>> +                        }                                               \
>> +                        if (neg_acc) {                                  \
>> +                            aux_acc = float32_neg(at->VsrSF(j));        \
>> +                        } else {                                        \
>> +                            aux_acc = at->VsrSF(j);                     \
>> +                        }                                               \
>> +                        at->VsrSF(j) = float32_add(psum, aux_acc,       \
>> +                                                   excp_ptr);           \
>
> This one, thankfully, uses the rounded intermediate result 'psum', so 
> is ok.
Yes, this one is the easier one to deal with: in the description for the 
xvf16ger2* it specifies that psum and the result are rounded to 
single-precision, and in the description for the xvbf16ger2 it specifies 
that r1 is 'rounded to a 24-bit significand precision and 8-bit exponent 
range (i.e., single-precision)'.
>
> Please do convert this from a macro.  Given that float16 and bfloat16 
> are addressed the
> same, I think the only callback you need is the conversion from 
> float16_to_float64.  Drop
> the bf16 accessor to ppc_vsr_t.
>
Will do, although instead of the callback being the conversion, I'm 
considering having it be a 4-float multiplication:
     typedef float32 mul_4float(float16, float16, float16, float16);
Since float16 and bfloat16 are addressed the same, any thoughts?
>
> r~
Richard Henderson April 27, 2022, 10:30 p.m. UTC | #3
On 4/27/22 14:11, Lucas Mateus Martins Araujo e Castro wrote:
>> Please do convert this from a macro.  Given that float16 and bfloat16 are addressed the
>> same, I think the only callback you need is the conversion from float16_to_float64.  Drop
>> the bf16 accessor to ppc_vsr_t.
>>
> Will do, although I'm considering instead of the callback being the conversion, maybe have 
> it be a 4 float multiplication
>      typedef float32 mul_4float(float16, float16, float16, float16);
> Since float16 and bfloat16 are addressed the same, any thoughts?

The multiplication would be identical for the two types -- only the conversion is different.


r~
diff mbox series

Patch

diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h
index 3dcf20e3a2..63d7ff18f0 100644
--- a/include/fpu/softfloat.h
+++ b/include/fpu/softfloat.h
@@ -619,6 +619,15 @@  static inline float32 float32_chs(float32 a)
     return make_float32(float32_val(a) ^ 0x80000000);
 }
 
+static inline float32 float32_neg(float32 a)
+{
+    if (((a & 0x7f800000) == 0x7f800000) && (a & 0x007fffff)) {
+        return a;
+    } else {
+        return float32_chs(a);
+    }
+}
+
 static inline bool float32_is_infinity(float32 a)
 {
     return (float32_val(a) & 0x7fffffff) == 0x7f800000;
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index b5d7b35dda..91167f8cc0 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -225,6 +225,7 @@  typedef union _ppc_vsr_t {
     int16_t s16[8];
     int32_t s32[4];
     int64_t s64[2];
+    float16 f16[8];
     float32 f32[4];
     float64 f64[2];
     float128 f128;
@@ -2652,6 +2653,7 @@  static inline bool lsw_reg_in_range(int start, int nregs, int rx)
 #define VsrSW(i) s32[i]
 #define VsrD(i) u64[i]
 #define VsrSD(i) s64[i]
+#define VsrHF(i) f16[i]
 #define VsrSF(i) f32[i]
 #define VsrDF(i) f64[i]
 #else
@@ -2663,6 +2665,7 @@  static inline bool lsw_reg_in_range(int start, int nregs, int rx)
 #define VsrSW(i) s32[3 - (i)]
 #define VsrD(i) u64[1 - (i)]
 #define VsrSD(i) s64[1 - (i)]
+#define VsrHF(i) f16[7 - (i)]
 #define VsrSF(i) f32[3 - (i)]
 #define VsrDF(i) f64[1 - (i)]
 #endif
diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c
index 6b03666d09..c3aead642a 100644
--- a/target/ppc/fpu_helper.c
+++ b/target/ppc/fpu_helper.c
@@ -3478,6 +3478,67 @@  static inline bool ger_neg_acc_flag(uint32_t flag)
     return flag & 0x4;
 }
 
+#define float16_to_float32(A, PTR) float16_to_float32(A, true, PTR)
+
+#define GET_VSR(VSR, A, I, SRC_T, TARGET_T)                             \
+    SRC_T##_to_##TARGET_T(A->VSR(I), excp_ptr)
+
+#define VSXGER16(NAME, ORIG_T, OR_EL)                                   \
+    void NAME(CPUPPCState *env, uint32_t a_r, uint32_t b_r,             \
+              uint32_t  at_r, uint32_t mask, uint32_t packed_flags)     \
+    {                                                                   \
+        ppc_vsr_t *at;                                                  \
+        float32 psum, aux_acc, va, vb, vc, vd;                          \
+        int i, j, xmsk_bit, ymsk_bit;                                   \
+        uint8_t xmsk = mask & 0x0F;                                     \
+        uint8_t ymsk = (mask >> 4) & 0x0F;                              \
+        uint8_t pmsk = (mask >> 8) & 0x3;                               \
+        ppc_vsr_t *b = cpu_vsr_ptr(env, b_r);                           \
+        ppc_vsr_t *a = cpu_vsr_ptr(env, a_r);                           \
+        float_status *excp_ptr = &env->fp_status;                       \
+        bool acc = ger_acc_flag(packed_flags);                          \
+        bool neg_acc = ger_neg_acc_flag(packed_flags);                  \
+        bool neg_mul = ger_neg_mul_flag(packed_flags);                  \
+        for (i = 0, xmsk_bit = 1 << 3; i < 4; i++, xmsk_bit >>= 1) {    \
+            at = cpu_vsr_ptr(env, at_r + i);                            \
+            for (j = 0, ymsk_bit = 1 << 3; j < 4; j++, ymsk_bit >>= 1) {\
+                if ((xmsk_bit & xmsk) && (ymsk_bit & ymsk)) {           \
+                    va = !(pmsk & 2) ? float32_zero :                   \
+                                       GET_VSR(Vsr##OR_EL, a,           \
+                                               2 * i, ORIG_T, float32); \
+                    vb = !(pmsk & 2) ? float32_zero :                   \
+                                       GET_VSR(Vsr##OR_EL, b,           \
+                                               2 * j, ORIG_T, float32); \
+                    vc = !(pmsk & 1) ? float32_zero :                   \
+                                       GET_VSR(Vsr##OR_EL, a,           \
+                                            2 * i + 1, ORIG_T, float32);\
+                    vd = !(pmsk & 1) ? float32_zero :                   \
+                                       GET_VSR(Vsr##OR_EL, b,           \
+                                            2 * j + 1, ORIG_T, float32);\
+                    psum = float32_mul(va, vb, excp_ptr);               \
+                    psum = float32_muladd(vc, vd, psum, 0, excp_ptr);   \
+                    if (acc) {                                          \
+                        if (neg_mul) {                                  \
+                            psum = float32_neg(psum);                   \
+                        }                                               \
+                        if (neg_acc) {                                  \
+                            aux_acc = float32_neg(at->VsrSF(j));        \
+                        } else {                                        \
+                            aux_acc = at->VsrSF(j);                     \
+                        }                                               \
+                        at->VsrSF(j) = float32_add(psum, aux_acc,       \
+                                                   excp_ptr);           \
+                    } else {                                            \
+                        at->VsrSF(j) = psum;                            \
+                    }                                                   \
+                } else {                                                \
+                    at->VsrSF(j) = 0;                                   \
+                }                                                       \
+            }                                                           \
+        }                                                               \
+        do_float_check_status(env, GETPC());                            \
+    }
+
 #define VSXGER(NAME, TYPE, EL)                                          \
     void NAME(CPUPPCState *env, uint32_t a_r, uint32_t b_r,             \
               uint32_t  at_r, uint32_t mask, uint32_t packed_flags)     \
@@ -3522,7 +3583,11 @@  static inline bool ger_neg_acc_flag(uint32_t flag)
         do_float_check_status(env, GETPC());                            \
     }
 
+VSXGER16(helper_XVF16GER2, float16, HF)
 VSXGER(helper_XVF32GER, float32, SF)
 VSXGER(helper_XVF64GER, float64, DF)
 
+#undef VSXGER16
 #undef VSXGER
+#undef GET_VSR
+#undef float16_to_float32
diff --git a/target/ppc/helper.h b/target/ppc/helper.h
index 7d725292b1..cc59a3b71d 100644
--- a/target/ppc/helper.h
+++ b/target/ppc/helper.h
@@ -540,6 +540,7 @@  DEF_HELPER_5(XXBLENDVD, void, vsr, vsr, vsr, vsr, i32)
 DEF_HELPER_6(XVI4GER8, void, env, i32, i32, i32, i32, i32)
 DEF_HELPER_6(XVI8GER4, void, env, i32, i32, i32, i32, i32)
 DEF_HELPER_6(XVI16GER2, void, env, i32, i32, i32, i32, i32)
+DEF_HELPER_6(XVF16GER2, void, env, i32, i32, i32, i32, i32)
 DEF_HELPER_6(XVF32GER, void, env, i32, i32, i32, i32, i32)
 DEF_HELPER_6(XVF64GER, void, env, i32, i32, i32, i32, i32)
 
diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
index 9652ca286c..a204730d1d 100644
--- a/target/ppc/insn32.decode
+++ b/target/ppc/insn32.decode
@@ -736,6 +736,12 @@  XVI8GER4SPP     111011 ... -- ..... ..... 01100011 ..-  @XX3_at xa=%xx_xa
 XVI16GER2S      111011 ... -- ..... ..... 00101011 ..-  @XX3_at xa=%xx_xa
 XVI16GER2SPP    111011 ... -- ..... ..... 00101010 ..-  @XX3_at xa=%xx_xa
 
+XVF16GER2       111011 ... -- ..... ..... 00010011 ..-  @XX3_at xa=%xx_xa
+XVF16GER2PP     111011 ... -- ..... ..... 00010010 ..-  @XX3_at xa=%xx_xa
+XVF16GER2PN     111011 ... -- ..... ..... 10010010 ..-  @XX3_at xa=%xx_xa
+XVF16GER2NP     111011 ... -- ..... ..... 01010010 ..-  @XX3_at xa=%xx_xa
+XVF16GER2NN     111011 ... -- ..... ..... 11010010 ..-  @XX3_at xa=%xx_xa
+
 XVF32GER        111011 ... -- ..... ..... 00011011 ..-  @XX3_at xa=%xx_xa
 XVF32GERPP      111011 ... -- ..... ..... 00011010 ..-  @XX3_at xa=%xx_xa
 XVF32GERPN      111011 ... -- ..... ..... 10011010 ..-  @XX3_at xa=%xx_xa
diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc
index b1fb0f31f3..9285e27159 100644
--- a/target/ppc/translate/vsx-impl.c.inc
+++ b/target/ppc/translate/vsx-impl.c.inc
@@ -2917,6 +2917,12 @@  TRANS64(PMXVI16GER2SPP, do_ger_MMIRR_XX3, GER_SPP, gen_helper_XVI16GER2)
 #define GER_PN ger_pack_flags_xvf(true, false, true)
 #define GER_NN ger_pack_flags_xvf(true, true, true)
 
+TRANS(XVF16GER2, do_ger_XX3, GER_NOP, gen_helper_XVF16GER2)
+TRANS(XVF16GER2PP, do_ger_XX3, GER_PP, gen_helper_XVF16GER2)
+TRANS(XVF16GER2PN, do_ger_XX3, GER_PN, gen_helper_XVF16GER2)
+TRANS(XVF16GER2NP, do_ger_XX3, GER_NP, gen_helper_XVF16GER2)
+TRANS(XVF16GER2NN, do_ger_XX3, GER_NN, gen_helper_XVF16GER2)
+
 TRANS(XVF32GER, do_ger_XX3, GER_NOP, gen_helper_XVF32GER)
 TRANS(XVF32GERPP, do_ger_XX3, GER_PP, gen_helper_XVF32GER)
 TRANS(XVF32GERPN, do_ger_XX3, GER_PN, gen_helper_XVF32GER)