@@ -1983,3 +1983,16 @@ DEF_HELPER_6(th_vssra_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(th_vssra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(th_vssra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(th_vssra_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(th_vnclip_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vnclip_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vnclip_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vnclipu_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vnclipu_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vnclipu_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vnclipu_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vnclipu_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vnclipu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vnclip_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vnclip_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vnclip_vx_w, void, ptr, ptr, tl, ptr, env, i32)
@@ -1738,18 +1738,20 @@ GEN_OPIVX_TRANS_TH(th_vssra_vx, opivx_check_th)
GEN_OPIVI_TRANS_TH(th_vssrl_vi, IMM_TRUNC_SEW, th_vssrl_vx, opivx_check_th)
GEN_OPIVI_TRANS_TH(th_vssra_vi, IMM_TRUNC_SEW, th_vssra_vx, opivx_check_th)
+/* Vector Narrowing Fixed-Point Clip Instructions */
+GEN_OPIVV_NARROW_TRANS_TH(th_vnclipu_vv)
+GEN_OPIVV_NARROW_TRANS_TH(th_vnclip_vv)
+GEN_OPIVX_NARROW_TRANS_TH(th_vnclipu_vx)
+GEN_OPIVX_NARROW_TRANS_TH(th_vnclip_vx)
+GEN_OPIVI_NARROW_TRANS_TH(th_vnclipu_vi, IMM_ZX, th_vnclipu_vx)
+GEN_OPIVI_NARROW_TRANS_TH(th_vnclip_vi, IMM_ZX, th_vnclip_vx)
+
#define TH_TRANS_STUB(NAME) \
static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
{ \
return require_xtheadvector(s); \
}
-TH_TRANS_STUB(th_vnclipu_vv)
-TH_TRANS_STUB(th_vnclipu_vx)
-TH_TRANS_STUB(th_vnclipu_vi)
-TH_TRANS_STUB(th_vnclip_vv)
-TH_TRANS_STUB(th_vnclip_vx)
-TH_TRANS_STUB(th_vnclip_vi)
TH_TRANS_STUB(th_vfadd_vv)
TH_TRANS_STUB(th_vfadd_vf)
TH_TRANS_STUB(th_vfsub_vv)
@@ -646,14 +646,6 @@ GEN_VEXT_ST_WHOLE(vs8r_v, int8_t, ste_b)
* Vector Integer Arithmetic Instructions
*/
-/* (TD, T1, T2, TX1, TX2) */
-#define NOP_SSS_B int8_t, int8_t, int16_t, int8_t, int16_t
-#define NOP_SSS_H int16_t, int16_t, int32_t, int16_t, int32_t
-#define NOP_SSS_W int32_t, int32_t, int64_t, int32_t, int64_t
-#define NOP_UUU_B uint8_t, uint8_t, uint16_t, uint8_t, uint16_t
-#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t
-#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t
-
#define DO_SUB(N, M) (N - M)
#define DO_RSUB(N, M) (M - N)
@@ -2677,8 +2669,7 @@ GEN_VEXT_VX_RM(vssra_vx_w, 4)
GEN_VEXT_VX_RM(vssra_vx_d, 8)
/* Vector Narrowing Fixed-Point Clip Instructions */
-static inline int8_t
-vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b)
+int8_t vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b)
{
uint8_t round, shift = b & 0xf;
int16_t res;
@@ -2696,8 +2687,7 @@ vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b)
}
}
-static inline int16_t
-vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b)
+int16_t vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b)
{
uint8_t round, shift = b & 0x1f;
int32_t res;
@@ -2715,8 +2705,7 @@ vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b)
}
}
-static inline int32_t
-vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b)
+int32_t vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b)
{
uint8_t round, shift = b & 0x3f;
int64_t res;
@@ -2748,8 +2737,7 @@ GEN_VEXT_VX_RM(vnclip_wx_b, 1)
GEN_VEXT_VX_RM(vnclip_wx_h, 2)
GEN_VEXT_VX_RM(vnclip_wx_w, 4)
-static inline uint8_t
-vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b)
+uint8_t vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b)
{
uint8_t round, shift = b & 0xf;
uint16_t res;
@@ -2764,8 +2752,7 @@ vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b)
}
}
-static inline uint16_t
-vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b)
+uint16_t vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b)
{
uint8_t round, shift = b & 0x1f;
uint32_t res;
@@ -2780,8 +2767,7 @@ vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b)
}
}
-static inline uint32_t
-vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b)
+uint32_t vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b)
{
uint8_t round, shift = b & 0x3f;
uint64_t res;
@@ -255,6 +255,12 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
#define WOP_SSU_B int16_t, int8_t, uint8_t, int16_t, uint16_t
#define WOP_SSU_H int32_t, int16_t, uint16_t, int32_t, uint32_t
#define WOP_SSU_W int64_t, int32_t, uint32_t, int64_t, uint64_t
+#define NOP_SSS_B int8_t, int8_t, int16_t, int8_t, int16_t
+#define NOP_SSS_H int16_t, int16_t, int32_t, int16_t, int32_t
+#define NOP_SSS_W int32_t, int32_t, int64_t, int32_t, int64_t
+#define NOP_UUU_B uint8_t, uint8_t, uint16_t, uint8_t, uint16_t
+#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t
+#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t
/* share functions */
static inline target_ulong adjust_addr(CPURISCVState *env, target_ulong addr)
@@ -326,4 +332,12 @@ int16_t vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b);
int32_t vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b);
int64_t vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b);
+int8_t vnclip8(CPURISCVState *env, int vxrm, int16_t a, int8_t b);
+int16_t vnclip16(CPURISCVState *env, int vxrm, int32_t a, int16_t b);
+int32_t vnclip32(CPURISCVState *env, int vxrm, int64_t a, int32_t b);
+
+uint8_t vnclipu8(CPURISCVState *env, int vxrm, uint16_t a, uint8_t b);
+uint16_t vnclipu16(CPURISCVState *env, int vxrm, uint32_t a, uint16_t b);
+uint32_t vnclipu32(CPURISCVState *env, int vxrm, uint64_t a, uint32_t b);
+
#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */
@@ -2561,3 +2561,32 @@ GEN_TH_VX_RM(th_vssra_vx_b, 1, 1, clearb_th)
GEN_TH_VX_RM(th_vssra_vx_h, 2, 2, clearh_th)
GEN_TH_VX_RM(th_vssra_vx_w, 4, 4, clearl_th)
GEN_TH_VX_RM(th_vssra_vx_d, 8, 8, clearq_th)
+
+/* Vector Narrowing Fixed-Point Clip Instructions */
+THCALL(TH_OPIVV2_RM, th_vnclip_vv_b, NOP_SSS_B, H1, H2, H1, vnclip8)
+THCALL(TH_OPIVV2_RM, th_vnclip_vv_h, NOP_SSS_H, H2, H4, H2, vnclip16)
+THCALL(TH_OPIVV2_RM, th_vnclip_vv_w, NOP_SSS_W, H4, H8, H4, vnclip32)
+GEN_TH_VV_RM(th_vnclip_vv_b, 1, 1, clearb_th)
+GEN_TH_VV_RM(th_vnclip_vv_h, 2, 2, clearh_th)
+GEN_TH_VV_RM(th_vnclip_vv_w, 4, 4, clearl_th)
+
+THCALL(TH_OPIVX2_RM, th_vnclip_vx_b, NOP_SSS_B, H1, H2, vnclip8)
+THCALL(TH_OPIVX2_RM, th_vnclip_vx_h, NOP_SSS_H, H2, H4, vnclip16)
+THCALL(TH_OPIVX2_RM, th_vnclip_vx_w, NOP_SSS_W, H4, H8, vnclip32)
+GEN_TH_VX_RM(th_vnclip_vx_b, 1, 1, clearb_th)
+GEN_TH_VX_RM(th_vnclip_vx_h, 2, 2, clearh_th)
+GEN_TH_VX_RM(th_vnclip_vx_w, 4, 4, clearl_th)
+
+THCALL(TH_OPIVV2_RM, th_vnclipu_vv_b, NOP_UUU_B, H1, H2, H1, vnclipu8)
+THCALL(TH_OPIVV2_RM, th_vnclipu_vv_h, NOP_UUU_H, H2, H4, H2, vnclipu16)
+THCALL(TH_OPIVV2_RM, th_vnclipu_vv_w, NOP_UUU_W, H4, H8, H4, vnclipu32)
+GEN_TH_VV_RM(th_vnclipu_vv_b, 1, 1, clearb_th)
+GEN_TH_VV_RM(th_vnclipu_vv_h, 2, 2, clearh_th)
+GEN_TH_VV_RM(th_vnclipu_vv_w, 4, 4, clearl_th)
+
+THCALL(TH_OPIVX2_RM, th_vnclipu_vx_b, NOP_UUU_B, H1, H2, vnclipu8)
+THCALL(TH_OPIVX2_RM, th_vnclipu_vx_h, NOP_UUU_H, H2, H4, vnclipu16)
+THCALL(TH_OPIVX2_RM, th_vnclipu_vx_w, NOP_UUU_W, H4, H8, vnclipu32)
+GEN_TH_VX_RM(th_vnclipu_vx_b, 1, 1, clearb_th)
+GEN_TH_VX_RM(th_vnclipu_vx_h, 2, 2, clearh_th)
+GEN_TH_VX_RM(th_vnclipu_vx_w, 4, 4, clearl_th)
These instructions have the same functionality as their RVV1.0 counterparts; overall, there are only minor general differences between XTheadVector and RVV1.0. Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com> --- target/riscv/helper.h | 13 +++++++++ .../riscv/insn_trans/trans_xtheadvector.c.inc | 14 +++++---- target/riscv/vector_helper.c | 26 ++++------------- target/riscv/vector_internals.h | 14 +++++++++ target/riscv/xtheadvector_helper.c | 29 +++++++++++++++++++ 5 files changed, 70 insertions(+), 26 deletions(-)