@@ -1966,3 +1966,20 @@ DEF_HELPER_6(th_vwsmaccsu_vx_w, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(th_vwsmaccus_vx_b, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(th_vwsmaccus_vx_h, void, ptr, ptr, tl, ptr, env, i32)
DEF_HELPER_6(th_vwsmaccus_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+
+DEF_HELPER_6(th_vssrl_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vssrl_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vssrl_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vssrl_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vssra_vv_b, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vssra_vv_h, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vssra_vv_w, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vssra_vv_d, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(th_vssrl_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vssrl_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vssrl_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vssrl_vx_d, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vssra_vx_b, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vssra_vx_h, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vssra_vx_w, void, ptr, ptr, tl, ptr, env, i32)
+DEF_HELPER_6(th_vssra_vx_d, void, ptr, ptr, tl, ptr, env, i32)
@@ -1730,18 +1730,20 @@ GEN_OPIVX_WIDEN_TRANS_TH(th_vwsmacc_vx, opivx_widen_check_th)
GEN_OPIVX_WIDEN_TRANS_TH(th_vwsmaccsu_vx, opivx_widen_check_th)
GEN_OPIVX_WIDEN_TRANS_TH(th_vwsmaccus_vx, opivx_widen_check_th)
+/* Vector Single-Width Scaling Shift Instructions */
+GEN_OPIVV_TRANS_TH(th_vssrl_vv, opivv_check_th)
+GEN_OPIVV_TRANS_TH(th_vssra_vv, opivv_check_th)
+GEN_OPIVX_TRANS_TH(th_vssrl_vx, opivx_check_th)
+GEN_OPIVX_TRANS_TH(th_vssra_vx, opivx_check_th)
+GEN_OPIVI_TRANS_TH(th_vssrl_vi, IMM_TRUNC_SEW, th_vssrl_vx, opivx_check_th)
+GEN_OPIVI_TRANS_TH(th_vssra_vi, IMM_TRUNC_SEW, th_vssra_vx, opivx_check_th)
+
#define TH_TRANS_STUB(NAME) \
static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
{ \
return require_xtheadvector(s); \
}
-TH_TRANS_STUB(th_vssrl_vv)
-TH_TRANS_STUB(th_vssrl_vx)
-TH_TRANS_STUB(th_vssrl_vi)
-TH_TRANS_STUB(th_vssra_vv)
-TH_TRANS_STUB(th_vssra_vx)
-TH_TRANS_STUB(th_vssra_vi)
TH_TRANS_STUB(th_vnclipu_vv)
TH_TRANS_STUB(th_vnclipu_vx)
TH_TRANS_STUB(th_vnclipu_vi)
@@ -2581,8 +2581,7 @@ GEN_VEXT_VX_RM(vsmul_vx_w, 4)
GEN_VEXT_VX_RM(vsmul_vx_d, 8)
/* Vector Single-Width Scaling Shift Instructions */
-static inline uint8_t
-vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
+uint8_t vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
{
uint8_t round, shift = b & 0x7;
uint8_t res;
@@ -2591,24 +2590,21 @@ vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b)
res = (a >> shift) + round;
return res;
}
-static inline uint16_t
-vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
+uint16_t vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b)
{
uint8_t round, shift = b & 0xf;
round = get_round(vxrm, a, shift);
return (a >> shift) + round;
}
-static inline uint32_t
-vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
+uint32_t vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b)
{
uint8_t round, shift = b & 0x1f;
round = get_round(vxrm, a, shift);
return (a >> shift) + round;
}
-static inline uint64_t
-vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
+uint64_t vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b)
{
uint8_t round, shift = b & 0x3f;
@@ -2633,32 +2629,28 @@ GEN_VEXT_VX_RM(vssrl_vx_h, 2)
GEN_VEXT_VX_RM(vssrl_vx_w, 4)
GEN_VEXT_VX_RM(vssrl_vx_d, 8)
-static inline int8_t
-vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
+int8_t vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b)
{
uint8_t round, shift = b & 0x7;
round = get_round(vxrm, a, shift);
return (a >> shift) + round;
}
-static inline int16_t
-vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
+int16_t vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b)
{
uint8_t round, shift = b & 0xf;
round = get_round(vxrm, a, shift);
return (a >> shift) + round;
}
-static inline int32_t
-vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+int32_t vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
{
uint8_t round, shift = b & 0x1f;
round = get_round(vxrm, a, shift);
return (a >> shift) + round;
}
-static inline int64_t
-vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+int64_t vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
{
uint8_t round, shift = b & 0x3f;
@@ -316,4 +316,14 @@ int64_t vsmul64(CPURISCVState *env, int vxrm, int64_t a, int64_t b);
uint8_t get_round(int vxrm, uint64_t v, uint8_t shift);
+uint8_t vssrl8(CPURISCVState *env, int vxrm, uint8_t a, uint8_t b);
+uint16_t vssrl16(CPURISCVState *env, int vxrm, uint16_t a, uint16_t b);
+uint32_t vssrl32(CPURISCVState *env, int vxrm, uint32_t a, uint32_t b);
+uint64_t vssrl64(CPURISCVState *env, int vxrm, uint64_t a, uint64_t b);
+
+int8_t vssra8(CPURISCVState *env, int vxrm, int8_t a, int8_t b);
+int16_t vssra16(CPURISCVState *env, int vxrm, int16_t a, int16_t b);
+int32_t vssra32(CPURISCVState *env, int vxrm, int32_t a, int32_t b);
+int64_t vssra64(CPURISCVState *env, int vxrm, int64_t a, int64_t b);
+
#endif /* TARGET_RISCV_VECTOR_INTERNALS_H */
@@ -2523,3 +2523,41 @@ THCALL(TH_OPIVX3_RM, th_vwsmaccus_vx_w, WOP_SUS_W, H8, H4, vwsmaccus32)
GEN_TH_VX_RM(th_vwsmaccus_vx_b, 1, 2, clearh_th)
GEN_TH_VX_RM(th_vwsmaccus_vx_h, 2, 4, clearl_th)
GEN_TH_VX_RM(th_vwsmaccus_vx_w, 4, 8, clearq_th)
+
+/* Vector Single-Width Scaling Shift Instructions */
+
+THCALL(TH_OPIVV2_RM, th_vssrl_vv_b, OP_UUU_B, H1, H1, H1, vssrl8)
+THCALL(TH_OPIVV2_RM, th_vssrl_vv_h, OP_UUU_H, H2, H2, H2, vssrl16)
+THCALL(TH_OPIVV2_RM, th_vssrl_vv_w, OP_UUU_W, H4, H4, H4, vssrl32)
+THCALL(TH_OPIVV2_RM, th_vssrl_vv_d, OP_UUU_D, H8, H8, H8, vssrl64)
+GEN_TH_VV_RM(th_vssrl_vv_b, 1, 1, clearb_th)
+GEN_TH_VV_RM(th_vssrl_vv_h, 2, 2, clearh_th)
+GEN_TH_VV_RM(th_vssrl_vv_w, 4, 4, clearl_th)
+GEN_TH_VV_RM(th_vssrl_vv_d, 8, 8, clearq_th)
+
+THCALL(TH_OPIVX2_RM, th_vssrl_vx_b, OP_UUU_B, H1, H1, vssrl8)
+THCALL(TH_OPIVX2_RM, th_vssrl_vx_h, OP_UUU_H, H2, H2, vssrl16)
+THCALL(TH_OPIVX2_RM, th_vssrl_vx_w, OP_UUU_W, H4, H4, vssrl32)
+THCALL(TH_OPIVX2_RM, th_vssrl_vx_d, OP_UUU_D, H8, H8, vssrl64)
+GEN_TH_VX_RM(th_vssrl_vx_b, 1, 1, clearb_th)
+GEN_TH_VX_RM(th_vssrl_vx_h, 2, 2, clearh_th)
+GEN_TH_VX_RM(th_vssrl_vx_w, 4, 4, clearl_th)
+GEN_TH_VX_RM(th_vssrl_vx_d, 8, 8, clearq_th)
+
+THCALL(TH_OPIVV2_RM, th_vssra_vv_b, OP_SSS_B, H1, H1, H1, vssra8)
+THCALL(TH_OPIVV2_RM, th_vssra_vv_h, OP_SSS_H, H2, H2, H2, vssra16)
+THCALL(TH_OPIVV2_RM, th_vssra_vv_w, OP_SSS_W, H4, H4, H4, vssra32)
+THCALL(TH_OPIVV2_RM, th_vssra_vv_d, OP_SSS_D, H8, H8, H8, vssra64)
+GEN_TH_VV_RM(th_vssra_vv_b, 1, 1, clearb_th)
+GEN_TH_VV_RM(th_vssra_vv_h, 2, 2, clearh_th)
+GEN_TH_VV_RM(th_vssra_vv_w, 4, 4, clearl_th)
+GEN_TH_VV_RM(th_vssra_vv_d, 8, 8, clearq_th)
+
+THCALL(TH_OPIVX2_RM, th_vssra_vx_b, OP_SSS_B, H1, H1, vssra8)
+THCALL(TH_OPIVX2_RM, th_vssra_vx_h, OP_SSS_H, H2, H2, vssra16)
+THCALL(TH_OPIVX2_RM, th_vssra_vx_w, OP_SSS_W, H4, H4, vssra32)
+THCALL(TH_OPIVX2_RM, th_vssra_vx_d, OP_SSS_D, H8, H8, vssra64)
+GEN_TH_VX_RM(th_vssra_vx_b, 1, 1, clearb_th)
+GEN_TH_VX_RM(th_vssra_vx_h, 2, 2, clearh_th)
+GEN_TH_VX_RM(th_vssra_vx_w, 4, 4, clearl_th)
+GEN_TH_VX_RM(th_vssra_vx_d, 8, 8, clearq_th)
These instructions have the same functionality as their RVV1.0 counterparts; only the general differences between XTheadVector and RVV1.0 (element agnosticism, mask layout, and tail clearing) apply. Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com> --- target/riscv/helper.h | 17 +++++++++ .../riscv/insn_trans/trans_xtheadvector.c.inc | 14 ++++--- target/riscv/vector_helper.c | 24 ++++-------- target/riscv/vector_internals.h | 10 +++++ target/riscv/xtheadvector_helper.c | 38 +++++++++++++++++++ 5 files changed, 81 insertions(+), 22 deletions(-)