From: Juha Riihimäki <juha.riihimaki@nokia.com>

VQMOVUN does a signed-to-unsigned saturating conversion. This is
different from both the signed-to-signed and unsigned-to-unsigned
conversions already implemented, so we need a new set of helper
functions (neon_unarrow_sat*).

Signed-off-by: Juha Riihimäki <juha.riihimaki@nokia.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
---
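As a rough scalar reference (not part of the patch; the function name and the qc flag below are made up for illustration), each lane of the new conversion clamps a signed value into the unsigned range of half the width and records saturation, which the helpers below report through QC:

    /* Illustrative sketch only: one 16-bit lane of the signed-to-unsigned
     * saturating narrow that the neon_unarrow_sat* helpers implement. */
    #include <stdint.h>
    #include <stdbool.h>

    static uint8_t sat_u8_from_s16(int16_t v, bool *qc)
    {
        if (v < 0) {            /* negative inputs saturate to 0 */
            *qc = true;
            return 0;
        }
        if (v > 0xff) {         /* values above 0xff saturate to 0xff */
            *qc = true;
            return 0xff;
        }
        return (uint8_t)v;      /* in-range values pass through */
    }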
 target-arm/helpers.h     |    3 ++
 target-arm/neon_helper.c |   63 ++++++++++++++++++++++++++++++++++++++++++++++
 target-arm/translate.c   |   28 ++++++++++++++++----
 3 files changed, 88 insertions(+), 6 deletions(-)
@@ -299,10 +299,13 @@ DEF_HELPER_3(neon_qrdmulh_s32, i32, env, i32, i32)
DEF_HELPER_1(neon_narrow_u8, i32, i64)
DEF_HELPER_1(neon_narrow_u16, i32, i64)
+DEF_HELPER_2(neon_unarrow_sat8, i32, env, i64)
DEF_HELPER_2(neon_narrow_sat_u8, i32, env, i64)
DEF_HELPER_2(neon_narrow_sat_s8, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat16, i32, env, i64)
DEF_HELPER_2(neon_narrow_sat_u16, i32, env, i64)
DEF_HELPER_2(neon_narrow_sat_s16, i32, env, i64)
+DEF_HELPER_2(neon_unarrow_sat32, i32, env, i64)
DEF_HELPER_2(neon_narrow_sat_u32, i32, env, i64)
DEF_HELPER_2(neon_narrow_sat_s32, i32, env, i64)
DEF_HELPER_1(neon_narrow_high_u8, i32, i64)
@@ -1053,6 +1053,33 @@ uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}
+uint32_t HELPER(neon_unarrow_sat8)(CPUState *env, uint64_t x)
+{
+    uint16_t s;
+    uint8_t d;
+    uint32_t res = 0;
+#define SAT8(n) \
+    s = x >> n; \
+    if (s & 0x8000) { \
+        SET_QC(); \
+    } else { \
+        if (s > 0xff) { \
+            d = 0xff; \
+            SET_QC(); \
+        } else { \
+            d = s; \
+        } \
+        res |= (uint32_t)d << (n / 2); \
+    }
+
+    SAT8(0);
+    SAT8(16);
+    SAT8(32);
+    SAT8(48);
+#undef SAT8
+    return res;
+}
+
uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
{
uint16_t s;
@@ -1099,6 +1126,29 @@ uint32_t HELPER(neon_narrow_sat_s8)(CPUState *env, uint64_t x)
return res;
}
+uint32_t HELPER(neon_unarrow_sat16)(CPUState *env, uint64_t x)
+{
+    uint32_t high;
+    uint32_t low;
+    low = x;
+    if (low & 0x80000000) {
+        low = 0;
+        SET_QC();
+    } else if (low > 0xffff) {
+        low = 0xffff;
+        SET_QC();
+    }
+    high = x >> 32;
+    if (high & 0x80000000) {
+        high = 0;
+        SET_QC();
+    } else if (high > 0xffff) {
+        high = 0xffff;
+        SET_QC();
+    }
+    return low | (high << 16);
+}
+
uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
{
uint32_t high;
@@ -1133,6 +1183,19 @@ uint32_t HELPER(neon_narrow_sat_s16)(CPUState *env, uint64_t x)
return (uint16_t)low | (high << 16);
}
+uint32_t HELPER(neon_unarrow_sat32)(CPUState *env, uint64_t x)
+{
+    if (x & 0x8000000000000000ull) {
+        SET_QC();
+        return 0;
+    }
+    if (x > 0xffffffffu) {
+        SET_QC();
+        return 0xffffffffu;
+    }
+    return x;
+}
+
uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
{
if (x > 0xffffffffu) {
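For illustration (not part of the patch, and assuming the usual helper_* calling convention), feeding the new 8-bit helper a 64-bit value holding the signed 16-bit lanes {0x0123, 0x007f, 0xff80, 0x8000} behaves as follows:

    /* Illustrative only: 0x0123 clamps to 0xff, 0x007f passes through,
     * and the two negative lanes clamp to 0; QC is set because some
     * lanes saturated. */
    uint64_t in  = 0x8000ff80007f0123ull;
    uint32_t out = helper_neon_unarrow_sat8(env, in);
    /* out == 0x00007fff; the lowest input lane lands in the lowest byte */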
@@ -4065,6 +4065,16 @@ static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
}
}
+static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
+{
+    switch (size) {
+    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
+    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
+    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
+    default: abort();
+    }
+}
+
static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
int q, int u)
{
@@ -5461,12 +5471,18 @@ static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
                 for (pass = 0; pass < 2; pass++) {
                     neon_load_reg64(cpu_V0, rm + pass);
                     tmp = new_tmp();
-                    if (op == 36 && q == 0) {
-                        gen_neon_narrow(size, tmp, cpu_V0);
-                    } else if (q) {
-                        gen_neon_narrow_satu(size, tmp, cpu_V0);
-                    } else {
-                        gen_neon_narrow_sats(size, tmp, cpu_V0);
+                    if (op == 36) {
+                        if (q) { /* VQMOVUN */
+                            gen_neon_unarrow_sats(size, tmp, cpu_V0);
+                        } else { /* VMOVN */
+                            gen_neon_narrow(size, tmp, cpu_V0);
+                        }
+                    } else { /* VQMOVN */
+                        if (q) {
+                            gen_neon_narrow_satu(size, tmp, cpu_V0);
+                        } else {
+                            gen_neon_narrow_sats(size, tmp, cpu_V0);
+                        }
                     }
                     if (pass == 0) {
                         tmp2 = tmp;
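For reference, the decode change above amounts to the following mapping (an illustrative summary, not part of the patch):

    /* op == 36, q == 0  ->  VMOVN:   gen_neon_narrow, no saturation
     * op == 36, q == 1  ->  VQMOVUN: gen_neon_unarrow_sats, the new helpers
     * otherwise (VQMOVN) -> existing gen_neon_narrow_satu / gen_neon_narrow_sats,
     *                       chosen by q as before
     */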