@@ -1088,6 +1088,7 @@ (define_expand "vec_extract<mode><vel>"
{
/* Emit the slide down to index 0 in a new vector. */
tmp = gen_reg_rtx (<MODE>mode);
+ operands[2] = gen_lowpart (Pmode, operands[2]);
rtx ops[] = {tmp, RVV_VUNDEF (<MODE>mode), operands[1], operands[2]};
riscv_vector::emit_vlmax_slide_insn
(code_for_pred_slide (UNSPEC_VSLIDEDOWN, <MODE>mode), ops);
@@ -11,7 +11,6 @@ typedef _Float16 vnx8hf __attribute__((vector_size (16)));
typedef float vnx4sf __attribute__((vector_size (16)));
typedef double vnx2df __attribute__((vector_size (16)));
-
#define VEC_EXTRACT(S,V,IDX) \
S \
__attribute__((noipa)) \
@@ -20,6 +19,14 @@ typedef double vnx2df __attribute__((vector_size (16)));
return v[IDX]; \
}
+#define VEC_EXTRACT_VAR1(S,V) \
+ S \
+ __attribute__((noipa)) \
+ vec_extract_var_##V (V v, int8_t idx) \
+ { \
+ return v[idx]; \
+ }
+
#define TEST_ALL1(T) \
T (_Float16, vnx8hf, 0) \
T (_Float16, vnx8hf, 3) \
@@ -43,17 +50,27 @@ typedef double vnx2df __attribute__((vector_size (16)));
T (int8_t, vnx16qi, 11) \
T (int8_t, vnx16qi, 15) \
+#define TEST_ALL_VAR1(T) \
+ T (_Float16, vnx8hf) \
+ T (float, vnx4sf) \
+ T (double, vnx2df) \
+ T (int64_t, vnx2di) \
+ T (int32_t, vnx4si) \
+ T (int16_t, vnx8hi) \
+ T (int8_t, vnx16qi) \
+
TEST_ALL1 (VEC_EXTRACT)
+TEST_ALL_VAR1 (VEC_EXTRACT_VAR1)
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m1,\s*ta,\s*ma} 5 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m1,\s*ta,\s*ma} 6 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m1,\s*ta,\s*ma} 6 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m1,\s*ta,\s*ma} 4 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m1,\s*ta,\s*ma} 6 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m1,\s*ta,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m1,\s*ta,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m1,\s*ta,\s*ma} 6 } } */
/* { dg-final { scan-assembler-times {\tvslidedown.vi} 14 } } */
-/* { dg-final { scan-assembler-times {\tvslidedown.vx} 0 } } */
+/* { dg-final { scan-assembler-times {\tvslidedown.vx} 7 } } */
-/* { dg-final { scan-assembler-times {\tvfmv.f.s} 8 } } */
-/* { dg-final { scan-assembler-times {\tvmv.x.s} 13 } } */
+/* { dg-final { scan-assembler-times {\tvfmv.f.s} 11 } } */
+/* { dg-final { scan-assembler-times {\tvmv.x.s} 17 } } */
/* { dg-final { scan-assembler-not {\tsext} } } */
@@ -19,6 +19,14 @@ typedef double vnx4df __attribute__((vector_size (32)));
return v[IDX]; \
}
+#define VEC_EXTRACT_VAR2(S,V) \
+ S \
+ __attribute__((noipa)) \
+ vec_extract_var_##V (V v, int16_t idx) \
+ { \
+ return v[idx]; \
+ }
+
#define TEST_ALL2(T) \
T (_Float16, vnx16hf, 0) \
T (_Float16, vnx16hf, 3) \
@@ -54,17 +62,27 @@ typedef double vnx4df __attribute__((vector_size (32)));
T (int8_t, vnx32qi, 16) \
T (int8_t, vnx32qi, 31) \
+#define TEST_ALL_VAR2(T) \
+ T (_Float16, vnx16hf) \
+ T (float, vnx8sf) \
+ T (double, vnx4df) \
+ T (int64_t, vnx4di) \
+ T (int32_t, vnx8si) \
+ T (int16_t, vnx16hi) \
+ T (int8_t, vnx32qi) \
+
TEST_ALL2 (VEC_EXTRACT)
+TEST_ALL_VAR2 (VEC_EXTRACT_VAR2)
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m2,\s*ta,\s*ma} 5 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m2,\s*ta,\s*ma} 10 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m2,\s*ta,\s*ma} 10 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m2,\s*ta,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m2,\s*ta,\s*ma} 6 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m2,\s*ta,\s*ma} 12 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m2,\s*ta,\s*ma} 12 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m2,\s*ta,\s*ma} 10 } } */
/* { dg-final { scan-assembler-times {\tvslidedown.vi} 26 } } */
-/* { dg-final { scan-assembler-times {\tvslidedown.vx} 0 } } */
+/* { dg-final { scan-assembler-times {\tvslidedown.vx} 7 } } */
-/* { dg-final { scan-assembler-times {\tvfmv.f.s} 14 } } */
-/* { dg-final { scan-assembler-times {\tvmv.x.s} 19 } } */
+/* { dg-final { scan-assembler-times {\tvfmv.f.s} 17 } } */
+/* { dg-final { scan-assembler-times {\tvmv.x.s} 23 } } */
/* { dg-final { scan-assembler-not {\tsext} } } */
@@ -19,6 +19,14 @@ typedef double vnx8df __attribute__((vector_size (64)));
return v[IDX]; \
}
+#define VEC_EXTRACT_VAR3(S,V) \
+ S \
+ __attribute__((noipa)) \
+ vec_extract_var_##V (V v, int32_t idx) \
+ { \
+ return v[idx]; \
+ }
+
#define TEST_ALL3(T) \
T (_Float16, vnx32hf, 0) \
T (_Float16, vnx32hf, 3) \
@@ -55,17 +63,27 @@ typedef double vnx8df __attribute__((vector_size (64)));
T (int8_t, vnx64qi, 32) \
T (int8_t, vnx64qi, 63) \
+#define TEST_ALL_VAR3(T) \
+ T (_Float16, vnx32hf) \
+ T (float, vnx16sf) \
+ T (double, vnx8df) \
+ T (int64_t, vnx8di) \
+ T (int32_t, vnx16si) \
+ T (int16_t, vnx32hi) \
+ T (int8_t, vnx64qi) \
+
TEST_ALL3 (VEC_EXTRACT)
+TEST_ALL_VAR3 (VEC_EXTRACT_VAR3)
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m4,\s*ta,\s*ma} 5 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m4,\s*ta,\s*ma} 11 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m4,\s*ta,\s*ma} 10 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m4,\s*ta,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m4,\s*ta,\s*ma} 6 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m4,\s*ta,\s*ma} 13 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m4,\s*ta,\s*ma} 12 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m4,\s*ta,\s*ma} 10 } } */
/* { dg-final { scan-assembler-times {\tvslidedown.vi} 25 } } */
-/* { dg-final { scan-assembler-times {\tvslidedown.vx} 2 } } */
+/* { dg-final { scan-assembler-times {\tvslidedown.vx} 9 } } */
-/* { dg-final { scan-assembler-times {\tvfmv.f.s} 15 } } */
-/* { dg-final { scan-assembler-times {\tvmv.x.s} 19 } } */
+/* { dg-final { scan-assembler-times {\tvfmv.f.s} 18 } } */
+/* { dg-final { scan-assembler-times {\tvmv.x.s} 23 } } */
/* { dg-final { scan-assembler-not {\tsext} } } */
@@ -19,6 +19,14 @@ typedef double vnx16df __attribute__((vector_size (128)));
return v[IDX]; \
}
+#define VEC_EXTRACT_VAR4(S,V) \
+ S \
+ __attribute__((noipa)) \
+ vec_extract_var_##V (V v, int64_t idx) \
+ { \
+ return v[idx]; \
+ }
+
#define TEST_ALL4(T) \
T (_Float16, vnx64hf, 0) \
T (_Float16, vnx64hf, 3) \
@@ -58,17 +66,27 @@ typedef double vnx16df __attribute__((vector_size (128)));
T (int8_t, vnx128qi, 64) \
T (int8_t, vnx128qi, 127) \
+#define TEST_ALL_VAR4(T) \
+ T (_Float16, vnx64hf) \
+ T (float, vnx32sf) \
+ T (double, vnx16df) \
+ T (int64_t, vnx16di) \
+ T (int32_t, vnx32si) \
+ T (int16_t, vnx64hi) \
+ T (int8_t, vnx128qi) \
+
TEST_ALL4 (VEC_EXTRACT)
+TEST_ALL_VAR4 (VEC_EXTRACT_VAR4)
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m8,\s*ta,\s*ma} 6 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m8,\s*ta,\s*ma} 13 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m8,\s*ta,\s*ma} 10 } } */
-/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m8,\s*ta,\s*ma} 8 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e8,\s*m8,\s*ta,\s*ma} 7 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e16,\s*m8,\s*ta,\s*ma} 15 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e32,\s*m8,\s*ta,\s*ma} 12 } } */
+/* { dg-final { scan-assembler-times {vset[i]*vli\s+[a-z0-9,]+,\s*e64,\s*m8,\s*ta,\s*ma} 10 } } */
/* { dg-final { scan-assembler-times {\tvslidedown.vi} 23 } } */
-/* { dg-final { scan-assembler-times {\tvslidedown.vx} 7 } } */
+/* { dg-final { scan-assembler-times {\tvslidedown.vx} 14 } } */
-/* { dg-final { scan-assembler-times {\tvfmv.f.s} 17 } } */
-/* { dg-final { scan-assembler-times {\tvmv.x.s} 20 } } */
+/* { dg-final { scan-assembler-times {\tvfmv.f.s} 20 } } */
+/* { dg-final { scan-assembler-times {\tvmv.x.s} 24 } } */
/* { dg-final { scan-assembler-not {\tsext} } } */
@@ -18,117 +18,25 @@ void check_##V##_##IDX () \
assert (res == v[IDX]); \
}
-#define CHECK_ALL(T) \
- T (float, vnx4sf, 0) \
- T (float, vnx4sf, 1) \
- T (float, vnx4sf, 3) \
- T (double, vnx2df, 0) \
- T (double, vnx2df, 1) \
- T (int64_t, vnx2di, 0) \
- T (int64_t, vnx2di, 1) \
- T (int32_t, vnx4si, 0) \
- T (int32_t, vnx4si, 1) \
- T (int32_t, vnx4si, 3) \
- T (int16_t, vnx8hi, 0) \
- T (int16_t, vnx8hi, 2) \
- T (int16_t, vnx8hi, 6) \
- T (int8_t, vnx16qi, 0) \
- T (int8_t, vnx16qi, 1) \
- T (int8_t, vnx16qi, 7) \
- T (int8_t, vnx16qi, 11) \
- T (int8_t, vnx16qi, 15) \
- T (float, vnx8sf, 0) \
- T (float, vnx8sf, 1) \
- T (float, vnx8sf, 3) \
- T (float, vnx8sf, 4) \
- T (float, vnx8sf, 7) \
- T (double, vnx4df, 0) \
- T (double, vnx4df, 1) \
- T (double, vnx4df, 2) \
- T (double, vnx4df, 3) \
- T (int64_t, vnx4di, 0) \
- T (int64_t, vnx4di, 1) \
- T (int64_t, vnx4di, 2) \
- T (int64_t, vnx4di, 3) \
- T (int32_t, vnx8si, 0) \
- T (int32_t, vnx8si, 1) \
- T (int32_t, vnx8si, 3) \
- T (int32_t, vnx8si, 4) \
- T (int32_t, vnx8si, 7) \
- T (int16_t, vnx16hi, 0) \
- T (int16_t, vnx16hi, 1) \
- T (int16_t, vnx16hi, 7) \
- T (int16_t, vnx16hi, 8) \
- T (int16_t, vnx16hi, 15) \
- T (int8_t, vnx32qi, 0) \
- T (int8_t, vnx32qi, 1) \
- T (int8_t, vnx32qi, 15) \
- T (int8_t, vnx32qi, 16) \
- T (int8_t, vnx32qi, 31) \
- T (float, vnx16sf, 0) \
- T (float, vnx16sf, 2) \
- T (float, vnx16sf, 6) \
- T (float, vnx16sf, 8) \
- T (float, vnx16sf, 14) \
- T (double, vnx8df, 0) \
- T (double, vnx8df, 2) \
- T (double, vnx8df, 4) \
- T (double, vnx8df, 6) \
- T (int64_t, vnx8di, 0) \
- T (int64_t, vnx8di, 2) \
- T (int64_t, vnx8di, 4) \
- T (int64_t, vnx8di, 6) \
- T (int32_t, vnx16si, 0) \
- T (int32_t, vnx16si, 2) \
- T (int32_t, vnx16si, 6) \
- T (int32_t, vnx16si, 8) \
- T (int32_t, vnx16si, 14) \
- T (int16_t, vnx32hi, 0) \
- T (int16_t, vnx32hi, 2) \
- T (int16_t, vnx32hi, 14) \
- T (int16_t, vnx32hi, 16) \
- T (int16_t, vnx32hi, 30) \
- T (int8_t, vnx64qi, 0) \
- T (int8_t, vnx64qi, 2) \
- T (int8_t, vnx64qi, 30) \
- T (int8_t, vnx64qi, 32) \
- T (int8_t, vnx64qi, 63) \
- T (float, vnx32sf, 0) \
- T (float, vnx32sf, 3) \
- T (float, vnx32sf, 12) \
- T (float, vnx32sf, 17) \
- T (float, vnx32sf, 14) \
- T (double, vnx16df, 0) \
- T (double, vnx16df, 4) \
- T (double, vnx16df, 8) \
- T (double, vnx16df, 12) \
- T (int64_t, vnx16di, 0) \
- T (int64_t, vnx16di, 4) \
- T (int64_t, vnx16di, 8) \
- T (int64_t, vnx16di, 12) \
- T (int32_t, vnx32si, 0) \
- T (int32_t, vnx32si, 4) \
- T (int32_t, vnx32si, 12) \
- T (int32_t, vnx32si, 16) \
- T (int32_t, vnx32si, 28) \
- T (int16_t, vnx64hi, 0) \
- T (int16_t, vnx64hi, 4) \
- T (int16_t, vnx64hi, 28) \
- T (int16_t, vnx64hi, 32) \
- T (int16_t, vnx64hi, 60) \
- T (int8_t, vnx128qi, 0) \
- T (int8_t, vnx128qi, 4) \
- T (int8_t, vnx128qi, 30) \
- T (int8_t, vnx128qi, 60) \
- T (int8_t, vnx128qi, 64) \
- T (int8_t, vnx128qi, 127) \
-
-CHECK_ALL (CHECK)
+#define CHECK_VAR(S, V) \
+__attribute__ ((noipa)) \
+void check_var_##V (int32_t idx) \
+ { \
+ V v; \
+ for (int i = 0; i < sizeof (V) / sizeof (S); i++) \
+ v[i] = i; \
+ S res = vec_extract_var_##V (v, idx); \
+ assert (res == v[idx]); \
+ }
-#define RUN(S, V, IDX) \
+#define RUN(S, V, IDX) \
check_##V##_##IDX ();
-#define RUN_ALL(T) \
+#define RUN_VAR(S, V) \
+ for (int i = 0; i < sizeof (V) / sizeof (S); i++) \
+ check_var_##V (i); \
+
+#define RUN_ALL(T) \
T (float, vnx4sf, 0) \
T (float, vnx4sf, 1) \
T (float, vnx4sf, 3) \
@@ -233,7 +141,37 @@ CHECK_ALL (CHECK)
T (int8_t, vnx128qi, 64) \
T (int8_t, vnx128qi, 127) \
+#define RUN_ALL_VAR(T) \
+ T (float, vnx4sf) \
+ T (double, vnx2df) \
+ T (int64_t, vnx2di) \
+ T (int32_t, vnx4si) \
+ T (int16_t, vnx8hi) \
+ T (int8_t, vnx16qi) \
+ T (float, vnx8sf) \
+ T (double, vnx4df) \
+ T (int64_t, vnx4di) \
+ T (int32_t, vnx8si) \
+ T (int16_t, vnx16hi) \
+ T (int8_t, vnx32qi) \
+ T (float, vnx16sf) \
+ T (double, vnx8df) \
+ T (int64_t, vnx8di) \
+ T (int32_t, vnx16si) \
+ T (int16_t, vnx32hi) \
+ T (int8_t, vnx64qi) \
+ T (float, vnx32sf) \
+ T (double, vnx16df) \
+ T (int64_t, vnx16di) \
+ T (int32_t, vnx32si) \
+ T (int16_t, vnx64hi) \
+ T (int8_t, vnx128qi) \
+
+RUN_ALL (CHECK)
+RUN_ALL_VAR (CHECK_VAR)
+
int main ()
{
RUN_ALL (RUN);
+ RUN_ALL_VAR (RUN_VAR);
}
@@ -18,35 +18,24 @@ void check_##V##_##IDX () \
assert (res == v[IDX]); \
}
-#define CHECK_ALL(T) \
- T (_Float16, vnx8hf, 0) \
- T (_Float16, vnx8hf, 3) \
- T (_Float16, vnx8hf, 7) \
- T (_Float16, vnx16hf, 0) \
- T (_Float16, vnx16hf, 3) \
- T (_Float16, vnx16hf, 7) \
- T (_Float16, vnx16hf, 8) \
- T (_Float16, vnx16hf, 15) \
- T (_Float16, vnx32hf, 0) \
- T (_Float16, vnx32hf, 3) \
- T (_Float16, vnx32hf, 7) \
- T (_Float16, vnx32hf, 8) \
- T (_Float16, vnx32hf, 16) \
- T (_Float16, vnx32hf, 31) \
- T (_Float16, vnx64hf, 0) \
- T (_Float16, vnx64hf, 3) \
- T (_Float16, vnx64hf, 7) \
- T (_Float16, vnx64hf, 8) \
- T (_Float16, vnx64hf, 16) \
- T (_Float16, vnx64hf, 31) \
- T (_Float16, vnx64hf, 42) \
- T (_Float16, vnx64hf, 63) \
-
-CHECK_ALL (CHECK)
+#define CHECK_VAR(S, V) \
+__attribute__ ((noipa)) \
+void check_var_##V (int32_t idx) \
+ { \
+ V v; \
+ for (int i = 0; i < sizeof (V) / sizeof (S); i++) \
+ v[i] = i; \
+ S res = vec_extract_var_##V (v, idx); \
+ assert (res == v[idx]); \
+ }
#define RUN(S, V, IDX) \
check_##V##_##IDX ();
+#define RUN_VAR(S, V) \
+ for (int i = 0; i < sizeof (V) / sizeof (S); i++) \
+ check_var_##V (i); \
+
#define RUN_ALL(T) \
T (_Float16, vnx8hf, 0) \
T (_Float16, vnx8hf, 3) \
@@ -71,7 +60,17 @@ CHECK_ALL (CHECK)
T (_Float16, vnx64hf, 42) \
T (_Float16, vnx64hf, 63) \
+#define RUN_ALL_VAR(T) \
+ T (_Float16, vnx8hf) \
+ T (_Float16, vnx16hf) \
+ T (_Float16, vnx32hf) \
+ T (_Float16, vnx64hf) \
+
+RUN_ALL (CHECK)
+RUN_ALL_VAR (CHECK_VAR)
+
int main ()
{
RUN_ALL (RUN);
+ RUN_ALL_VAR (RUN_VAR);
}