From patchwork Mon Dec 12 20:02:01 2011
X-Patchwork-Submitter: Richard Henderson
X-Patchwork-Id: 130811
Message-ID: <4EE65DB9.5040907@redhat.com>
Date: Mon, 12 Dec 2011 12:02:01 -0800
From: Richard Henderson
To: sje@cup.hp.com
CC: gcc-patches@gcc.gnu.org, richard.earnshaw@arm.com,
    ramana.radhakrishnan@linaro.org, jakub@redhat.com, dje.gcc@gmail.com,
    rdsandiford@googlemail.com, mingjie.xing@gmail.com,
    meissner@linux.vnet.ibm.com,
    "Mailaripillai, Kannan Jeganathan (STSD)"
Subject: Re: [PATCH 3/6] ia64: Implement vec_perm_const.
References: <1323378383-9824-1-git-send-email-rth@redhat.com>
    <1323378383-9824-4-git-send-email-rth@redhat.com>
    <1323709162.8237.210.camel@hpsje.cup.hp.com>
In-Reply-To: <1323709162.8237.210.camel@hpsje.cup.hp.com>

On 12/12/2011 08:59 AM, Steve Ellcey wrote:
> Richard,
>
> I am hitting an assert in expand_vec_perm_even_odd on IA64 HP-UX with
> your patch.

Try this version.

r~

commit 33c2ab861e7fea4b6c3fc6e64c43f6d94eff4dfb
Author: Richard Henderson
Date:   Mon Dec 5 12:59:16 2011 -0800

    ia64: Implement vec_perm_const.
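For reference, the selector semantics the new expanders implement are simple:
each element of operand 3 indexes into the virtual concatenation of operands 1
and 2, and ia64_expand_vec_perm_const reduces every index with & (2 * nelt - 1)
before trying to match it against the available mix/mux/pack patterns.  A
minimal scalar sketch of those semantics for an 8-element vector (hypothetical
helper, not part of the patch):

/* Hypothetical reference routine, for illustration only: element I of the
   result is element SEL[I] of the concatenation {OP0, OP1}, with SEL[I]
   masked by 2 * nelt - 1 as ia64_expand_vec_perm_const does.  */
static void
ref_vec_perm_const_v8qi (unsigned char *dst, const unsigned char *op0,
                         const unsigned char *op1, const unsigned char *sel)
{
  unsigned int i, nelt = 8;

  for (i = 0; i < nelt; ++i)
    {
      unsigned int e = sel[i] & (2 * nelt - 1);
      dst[i] = (e < nelt ? op0[e] : op1[e - nelt]);
    }
}

An extract-even operation, for example, is the constant selector
{0, 2, 4, 6, 8, 10, 12, 14}; expand_vec_perm_even_odd recognizes exactly that
shape (d->perm[i] == 2 * i + odd) and expands it via the MIX-plus-MUX sequence
in expand_vec_perm_even_odd_1.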
diff --git a/gcc/config/ia64/ia64-protos.h b/gcc/config/ia64/ia64-protos.h
index a680c31..1cb46b6 100644
--- a/gcc/config/ia64/ia64-protos.h
+++ b/gcc/config/ia64/ia64-protos.h
@@ -61,6 +61,10 @@ extern int ia64_hard_regno_rename_ok (int, int);
 extern enum reg_class ia64_secondary_reload_class (enum reg_class,
                                                    enum machine_mode, rtx);
 extern const char *get_bundle_name (int);
+
+extern void expand_vec_perm_even_odd_1 (rtx, rtx, rtx, int);
+extern bool ia64_expand_vec_perm_const (rtx op[4]);
+extern void ia64_expand_vec_setv2sf (rtx op[3]);
 #endif /* RTX_CODE */
 
 #ifdef TREE_CODE
diff --git a/gcc/config/ia64/ia64.c b/gcc/config/ia64/ia64.c
index 64bd999..6a115cc 100644
--- a/gcc/config/ia64/ia64.c
+++ b/gcc/config/ia64/ia64.c
@@ -329,6 +329,24 @@ static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
 static enum machine_mode ia64_get_reg_raw_mode (int regno);
 static section * ia64_hpux_function_section (tree, enum node_frequency,
                                              bool, bool);
+
+static bool ia64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
+                                              const unsigned char *sel);
+
+#define MAX_VECT_LEN 8
+
+struct expand_vec_perm_d
+{
+  rtx target, op0, op1;
+  unsigned char perm[MAX_VECT_LEN];
+  enum machine_mode vmode;
+  unsigned char nelt;
+  bool one_operand_p;
+  bool testing_p;
+};
+
+static bool ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d);
+
 /* Table of valid machine attributes.  */
 static const struct attribute_spec ia64_attribute_table[] =
@@ -623,6 +641,9 @@ static const struct attribute_spec ia64_attribute_table[] =
 #undef TARGET_DELAY_VARTRACK
 #define TARGET_DELAY_VARTRACK true
 
+#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
+#define TARGET_VECTORIZE_VEC_PERM_CONST_OK ia64_vectorize_vec_perm_const_ok
+
 struct gcc_target targetm = TARGET_INITIALIZER;
 
 typedef enum
@@ -1962,28 +1983,28 @@ ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
 void
 ia64_unpack_assemble (rtx out, rtx lo, rtx hi, bool highp)
 {
-  enum machine_mode mode = GET_MODE (lo);
-  rtx (*gen) (rtx, rtx, rtx);
-  rtx x;
+  enum machine_mode vmode = GET_MODE (lo);
+  unsigned int i, high, nelt = GET_MODE_NUNITS (vmode);
+  struct expand_vec_perm_d d;
+  bool ok;
 
-  switch (mode)
+  d.target = gen_lowpart (vmode, out);
+  d.op0 = (TARGET_BIG_ENDIAN ? hi : lo);
+  d.op1 = (TARGET_BIG_ENDIAN ? lo : hi);
+  d.vmode = vmode;
+  d.nelt = nelt;
+  d.one_operand_p = false;
+  d.testing_p = false;
+
+  high = (highp ? nelt / 2 : 0);
+  for (i = 0; i < nelt / 2; ++i)
     {
-    case V8QImode:
-      gen = highp ? gen_vec_interleave_highv8qi : gen_vec_interleave_lowv8qi;
-      break;
-    case V4HImode:
-      gen = highp ? gen_vec_interleave_highv4hi : gen_vec_interleave_lowv4hi;
-      break;
-    default:
-      gcc_unreachable ();
+      d.perm[i * 2] = i + high;
+      d.perm[i * 2 + 1] = i + high + nelt;
     }
 
-  x = gen_lowpart (mode, out);
-  if (TARGET_BIG_ENDIAN)
-    x = gen (x, hi, lo);
-  else
-    x = gen (x, lo, hi);
-  emit_insn (x);
+  ok = ia64_expand_vec_perm_const_1 (&d);
+  gcc_assert (ok);
 }
 
 /* Return a vector of the sign-extension of VEC.  */
@@ -10981,5 +11002,354 @@ ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
 {
   return NULL;
 }
+
+/* Construct (set target (vec_select op0 (parallel perm))) and
+   return true if that's a valid instruction in the active ISA.  */
+
+static bool
+expand_vselect (rtx target, rtx op0, const unsigned char *perm, unsigned nelt)
+{
+  rtx rperm[MAX_VECT_LEN], x;
+  unsigned i;
+
+  for (i = 0; i < nelt; ++i)
+    rperm[i] = GEN_INT (perm[i]);
+
+  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
+  x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
+  x = gen_rtx_SET (VOIDmode, target, x);
+
+  x = emit_insn (x);
+  if (recog_memoized (x) < 0)
+    {
+      remove_insn (x);
+      return false;
+    }
+  return true;
+}
+
+/* Similar, but generate a vec_concat from op0 and op1 as well.  */
+
+static bool
+expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
+                        const unsigned char *perm, unsigned nelt)
+{
+  enum machine_mode v2mode;
+  rtx x;
+
+  v2mode = GET_MODE_2XWIDER_MODE (GET_MODE (op0));
+  x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
+  return expand_vselect (target, x, perm, nelt);
+}
+
+/* Try to instantiate D in a single instruction.  */
+
+static bool
+expand_vec_perm_1 (struct expand_vec_perm_d *d)
+{
+  unsigned i, nelt = d->nelt;
+  unsigned char perm2[MAX_VECT_LEN];
+
+  /* Try single-operand selections.  */
+  if (d->one_operand_p
+      && expand_vselect (d->target, d->op0, d->perm, nelt))
+    return true;
+
+  /* Try two operand selections.  */
+  if (expand_vselect_vconcat (d->target, d->op0, d->op1, d->perm, nelt))
+    return true;
+
+  /* Recognize interleave style patterns with reversed operands.  */
+  if (!d->one_operand_p)
+    {
+      for (i = 0; i < nelt; ++i)
+        {
+          unsigned e = d->perm[i];
+          if (e >= nelt)
+            e -= nelt;
+          else
+            e += nelt;
+          perm2[i] = e;
+        }
+
+      if (expand_vselect_vconcat (d->target, d->op1, d->op0, perm2, nelt))
+        return true;
+    }
+
+  return false;
+}
+
+/* Pattern match extract-even and extract-odd permutations.  */
+
+void
+expand_vec_perm_even_odd_1 (rtx target, rtx op0, rtx op1, int odd)
+{
+  unsigned char perm2[MAX_VECT_LEN];
+  enum machine_mode vmode = GET_MODE (target);
+  unsigned int i, nelt = GET_MODE_NUNITS (vmode);
+  rtx temp = gen_reg_rtx (vmode);
+  bool ok;
+
+  /* Implement via MIX plus MUX.  */
+  for (i = 0; i < nelt / 2; i++)
+    {
+      perm2[i * 2] = i * 2 + odd;
+      perm2[i * 2 + 1] = i * 2 + odd + nelt;
+    }
+  ok = expand_vselect_vconcat (temp, op1, op0, perm2, nelt);
+  gcc_assert (ok);
+
+  for (i = 0; i < nelt / 2; i++)
+    {
+      perm2[i] = i * 2;
+      perm2[i + nelt / 2] = i * 2 + 1;
+    }
+  ok = expand_vselect (target, temp, perm2, nelt);
+  gcc_assert (ok);
+}
+
+static bool
+expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
+{
+  unsigned i, odd, nelt = d->nelt;
+
+  if (d->one_operand_p)
+    return false;
+  odd = d->perm[0];
+  if (odd != 0 && odd != 1)
+    return false;
+
+  for (i = 1; i < nelt; ++i)
+    if (d->perm[i] != 2 * i + odd)
+      return false;
+
+  /* V2SF and V2SI should have been matched already.  */
+  gcc_assert (d->nelt >= 4);
+
+  if (d->testing_p)
+    return true;
+
+  expand_vec_perm_even_odd_1 (d->target, d->op0, d->op1, odd);
+  return true;
+}
+
+/* Pattern match broadcast permutations.  */
+
+static bool
+expand_vec_perm_broadcast (struct expand_vec_perm_d *d)
+{
+  unsigned i, elt, nelt = d->nelt;
+  unsigned char perm2[2];
+  rtx temp;
+  bool ok;
+
+  if (!d->one_operand_p)
+    return false;
+
+  elt = d->perm[0];
+  for (i = 1; i < nelt; ++i)
+    if (d->perm[i] != elt)
+      return false;
+
+  switch (d->vmode)
+    {
+    case V2SImode:
+    case V2SFmode:
+      /* Implementable by interleave.  */
+      perm2[0] = elt;
+      perm2[1] = elt + 2;
+      ok = expand_vselect_vconcat (d->target, d->op0, d->op0, perm2, 2);
+      gcc_assert (ok);
+      break;
+
+    case V8QImode:
+      /* Implementable by extract + broadcast.  */
+      if (BYTES_BIG_ENDIAN)
+        elt = 7 - elt;
+      elt *= BITS_PER_UNIT;
+      temp = gen_reg_rtx (DImode);
+      emit_insn (gen_extzv (temp, gen_lowpart (DImode, d->op0),
+                            GEN_INT (elt), GEN_INT (8)));
+      emit_insn (gen_mux1_brcst_qi (d->target, gen_lowpart (QImode, temp)));
+      break;
+
+    case V4HImode:
+      /* Should have been matched directly by vec_select.  */
+    default:
+      gcc_unreachable ();
+    }
+
+  return true;
+}
+
+/* The guts of ia64_expand_vec_perm_const, also used by the ok hook.
+   With all of the interface bits taken care of, perform the expansion
+   in D and return true on success.  */
+
+static bool
+ia64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
+{
+  if (expand_vec_perm_1 (d))
+    return true;
+  if (expand_vec_perm_broadcast (d))
+    return true;
+  if (expand_vec_perm_even_odd (d))
+    return true;
+  return false;
+}
+
+bool
+ia64_expand_vec_perm_const (rtx operands[4])
+{
+  struct expand_vec_perm_d d;
+  unsigned char perm[MAX_VECT_LEN];
+  int i, nelt, which;
+  rtx sel;
+
+  d.target = operands[0];
+  d.op0 = operands[1];
+  d.op1 = operands[2];
+  sel = operands[3];
+
+  d.vmode = GET_MODE (d.target);
+  gcc_assert (VECTOR_MODE_P (d.vmode));
+  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
+  d.testing_p = false;
+
+  gcc_assert (GET_CODE (sel) == CONST_VECTOR);
+  gcc_assert (XVECLEN (sel, 0) == nelt);
+  gcc_checking_assert (sizeof (d.perm) == sizeof (perm));
+
+  for (i = which = 0; i < nelt; ++i)
+    {
+      rtx e = XVECEXP (sel, 0, i);
+      int ei = INTVAL (e) & (2 * nelt - 1);
+
+      which |= (ei < nelt ? 1 : 2);
+      d.perm[i] = ei;
+      perm[i] = ei;
+    }
+
+  switch (which)
+    {
+    default:
+      gcc_unreachable();
+
+    case 3:
+      if (!rtx_equal_p (d.op0, d.op1))
+        {
+          d.one_operand_p = false;
+          break;
+        }
+
+      /* The elements of PERM do not suggest that only the first operand
+         is used, but both operands are identical.  Allow easier matching
+         of the permutation by folding the permutation into the single
+         input vector.  */
+      for (i = 0; i < nelt; ++i)
+        if (d.perm[i] >= nelt)
+          d.perm[i] -= nelt;
+      /* FALLTHRU */
+
+    case 1:
+      d.op1 = d.op0;
+      d.one_operand_p = true;
+      break;
+
+    case 2:
+      for (i = 0; i < nelt; ++i)
+        d.perm[i] -= nelt;
+      d.op0 = d.op1;
+      d.one_operand_p = true;
+      break;
+    }
+
+  if (ia64_expand_vec_perm_const_1 (&d))
+    return true;
+
+  /* If the mask says both arguments are needed, but they are the same,
+     the above tried to expand with one_operand_p true.  If that didn't
+     work, retry with one_operand_p false, as that's what we used in _ok.  */
+  if (which == 3 && d.one_operand_p)
+    {
+      memcpy (d.perm, perm, sizeof (perm));
+      d.one_operand_p = false;
+      return ia64_expand_vec_perm_const_1 (&d);
+    }
+
+  return false;
+}
+
+/* Implement targetm.vectorize.vec_perm_const_ok.  */
+
+static bool
+ia64_vectorize_vec_perm_const_ok (enum machine_mode vmode,
+                                  const unsigned char *sel)
+{
+  struct expand_vec_perm_d d;
+  unsigned int i, nelt, which;
+  bool ret;
+
+  d.vmode = vmode;
+  d.nelt = nelt = GET_MODE_NUNITS (d.vmode);
+  d.testing_p = true;
+
+  /* Extract the values from the vector CST into the permutation
+     array in D.  */
+  memcpy (d.perm, sel, nelt);
+  for (i = which = 0; i < nelt; ++i)
+    {
+      unsigned char e = d.perm[i];
+      gcc_assert (e < 2 * nelt);
+      which |= (e < nelt ? 1 : 2);
+    }
+
+  /* For all elements from second vector, fold the elements to first.  */
+  if (which == 2)
+    for (i = 0; i < nelt; ++i)
+      d.perm[i] -= nelt;
+
+  /* Check whether the mask can be applied to the vector type.  */
+  d.one_operand_p = (which != 3);
+
+  /* Otherwise we have to go through the motions and see if we can
+     figure out how to generate the requested permutation.  */
+  d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
+  d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
+  if (!d.one_operand_p)
+    d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
+
+  start_sequence ();
+  ret = ia64_expand_vec_perm_const_1 (&d);
+  end_sequence ();
+
+  return ret;
+}
+
+void
+ia64_expand_vec_setv2sf (rtx operands[3])
+{
+  struct expand_vec_perm_d d;
+  unsigned int which;
+  bool ok;
+
+  d.target = operands[0];
+  d.op0 = operands[0];
+  d.op1 = gen_reg_rtx (V2SFmode);
+  d.vmode = V2SFmode;
+  d.nelt = 2;
+  d.one_operand_p = false;
+  d.testing_p = false;
+
+  which = INTVAL (operands[2]);
+  gcc_assert (which <= 1);
+  d.perm[0] = 1 - which;
+  d.perm[1] = which + 2;
+
+  emit_insn (gen_fpack (d.op1, operands[1], CONST0_RTX (SFmode)));
+
+  ok = ia64_expand_vec_perm_const_1 (&d);
+  gcc_assert (ok);
+}
 
 #include "gt-ia64.h"
diff --git a/gcc/config/ia64/vect.md b/gcc/config/ia64/vect.md
index 2f068eb..9cc6c0f 100644
--- a/gcc/config/ia64/vect.md
+++ b/gcc/config/ia64/vect.md
@@ -20,11 +20,14 @@
 ;; Integer vector operations
 
+(define_mode_iterator VEC [V8QI V4HI V2SI V2SF])
 (define_mode_iterator VECINT [V8QI V4HI V2SI])
 (define_mode_iterator VECINT12 [V8QI V4HI])
 (define_mode_iterator VECINT24 [V4HI V2SI])
 
 (define_mode_attr vecsize [(V8QI "1") (V4HI "2") (V2SI "4")])
 (define_mode_attr vecwider [(V8QI "V4HI") (V4HI "V2SI")])
+(define_mode_attr vecint
+  [(V8QI "V8QI") (V4HI "V4HI") (V2SI "V2SI") (V2SF "V2SI")])
 
 (define_expand "mov<mode>"
   [(set (match_operand:VECINT 0 "general_operand" "")
@@ -756,7 +759,7 @@
 }
   [(set_attr "itanium_class" "mmshf")])
 
-(define_insn "vec_interleave_lowv8qi"
+(define_insn "*vec_interleave_lowv8qi"
   [(set (match_operand:V8QI 0 "gr_register_operand" "=r")
 	(vec_select:V8QI
 	  (vec_concat:V16QI
@@ -776,7 +779,7 @@
 }
   [(set_attr "itanium_class" "mmshf")])
 
-(define_insn "vec_interleave_highv8qi"
+(define_insn "*vec_interleave_highv8qi"
   [(set (match_operand:V8QI 0 "gr_register_operand" "=r")
 	(vec_select:V8QI
 	  (vec_concat:V16QI
@@ -796,7 +799,7 @@
 }
   [(set_attr "itanium_class" "mmshf")])
 
-(define_insn "mix1_even"
+(define_insn "*mix1_even"
   [(set (match_operand:V8QI 0 "gr_register_operand" "=r")
 	(vec_select:V8QI
 	  (vec_concat:V16QI
@@ -816,7 +819,7 @@
 }
   [(set_attr "itanium_class" "mmshf")])
 
-(define_insn "mix1_odd"
+(define_insn "*mix1_odd"
   [(set (match_operand:V8QI 0 "gr_register_operand" "=r")
 	(vec_select:V8QI
 	  (vec_concat:V16QI
@@ -872,7 +875,7 @@
   "mux1 %0 = %1, @shuf"
   [(set_attr "itanium_class" "mmshf")])
 
-(define_insn "mux1_alt"
+(define_insn "*mux1_alt"
   [(set (match_operand:V8QI 0 "gr_register_operand" "=r")
 	(vec_select:V8QI
 	  (match_operand:V8QI 1 "gr_register_operand" "r")
@@ -900,7 +903,7 @@
   "mux1 %0 = %1, @brcst"
   [(set_attr "itanium_class" "mmshf")])
 
-(define_insn "*mux1_brcst_qi"
+(define_insn "mux1_brcst_qi"
   [(set (match_operand:V8QI 0 "gr_register_operand" "=r")
 	(vec_duplicate:V8QI
 	  (match_operand:QI 1 "gr_register_operand" "r")))]
@@ -908,31 +911,7 @@
   "mux1 %0 = %1, @brcst"
   [(set_attr "itanium_class" "mmshf")])
 
-(define_expand "vec_extract_evenv8qi"
-  [(match_operand:V8QI 0 "gr_register_operand" "")
-   (match_operand:V8QI 1 "gr_register_operand" "")
-   (match_operand:V8QI 2 "gr_register_operand" "")]
-  ""
-{
-  rtx temp = gen_reg_rtx (V8QImode);
-  emit_insn (gen_mix1_even (temp, operands[1], operands[2]));
-  emit_insn (gen_mux1_alt (operands[0], temp));
-  DONE;
-})
-
-(define_expand "vec_extract_oddv8qi"
"vec_extract_oddv8qi" - [(match_operand:V8QI 0 "gr_register_operand" "") - (match_operand:V8QI 1 "gr_register_operand" "") - (match_operand:V8QI 2 "gr_register_operand" "")] - "" -{ - rtx temp = gen_reg_rtx (V8QImode); - emit_insn (gen_mix1_odd (temp, operands[1], operands[2])); - emit_insn (gen_mux1_alt (operands[0], temp)); - DONE; -}) - -(define_insn "vec_interleave_lowv4hi" +(define_insn "*vec_interleave_lowv4hi" [(set (match_operand:V4HI 0 "gr_register_operand" "=r") (vec_select:V4HI (vec_concat:V8HI @@ -950,7 +929,7 @@ } [(set_attr "itanium_class" "mmshf")]) -(define_insn "vec_interleave_highv4hi" +(define_insn "*vec_interleave_highv4hi" [(set (match_operand:V4HI 0 "gr_register_operand" "=r") (vec_select:V4HI (vec_concat:V8HI @@ -1034,38 +1013,6 @@ } [(set_attr "itanium_class" "mmshf")]) -(define_expand "vec_extract_evenodd_helper" - [(set (match_operand:V4HI 0 "gr_register_operand" "") - (vec_select:V4HI - (match_operand:V4HI 1 "gr_register_operand" "") - (parallel [(const_int 0) (const_int 2) - (const_int 1) (const_int 3)])))] - "") - -(define_expand "vec_extract_evenv4hi" - [(match_operand:V4HI 0 "gr_register_operand") - (match_operand:V4HI 1 "gr_reg_or_0_operand") - (match_operand:V4HI 2 "gr_reg_or_0_operand")] - "" -{ - rtx temp = gen_reg_rtx (V4HImode); - emit_insn (gen_mix2_even (temp, operands[1], operands[2])); - emit_insn (gen_vec_extract_evenodd_helper (operands[0], temp)); - DONE; -}) - -(define_expand "vec_extract_oddv4hi" - [(match_operand:V4HI 0 "gr_register_operand") - (match_operand:V4HI 1 "gr_reg_or_0_operand") - (match_operand:V4HI 2 "gr_reg_or_0_operand")] - "" -{ - rtx temp = gen_reg_rtx (V4HImode); - emit_insn (gen_mix2_odd (temp, operands[1], operands[2])); - emit_insn (gen_vec_extract_evenodd_helper (operands[0], temp)); - DONE; -}) - (define_insn "*mux2_brcst_hi" [(set (match_operand:V4HI 0 "gr_register_operand" "=r") (vec_duplicate:V4HI @@ -1074,7 +1021,7 @@ "mux2 %0 = %1, 0" [(set_attr "itanium_class" "mmshf")]) -(define_insn "vec_interleave_lowv2si" +(define_insn "*vec_interleave_lowv2si" [(set (match_operand:V2SI 0 "gr_register_operand" "=r") (vec_select:V2SI (vec_concat:V4SI @@ -1091,7 +1038,7 @@ } [(set_attr "itanium_class" "mmshf")]) -(define_insn "vec_interleave_highv2si" +(define_insn "*vec_interleave_highv2si" [(set (match_operand:V2SI 0 "gr_register_operand" "=r") (vec_select:V2SI (vec_concat:V4SI @@ -1108,36 +1055,6 @@ } [(set_attr "itanium_class" "mmshf")]) -(define_expand "vec_extract_evenv2si" - [(match_operand:V2SI 0 "gr_register_operand" "") - (match_operand:V2SI 1 "gr_register_operand" "") - (match_operand:V2SI 2 "gr_register_operand" "")] - "" -{ - if (TARGET_BIG_ENDIAN) - emit_insn (gen_vec_interleave_highv2si (operands[0], operands[1], - operands[2])); - else - emit_insn (gen_vec_interleave_lowv2si (operands[0], operands[1], - operands[2])); - DONE; -}) - -(define_expand "vec_extract_oddv2si" - [(match_operand:V2SI 0 "gr_register_operand" "") - (match_operand:V2SI 1 "gr_register_operand" "") - (match_operand:V2SI 2 "gr_register_operand" "")] - "" -{ - if (TARGET_BIG_ENDIAN) - emit_insn (gen_vec_interleave_lowv2si (operands[0], operands[1], - operands[2])); - else - emit_insn (gen_vec_interleave_highv2si (operands[0], operands[1], - operands[2])); - DONE; -}) - (define_expand "vec_initv2si" [(match_operand:V2SI 0 "gr_register_operand" "") (match_operand 1 "" "")] @@ -1479,7 +1396,7 @@ } [(set_attr "itanium_class" "fmisc")]) -(define_insn "vec_interleave_highv2sf" +(define_insn "*vec_interleave_highv2sf" [(set (match_operand:V2SF 0 
"fr_register_operand" "=f") (vec_select:V2SF (vec_concat:V4SF @@ -1496,7 +1413,7 @@ } [(set_attr "itanium_class" "fmisc")]) -(define_insn "vec_interleave_lowv2sf" +(define_insn "*vec_interleave_lowv2sf" [(set (match_operand:V2SF 0 "fr_register_operand" "=f") (vec_select:V2SF (vec_concat:V4SF @@ -1530,58 +1447,13 @@ } [(set_attr "itanium_class" "fmisc")]) -(define_expand "vec_extract_evenv2sf" - [(match_operand:V2SF 0 "gr_register_operand" "") - (match_operand:V2SF 1 "gr_register_operand" "") - (match_operand:V2SF 2 "gr_register_operand" "")] - "" -{ - if (TARGET_BIG_ENDIAN) - emit_insn (gen_vec_interleave_highv2sf (operands[0], operands[1], - operands[2])); - else - emit_insn (gen_vec_interleave_lowv2sf (operands[0], operands[1], - operands[2])); - DONE; -}) - -(define_expand "vec_extract_oddv2sf" - [(match_operand:V2SF 0 "gr_register_operand" "") - (match_operand:V2SF 1 "gr_register_operand" "") - (match_operand:V2SF 2 "gr_register_operand" "")] - "" -{ - if (TARGET_BIG_ENDIAN) - emit_insn (gen_vec_interleave_lowv2sf (operands[0], operands[1], - operands[2])); - else - emit_insn (gen_vec_interleave_highv2sf (operands[0], operands[1], - operands[2])); - DONE; -}) - (define_expand "vec_setv2sf" [(match_operand:V2SF 0 "fr_register_operand" "") (match_operand:SF 1 "fr_register_operand" "") (match_operand 2 "const_int_operand" "")] "" { - rtx op0 = operands[0]; - rtx tmp = gen_reg_rtx (V2SFmode); - - emit_insn (gen_fpack (tmp, operands[1], CONST0_RTX (SFmode))); - - switch (INTVAL (operands[2])) - { - case 0: - emit_insn (gen_fmix_lr (op0, tmp, op0)); - break; - case 1: - emit_insn (gen_vec_interleave_lowv2sf (op0, op0, tmp)); - break; - default: - gcc_unreachable (); - } + ia64_expand_vec_setv2sf (operands); DONE; }) @@ -1703,10 +1575,7 @@ { rtx op1 = gen_lowpart (V8QImode, operands[1]); rtx op2 = gen_lowpart (V8QImode, operands[2]); - if (TARGET_BIG_ENDIAN) - emit_insn (gen_vec_extract_oddv8qi (operands[0], op1, op2)); - else - emit_insn (gen_vec_extract_evenv8qi (operands[0], op1, op2)); + expand_vec_perm_even_odd_1 (operands[0], op1, op2, TARGET_BIG_ENDIAN); DONE; }) @@ -1718,13 +1587,23 @@ { rtx op1 = gen_lowpart (V4HImode, operands[1]); rtx op2 = gen_lowpart (V4HImode, operands[2]); - if (TARGET_BIG_ENDIAN) - emit_insn (gen_vec_extract_oddv4hi (operands[0], op1, op2)); - else - emit_insn (gen_vec_extract_evenv4hi (operands[0], op1, op2)); + expand_vec_perm_even_odd_1 (operands[0], op1, op2, TARGET_BIG_ENDIAN); DONE; }) +(define_expand "vec_perm_const" + [(match_operand:VEC 0 "register_operand" "") + (match_operand:VEC 1 "register_operand" "") + (match_operand:VEC 2 "register_operand" "") + (match_operand: 3 "" "")] + "" +{ + if (ia64_expand_vec_perm_const (operands)) + DONE; + else + FAIL; +}) + ;; Missing operations ;; fprcpa ;; fpsqrta