From patchwork Tue Nov 25 14:03:54 2014
X-Patchwork-Submitter: Alan Lawrence
X-Patchwork-Id: 414690
Message-ID: <54748C4A.5080406@arm.com>
Date: Tue, 25 Nov 2014 14:03:54 +0000
From: Alan Lawrence
To: "gcc-patches@gcc.gnu.org"
Subject: [PATCH][AArch64] Fix ICE at -O0 on vld1_lane intrinsics

The vld1_lane intrinsics ICE at -O0 because they contain a call to the
corresponding vset_lane intrinsic, through which the lane index is not
constant-propagated. (They are fine at -O1 and higher!) This patch fixes the
ICE by replacing that call with a macro.

Rather than defining many individual macros
__aarch64_vset(q?)_lane_[uspf](8|16|32|64), this introduces a single
__AARCH64_NUM_LANES macro using sizeof (), so that one
__aarch64_vset_lane_any macro handles all variants (with bounds-checking and
endianness-flipping). This is less error-prone than writing the number of
lanes for each variant by hand, as previously.

The endianness-flipping is also factored out into a separate macro,
__aarch64_lane; I intend to use this for vget_lane too in another patch.
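As an illustration of the index arithmetic (not part of the patch), the
following self-contained sketch mimics the two helper macros; my_int16x4_t
and the test values are made-up stand-ins using GCC's vector_size extension
rather than the real arm_neon.h types:

  #include <stdint.h>
  #include <stdio.h>

  /* Number of lanes follows from the vector's size, so no per-variant
     constant is needed.  */
  #define __AARCH64_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))

  #ifdef __AARCH64EB__
  /* Big-endian: GCC's vector indices run opposite to the architectural
     lane numbering, so flip the index.  */
  #define __aarch64_lane(__vec, __idx) \
    (__AARCH64_NUM_LANES (__vec) - 1 - __idx)
  #else
  #define __aarch64_lane(__vec, __idx) __idx
  #endif

  /* Illustrative stand-in for int16x4_t.  */
  typedef int16_t my_int16x4_t __attribute__ ((vector_size (8)));

  int
  main (void)
  {
    my_int16x4_t v = {0, 1, 2, 3};
    /* Prints 1 on little-endian; with __AARCH64EB__ defined it prints 2,
       i.e. architectural lane 1 counted from the other end.  */
    printf ("%d\n", v[__aarch64_lane (v, 1)]);
    return 0;
  }

Deriving the lane count from sizeof means each vset_lane_* wrapper no longer
has to state it, which is exactly where the hand-written counts could
previously go wrong.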
Tested with check-gcc on aarch64-none-elf and aarch64_be-none-elf (including a
new test that FAILs without this patch). Ok for trunk?

gcc/ChangeLog:

        * config/aarch64/arm_neon.h (__AARCH64_NUM_LANES, __aarch64_lane *2):
        New.
        (__aarch64_vset_lane_any): Redefine using the above, for both
        big- and little-endian.
        (vset_lane_f32, vset_lane_f64, vset_lane_p8, vset_lane_p16,
        vset_lane_s8, vset_lane_s16, vset_lane_s32, vset_lane_s64,
        vset_lane_u8, vset_lane_u16, vset_lane_u32, vset_lane_u64):
        Remove number of lanes.
        (vld1_lane_f32, vld1_lane_f64, vld1_lane_p8, vld1_lane_p16,
        vld1_lane_s8, vld1_lane_s16, vld1_lane_s32, vld1_lane_s64,
        vld1_lane_u8, vld1_lane_u16, vld1_lane_u32, vld1_lane_u64):
        Call __aarch64_vset_lane_any rather than vset_lane_xxx.

gcc/testsuite/ChangeLog:

        * gcc.target/aarch64/vld1_lane-o0.c: New test.

diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h
index 921a5db..1291a8d 100644
--- a/gcc/config/aarch64/arm_neon.h
+++ b/gcc/config/aarch64/arm_neon.h
@@ -604,173 +604,28 @@ typedef struct poly16x8x4_t
 #define __aarch64_vdupq_laneq_u64(__a, __b) \
   __aarch64_vdup_lane_any (u64, q, q, __a, __b)
 
-/* vset_lane and vld1_lane internal macro. */
+/* Internal macro for lane indices. */
+
+#define __AARCH64_NUM_LANES(__v) (sizeof (__v) / sizeof (__v[0]))
 
-#ifdef __AARCH64EB__
 /* For big-endian, GCC's vector indices are the opposite way around to the
    architectural lane indices used by Neon intrinsics. */
-#define __aarch64_vset_lane_any(__vec, __index, __val, __lanes) \
-  __extension__ \
-  ({ \
-    __builtin_aarch64_im_lane_boundsi (__index, __lanes); \
-    __vec[__lanes - 1 - __index] = __val; \
-    __vec; \
-  })
+#ifdef __AARCH64EB__
+#define __aarch64_lane(__vec, __idx) (__AARCH64_NUM_LANES (__vec) - 1 - __idx)
 #else
-#define __aarch64_vset_lane_any(__vec, __index, __val, __lanes) \
-  __extension__ \
-  ({ \
-    __builtin_aarch64_im_lane_boundsi (__index, __lanes); \
-    __vec[__index] = __val; \
-    __vec; \
-  })
+#define __aarch64_lane(__vec, __idx) __idx
 #endif
 
-/* vset_lane */
-
-__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
-vset_lane_f32 (float32_t __elem, float32x2_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 2);
-}
-
-__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
-vset_lane_f64 (float64_t __elem, float64x1_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 1);
-}
-
-__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
-vset_lane_p8 (poly8_t __elem, poly8x8_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 8);
-}
-
-__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
-vset_lane_p16 (poly16_t __elem, poly16x4_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 4);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-vset_lane_s8 (int8_t __elem, int8x8_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 8);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-vset_lane_s16 (int16_t __elem, int16x4_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 4);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-vset_lane_s32 (int32_t __elem, int32x2_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 2);
-}
-
-__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
-vset_lane_s64 (int64_t __elem, int64x1_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 1);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-vset_lane_u8 (uint8_t __elem, uint8x8_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 8);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-vset_lane_u16 (uint16_t __elem, uint16x4_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 4);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-vset_lane_u32 (uint32_t __elem, uint32x2_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 2);
-}
-
-__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
-vset_lane_u64 (uint64_t __elem, uint64x1_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 1);
-}
-
-__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
-vsetq_lane_f32 (float32_t __elem, float32x4_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 4);
-}
-
-__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
-vsetq_lane_f64 (float64_t __elem, float64x2_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 2);
-}
-
-__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
-vsetq_lane_p8 (poly8_t __elem, poly8x16_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 16);
-}
-
-__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
-vsetq_lane_p16 (poly16_t __elem, poly16x8_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 8);
-}
-
-__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
-vsetq_lane_s8 (int8_t __elem, int8x16_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 16);
-}
-
-__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
-vsetq_lane_s16 (int16_t __elem, int16x8_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 8);
-}
-
-__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
-vsetq_lane_s32 (int32_t __elem, int32x4_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 4);
-}
-
-__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
-vsetq_lane_s64 (int64_t __elem, int64x2_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 2);
-}
-
-__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
-vsetq_lane_u8 (uint8_t __elem, uint8x16_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 16);
-}
-
-__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
-vsetq_lane_u16 (uint16_t __elem, uint16x8_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 8);
-}
-
-__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
-vsetq_lane_u32 (uint32_t __elem, uint32x4_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 4);
-}
+/* vset_lane and vld1_lane internal macro. */
 
-__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
-vsetq_lane_u64 (uint64_t __elem, uint64x2_t __vec, const int __index)
-{
-  return __aarch64_vset_lane_any (__vec, __index, __elem, 2);
-}
+#define __aarch64_vset_lane_any(__elem, __vec, __index) \
+  __extension__ \
+  ({ \
+    __builtin_aarch64_im_lane_boundsi (__index, \
+                                       __AARCH64_NUM_LANES (__vec)); \
+    __vec[__aarch64_lane (__vec, __index)] = __elem; \
+    __vec; \
+  })
 
 /* vadd */
 
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
@@ -4387,6 +4242,154 @@ vreinterpretq_u32_p16 (poly16x8_t __a)
   return (uint32x4_t) __a;
 }
 
+/* vset_lane */
+
+__extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
+vset_lane_f32 (float32_t __elem, float32x2_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
+vset_lane_f64 (float64_t __elem, float64x1_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
+vset_lane_p8 (poly8_t __elem, poly8x8_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
+vset_lane_p16 (poly16_t __elem, poly16x4_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+vset_lane_s8 (int8_t __elem, int8x8_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+vset_lane_s16 (int16_t __elem, int16x4_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+vset_lane_s32 (int32_t __elem, int32x2_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
+vset_lane_s64 (int64_t __elem, int64x1_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+vset_lane_u8 (uint8_t __elem, uint8x8_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+vset_lane_u16 (uint16_t __elem, uint16x4_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+vset_lane_u32 (uint32_t __elem, uint32x2_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
+vset_lane_u64 (uint64_t __elem, uint64x1_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+/* vsetq_lane */
+
+__extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
+vsetq_lane_f32 (float32_t __elem, float32x4_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
+vsetq_lane_f64 (float64_t __elem, float64x2_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
+vsetq_lane_p8 (poly8_t __elem, poly8x16_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
+vsetq_lane_p16 (poly16_t __elem, poly16x8_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
+vsetq_lane_s8 (int8_t __elem, int8x16_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
+vsetq_lane_s16 (int16_t __elem, int16x8_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
+vsetq_lane_s32 (int32_t __elem, int32x4_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
+vsetq_lane_s64 (int64_t __elem, int64x2_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
+vsetq_lane_u8 (uint8_t __elem, uint8x16_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
+vsetq_lane_u16 (uint16_t __elem, uint16x8_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
+vsetq_lane_u32 (uint32_t __elem, uint32x4_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
+__extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
+vsetq_lane_u64 (uint64_t __elem, uint64x2_t __vec, const int __index)
+{
+  return __aarch64_vset_lane_any (__elem, __vec, __index);
+}
+
 #define __GET_LOW(__TYPE) \
   uint64x2_t tmp = vreinterpretq_u64_##__TYPE (__a); \
   uint64x1_t lo = vcreate_u64 (vgetq_lane_u64 (tmp, 0)); \
@@ -16007,73 +16010,73 @@ vld1q_dup_u64 (const uint64_t* __a)
 __extension__ static __inline float32x2_t __attribute__ ((__always_inline__))
 vld1_lane_f32 (const float32_t *__src, float32x2_t __vec, const int __lane)
 {
-  return vset_lane_f32 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline float64x1_t __attribute__ ((__always_inline__))
 vld1_lane_f64 (const float64_t *__src, float64x1_t __vec, const int __lane)
 {
-  return vset_lane_f64 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline poly8x8_t __attribute__ ((__always_inline__))
 vld1_lane_p8 (const poly8_t *__src, poly8x8_t __vec, const int __lane)
 {
-  return vset_lane_p8 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline poly16x4_t __attribute__ ((__always_inline__))
 vld1_lane_p16 (const poly16_t *__src, poly16x4_t __vec, const int __lane)
 {
-  return vset_lane_p16 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
 vld1_lane_s8 (const int8_t *__src, int8x8_t __vec, const int __lane)
 {
-  return vset_lane_s8 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
 vld1_lane_s16 (const int16_t *__src, int16x4_t __vec, const int __lane)
 {
-  return vset_lane_s16 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
 vld1_lane_s32 (const int32_t *__src, int32x2_t __vec, const int __lane)
 {
-  return vset_lane_s32 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline int64x1_t __attribute__ ((__always_inline__))
 vld1_lane_s64 (const int64_t *__src, int64x1_t __vec, const int __lane)
 {
-  return vset_lane_s64 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
 vld1_lane_u8 (const uint8_t *__src, uint8x8_t __vec, const int __lane)
 {
-  return vset_lane_u8 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
 vld1_lane_u16 (const uint16_t *__src, uint16x4_t __vec, const int __lane)
 {
-  return vset_lane_u16 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
 vld1_lane_u32 (const uint32_t *__src, uint32x2_t __vec, const int __lane)
 {
-  return vset_lane_u32 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__))
 vld1_lane_u64 (const uint64_t *__src, uint64x1_t __vec, const int __lane)
 {
-  return vset_lane_u64 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 /* vld1q_lane */
 
@@ -16081,73 +16084,73 @@ vld1_lane_u64 (const uint64_t *__src, uint64x1_t __vec, const int __lane)
 __extension__ static __inline float32x4_t __attribute__ ((__always_inline__))
 vld1q_lane_f32 (const float32_t *__src, float32x4_t __vec, const int __lane)
 {
-  return vsetq_lane_f32 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline float64x2_t __attribute__ ((__always_inline__))
 vld1q_lane_f64 (const float64_t *__src, float64x2_t __vec, const int __lane)
 {
-  return vsetq_lane_f64 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline poly8x16_t __attribute__ ((__always_inline__))
 vld1q_lane_p8 (const poly8_t *__src, poly8x16_t __vec, const int __lane)
 {
-  return vsetq_lane_p8 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline poly16x8_t __attribute__ ((__always_inline__))
 vld1q_lane_p16 (const poly16_t *__src, poly16x8_t __vec, const int __lane)
 {
-  return vsetq_lane_p16 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
 vld1q_lane_s8 (const int8_t *__src, int8x16_t __vec, const int __lane)
 {
-  return vsetq_lane_s8 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
 vld1q_lane_s16 (const int16_t *__src, int16x8_t __vec, const int __lane)
 {
-  return vsetq_lane_s16 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
 vld1q_lane_s32 (const int32_t *__src, int32x4_t __vec, const int __lane)
 {
-  return vsetq_lane_s32 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
 vld1q_lane_s64 (const int64_t *__src, int64x2_t __vec, const int __lane)
 {
-  return vsetq_lane_s64 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
 vld1q_lane_u8 (const uint8_t *__src, uint8x16_t __vec, const int __lane)
 {
-  return vsetq_lane_u8 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
 vld1q_lane_u16 (const uint16_t *__src, uint16x8_t __vec, const int __lane)
 {
-  return vsetq_lane_u16 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
 vld1q_lane_u32 (const uint32_t *__src, uint32x4_t __vec, const int __lane)
 {
-  return vsetq_lane_u32 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__))
 vld1q_lane_u64 (const uint64_t *__src, uint64x2_t __vec, const int __lane)
 {
-  return vsetq_lane_u64 (*__src, __vec, __lane);
+  return __aarch64_vset_lane_any (*__src, __vec, __lane);
 }
 
 /* vldn */
diff --git a/gcc/testsuite/gcc.target/aarch64/vld1_lane-o0.c b/gcc/testsuite/gcc.target/aarch64/vld1_lane-o0.c
new file mode 100644
index 0000000..58e0c9d
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/vld1_lane-o0.c
@@ -0,0 +1,13 @@
+/* PR/63950 Test bounds checking at -O0.  */
+
+/* { dg-options "-std=c99 -O0" } */
+
+#include <arm_neon.h>
+
+int
+main (int argc, char **argv)
+{
+  int16x4_t in = vcreate_s16 (0xdeadbeef00000000ULL);
+  int16_t src = 17;
+  int16x4_t out = vld1_lane_s16 (&src, in, 1);
+}